author     David Howells <dhowells@redhat.com>   2006-12-05 09:37:56 -0500
committer  David Howells <dhowells@warthog.cambridge.redhat.com>   2006-12-05 09:37:56 -0500
commit     4c1ac1b49122b805adfa4efc620592f68dccf5db (patch)
tree       87557f4bc2fd4fe65b7570489c2f610c45c0adcd /net
parent     c4028958b6ecad064b1a6303a6a5906d4fe48d73 (diff)
parent     d916faace3efc0bf19fe9a615a1ab8fa1a24cd93 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Conflicts:
drivers/infiniband/core/iwcm.c
drivers/net/chelsio/cxgb2.c
drivers/net/wireless/bcm43xx/bcm43xx_main.c
drivers/net/wireless/prism54/islpci_eth.c
drivers/usb/core/hub.h
drivers/usb/input/hid-core.c
net/core/netpoll.c
Fix up merge failures with Linus's head and fix new compilation failures.
Signed-Off-By: David Howells <dhowells@redhat.com>
Diffstat (limited to 'net')
364 files changed, 21212 insertions, 7798 deletions
diff --git a/net/802/hippi.c b/net/802/hippi.c
index 6d7fed3dd99a..579e2ddf5ebe 100644
--- a/net/802/hippi.c
+++ b/net/802/hippi.c
@@ -36,7 +36,6 @@
 #include <net/arp.h>
 #include <net/sock.h>
 #include <asm/uaccess.h>
-#include <asm/checksum.h>
 #include <asm/system.h>
 
 /*
diff --git a/net/Kconfig b/net/Kconfig
index 67e39ad8b8b6..7dfc94920697 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -75,7 +75,7 @@ config NETWORK_SECMARK
 	  If you are unsure how to answer this question, answer N.
 
 menuconfig NETFILTER
-	bool "Network packet filtering (replaces ipchains)"
+	bool "Network packet filtering framework (Netfilter)"
 	---help---
 	  Netfilter is a framework for filtering and mangling network packets
 	  that pass through your Linux box.
@@ -175,33 +175,6 @@ source "net/ipx/Kconfig"
 source "drivers/net/appletalk/Kconfig"
 source "net/x25/Kconfig"
 source "net/lapb/Kconfig"
-
-config NET_DIVERT
-	bool "Frame Diverter (EXPERIMENTAL)"
-	depends on EXPERIMENTAL && BROKEN
-	---help---
-	  The Frame Diverter allows you to divert packets from the
-	  network, that are not aimed at the interface receiving it (in
-	  promisc. mode). Typically, a Linux box setup as an Ethernet bridge
-	  with the Frames Diverter on, can do some *really* transparent www
-	  caching using a Squid proxy for example.
-
-	  This is very useful when you don't want to change your router's
-	  config (or if you simply don't have access to it).
-
-	  The other possible usages of diverting Ethernet Frames are
-	  numberous:
-	    - reroute smtp traffic to another interface
-	    - traffic-shape certain network streams
-	    - transparently proxy smtp connections
-	    - etc...
-
-	  For more informations, please refer to:
-	    <http://diverter.sourceforge.net/>
-	    <http://perso.wanadoo.fr/magpie/EtherDivert.html>
-
-	  If unsure, say N.
-
 source "net/econet/Kconfig"
 source "net/wanrouter/Kconfig"
 source "net/sched/Kconfig"
diff --git a/net/atm/Makefile b/net/atm/Makefile
index 89656d6c0b90..cc50bd1ff1de 100644
--- a/net/atm/Makefile
+++ b/net/atm/Makefile
@@ -7,10 +7,7 @@ mpoa-objs := mpc.o mpoa_caches.o mpoa_proc.o
 
 obj-$(CONFIG_ATM) += atm.o
 obj-$(CONFIG_ATM_CLIP) += clip.o
-atm-$(subst m,y,$(CONFIG_ATM_CLIP)) += ipcommon.o
 obj-$(CONFIG_ATM_BR2684) += br2684.o
-atm-$(subst m,y,$(CONFIG_ATM_BR2684)) += ipcommon.o
-atm-$(subst m,y,$(CONFIG_NET_SCH_ATM)) += ipcommon.o
 atm-$(CONFIG_PROC_FS) += proc.o
 
 obj-$(CONFIG_ATM_LANE) += lec.o
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index d00cca97eb33..83a1c1b1d6cd 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -23,7 +23,6 @@ Author: Marcell GAL, 2000, XDSL Ltd, Hungary
 #include <linux/atmbr2684.h>
 
 #include "common.h"
-#include "ipcommon.h"
 
 /*
  * Define this to use a version of the code which interacts with the higher
@@ -372,7 +371,7 @@ static int br2684_setfilt(struct atm_vcc *atmvcc, void __user *arg)
 
 /* Returns 1 if packet should be dropped */
 static inline int
-packet_fails_filter(u16 type, struct br2684_vcc *brvcc, struct sk_buff *skb)
+packet_fails_filter(__be16 type, struct br2684_vcc *brvcc, struct sk_buff *skb)
 {
 	if (brvcc->filter.netmask == 0)
 		return 0; /* no filter in place */
@@ -500,11 +499,12 @@ Note: we do not have explicit unassign, but look at _push()
 	*/
 	int err;
 	struct br2684_vcc *brvcc;
-	struct sk_buff_head copy;
 	struct sk_buff *skb;
+	struct sk_buff_head *rq;
 	struct br2684_dev *brdev;
 	struct net_device *net_dev;
 	struct atm_backend_br2684 be;
+	unsigned long flags;
 
 	if (copy_from_user(&be, arg, sizeof be))
 		return -EFAULT;
@@ -554,12 +554,30 @@ Note: we do not have explicit unassign, but look at _push()
 	brvcc->old_push = atmvcc->push;
 	barrier();
 	atmvcc->push = br2684_push;
-	skb_queue_head_init(&copy);
-	skb_migrate(&sk_atm(atmvcc)->sk_receive_queue, &copy);
-	while ((skb = skb_dequeue(&copy)) != NULL) {
+
+	rq = &sk_atm(atmvcc)->sk_receive_queue;
+
+	spin_lock_irqsave(&rq->lock, flags);
+	if (skb_queue_empty(rq)) {
+		skb = NULL;
+	} else {
+		/* NULL terminate the list. */
+		rq->prev->next = NULL;
+		skb = rq->next;
+	}
+	rq->prev = rq->next = (struct sk_buff *)rq;
+	rq->qlen = 0;
+	spin_unlock_irqrestore(&rq->lock, flags);
+
+	while (skb) {
+		struct sk_buff *next = skb->next;
+
+		skb->next = skb->prev = NULL;
 		BRPRIV(skb->dev)->stats.rx_bytes -= skb->len;
 		BRPRIV(skb->dev)->stats.rx_packets--;
 		br2684_push(atmvcc, skb);
+
+		skb = next;
 	}
 	__module_get(THIS_MODULE);
 	return 0;
diff --git a/net/atm/clip.c b/net/atm/clip.c
index 7af2c411da82..5f8a1d222720 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -38,7 +38,6 @@
 
 #include "common.h"
 #include "resources.h"
-#include "ipcommon.h"
 #include <net/atmclip.h>
 
 
@@ -54,7 +53,7 @@ static struct atm_vcc *atmarpd;
 static struct neigh_table clip_tbl;
 static struct timer_list idle_timer;
 
-static int to_atmarpd(enum atmarp_ctrl_type type, int itf, unsigned long ip)
+static int to_atmarpd(enum atmarp_ctrl_type type, int itf, __be32 ip)
 {
 	struct sock *sk;
 	struct atmarp_ctrl *ctrl;
@@ -220,7 +219,7 @@ static void clip_push(struct atm_vcc *vcc, struct sk_buff *skb)
 	    || memcmp(skb->data, llc_oui, sizeof (llc_oui)))
 		skb->protocol = htons(ETH_P_IP);
 	else {
-		skb->protocol = ((u16 *) skb->data)[3];
+		skb->protocol = ((__be16 *) skb->data)[3];
 		skb_pull(skb, RFC1483LLC_LEN);
 		if (skb->protocol == htons(ETH_P_ARP)) {
 			PRIV(skb->dev)->stats.rx_packets++;
@@ -430,7 +429,7 @@ static int clip_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 		here = skb_push(skb, RFC1483LLC_LEN);
 		memcpy(here, llc_oui, sizeof(llc_oui));
-		((u16 *) here)[3] = skb->protocol;
+		((__be16 *) here)[3] = skb->protocol;
 	}
 	atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
 	ATM_SKB(skb)->atm_options = vcc->atm_options;
@@ -469,8 +468,9 @@ static struct net_device_stats *clip_get_stats(struct net_device *dev)
 static int clip_mkip(struct atm_vcc *vcc, int timeout)
 {
 	struct clip_vcc *clip_vcc;
-	struct sk_buff_head copy;
 	struct sk_buff *skb;
+	struct sk_buff_head *rq;
+	unsigned long flags;
 
 	if (!vcc->push)
 		return -EBADFD;
@@ -490,10 +490,26 @@ static int clip_mkip(struct atm_vcc *vcc, int timeout)
 	clip_vcc->old_pop = vcc->pop;
 	vcc->push = clip_push;
 	vcc->pop = clip_pop;
-	skb_queue_head_init(&copy);
-	skb_migrate(&sk_atm(vcc)->sk_receive_queue, &copy);
+
+	rq = &sk_atm(vcc)->sk_receive_queue;
+
+	spin_lock_irqsave(&rq->lock, flags);
+	if (skb_queue_empty(rq)) {
+		skb = NULL;
+	} else {
+		/* NULL terminate the list. */
+		rq->prev->next = NULL;
+		skb = rq->next;
+	}
+	rq->prev = rq->next = (struct sk_buff *)rq;
+	rq->qlen = 0;
+	spin_unlock_irqrestore(&rq->lock, flags);
+
 	/* re-process everything received between connection setup and MKIP */
-	while ((skb = skb_dequeue(&copy)) != NULL)
+	while (skb) {
+		struct sk_buff *next = skb->next;
+
+		skb->next = skb->prev = NULL;
 		if (!clip_devs) {
 			atm_return(vcc, skb->truesize);
 			kfree_skb(skb);
@@ -506,10 +522,13 @@ static int clip_mkip(struct atm_vcc *vcc, int timeout)
 			PRIV(skb->dev)->stats.rx_bytes -= len;
 			kfree_skb(skb);
 		}
+
+		skb = next;
+	}
 	return 0;
 }
 
-static int clip_setentry(struct atm_vcc *vcc, u32 ip)
+static int clip_setentry(struct atm_vcc *vcc, __be32 ip)
 {
 	struct neighbour *neigh;
 	struct atmarp_entry *entry;
@@ -752,7 +771,7 @@ static int clip_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 		err = clip_mkip(vcc, arg);
 		break;
 	case ATMARP_SETENTRY:
-		err = clip_setentry(vcc, arg);
+		err = clip_setentry(vcc, (__force __be32)arg);
 		break;
 	case ATMARP_ENCAP:
 		err = clip_encap(vcc, arg);
diff --git a/net/atm/ipcommon.c b/net/atm/ipcommon.c
deleted file mode 100644
index 1d3de42fada0..000000000000
--- a/net/atm/ipcommon.c
+++ /dev/null
@@ -1,63 +0,0 @@
-/* net/atm/ipcommon.c - Common items for all ways of doing IP over ATM */
-
-/* Written 1996-2000 by Werner Almesberger, EPFL LRC/ICA */
-
-
-#include <linux/module.h>
-#include <linux/string.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/in.h>
-#include <linux/atmdev.h>
-#include <linux/atmclip.h>
-
-#include "common.h"
-#include "ipcommon.h"
-
-
-#if 0
-#define DPRINTK(format,args...) printk(KERN_DEBUG format,##args)
-#else
-#define DPRINTK(format,args...)
-#endif
-
-
-/*
- * skb_migrate appends the list at "from" to "to", emptying "from" in the
- * process. skb_migrate is atomic with respect to all other skb operations on
- * "from" and "to". Note that it locks both lists at the same time, so to deal
- * with the lock ordering, the locks are taken in address order.
- *
- * This function should live in skbuff.c or skbuff.h.
- */
-
-
-void skb_migrate(struct sk_buff_head *from, struct sk_buff_head *to)
-{
-	unsigned long flags;
-	struct sk_buff *skb_from = (struct sk_buff *) from;
-	struct sk_buff *skb_to = (struct sk_buff *) to;
-	struct sk_buff *prev;
-
-	if ((unsigned long) from < (unsigned long) to) {
-		spin_lock_irqsave(&from->lock, flags);
-		spin_lock_nested(&to->lock, SINGLE_DEPTH_NESTING);
-	} else {
-		spin_lock_irqsave(&to->lock, flags);
-		spin_lock_nested(&from->lock, SINGLE_DEPTH_NESTING);
-	}
-	prev = from->prev;
-	from->next->prev = to->prev;
-	prev->next = skb_to;
-	to->prev->next = from->next;
-	to->prev = from->prev;
-	to->qlen += from->qlen;
-	spin_unlock(&to->lock);
-	from->prev = skb_from;
-	from->next = skb_from;
-	from->qlen = 0;
-	spin_unlock_irqrestore(&from->lock, flags);
-}
-
-
-EXPORT_SYMBOL(skb_migrate);
diff --git a/net/atm/ipcommon.h b/net/atm/ipcommon.h
deleted file mode 100644
index d72165f60939..000000000000
--- a/net/atm/ipcommon.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/* net/atm/ipcommon.h - Common items for all ways of doing IP over ATM */
-
-/* Written 1996-2000 by Werner Almesberger, EPFL LRC/ICA */
-
-
-#ifndef NET_ATM_IPCOMMON_H
-#define NET_ATM_IPCOMMON_H
-
-
-#include <linux/string.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/atmdev.h>
-
-/*
- * Appends all skbs from "from" to "to". The operation is atomic with respect
- * to all other skb operations on "from" or "to".
- */
-
-void skb_migrate(struct sk_buff_head *from,struct sk_buff_head *to);
-
-#endif
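The two deleted files implemented skb_migrate(), which spliced the whole queue at "from" onto "to" while holding both queue locks, taking the two locks in address order so that concurrent migrations in opposite directions cannot deadlock; the br2684 and clip callers above now open-code a simpler "detach the queue under its own lock, then walk the detached list" sequence instead. A minimal standalone sketch of the address-ordered double-lock idiom, for reference only (not part of this commit; the helper name is made up):

#include <linux/skbuff.h>
#include <linux/spinlock.h>

/* Illustrative helper: lock two sk_buff queues without risking an ABBA
 * deadlock by always taking the lower-addressed queue's lock first. */
static void queue_pair_lock(struct sk_buff_head *a, struct sk_buff_head *b,
			    unsigned long *flags)
{
	if (a < b) {
		spin_lock_irqsave(&a->lock, *flags);
		spin_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock_irqsave(&b->lock, *flags);
		spin_lock_nested(&a->lock, SINGLE_DEPTH_NESTING);
	}
}

The unlock path releases the locks in the reverse order; because the ordering rule depends only on the addresses, every caller agrees on which lock is "first".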
diff --git a/net/atm/lec.c b/net/atm/lec.c
index e801fff69dc0..3fc0abeeaf34 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -204,9 +204,9 @@ static unsigned char *get_tr_dst(unsigned char *packet, unsigned char *rdesc)
 	memset(rdesc, 0, ETH_ALEN);
 	/* offset 4 comes from LAN destination field in LE control frames */
 	if (trh->rcf & htons((uint16_t) TR_RCF_DIR_BIT))
-		memcpy(&rdesc[4], &trh->rseg[num_rdsc - 2], sizeof(uint16_t));
+		memcpy(&rdesc[4], &trh->rseg[num_rdsc - 2], sizeof(__be16));
 	else {
-		memcpy(&rdesc[4], &trh->rseg[1], sizeof(uint16_t));
+		memcpy(&rdesc[4], &trh->rseg[1], sizeof(__be16));
 		rdesc[5] = ((ntohs(trh->rseg[0]) & 0x000f) | (rdesc[5] & 0xf0));
 	}
 
@@ -775,7 +775,7 @@ static void lec_push(struct atm_vcc *vcc, struct sk_buff *skb)
 	unsigned char *src, *dst;
 
 	atm_return(vcc, skb->truesize);
-	if (*(uint16_t *) skb->data == htons(priv->lecid) ||
+	if (*(__be16 *) skb->data == htons(priv->lecid) ||
 	    !priv->lecd || !(dev->flags & IFF_UP)) {
 		/*
 		 * Probably looping back, or if lecd is missing,
@@ -1321,11 +1321,10 @@ static int lane2_resolve(struct net_device *dev, u8 *dst_mac, int force,
 	if (table == NULL)
 		return -1;
 
-	*tlvs = kmalloc(table->sizeoftlvs, GFP_ATOMIC);
+	*tlvs = kmemdup(table->tlvs, table->sizeoftlvs, GFP_ATOMIC);
 	if (*tlvs == NULL)
 		return -1;
 
-	memcpy(*tlvs, table->tlvs, table->sizeoftlvs);
 	*sizeoftlvs = table->sizeoftlvs;
 
 	return 0;
@@ -1364,11 +1363,10 @@ static int lane2_associate_req(struct net_device *dev, u8 *lan_dst,
 
 	kfree(priv->tlvs); /* NULL if there was no previous association */
 
-	priv->tlvs = kmalloc(sizeoftlvs, GFP_KERNEL);
+	priv->tlvs = kmemdup(tlvs, sizeoftlvs, GFP_KERNEL);
 	if (priv->tlvs == NULL)
 		return (0);
 	priv->sizeoftlvs = sizeoftlvs;
-	memcpy(priv->tlvs, tlvs, sizeoftlvs);
 
 	skb = alloc_skb(sizeoftlvs, GFP_ATOMIC);
 	if (skb == NULL)
@@ -1409,12 +1407,10 @@ static void lane2_associate_ind(struct net_device *dev, u8 *mac_addr,
 
 	kfree(entry->tlvs);
 
-	entry->tlvs = kmalloc(sizeoftlvs, GFP_KERNEL);
+	entry->tlvs = kmemdup(tlvs, sizeoftlvs, GFP_KERNEL);
 	if (entry->tlvs == NULL)
 		return;
-
 	entry->sizeoftlvs = sizeoftlvs;
-	memcpy(entry->tlvs, tlvs, sizeoftlvs);
 #endif
 #if 0
 	printk("lec.c: lane2_associate_ind()\n");
diff --git a/net/atm/lec.h b/net/atm/lec.h
index 984e8e6e083a..99136babd535 100644
--- a/net/atm/lec.h
+++ b/net/atm/lec.h
@@ -14,14 +14,14 @@
 #define LEC_HEADER_LEN 16
 
 struct lecdatahdr_8023 {
-	unsigned short le_header;
+	__be16 le_header;
 	unsigned char h_dest[ETH_ALEN];
 	unsigned char h_source[ETH_ALEN];
-	unsigned short h_type;
+	__be16 h_type;
 };
 
 struct lecdatahdr_8025 {
-	unsigned short le_header;
+	__be16 le_header;
 	unsigned char ac_pad;
 	unsigned char fc;
 	unsigned char h_dest[ETH_ALEN];
diff --git a/net/atm/mpc.c b/net/atm/mpc.c
index 0d2b994af511..c18f73715ef9 100644
--- a/net/atm/mpc.c
+++ b/net/atm/mpc.c
@@ -152,7 +152,7 @@ static struct mpoa_client *find_mpc_by_lec(struct net_device *dev)
 /*
  * Overwrites the old entry or makes a new one.
  */
-struct atm_mpoa_qos *atm_mpoa_add_qos(uint32_t dst_ip, struct atm_qos *qos)
+struct atm_mpoa_qos *atm_mpoa_add_qos(__be32 dst_ip, struct atm_qos *qos)
 {
 	struct atm_mpoa_qos *entry;
 
@@ -177,7 +177,7 @@ struct atm_mpoa_qos *atm_mpoa_add_qos(uint32_t dst_ip, struct atm_qos *qos)
 	return entry;
 }
 
-struct atm_mpoa_qos *atm_mpoa_search_qos(uint32_t dst_ip)
+struct atm_mpoa_qos *atm_mpoa_search_qos(__be32 dst_ip)
 {
 	struct atm_mpoa_qos *qos;
 
@@ -460,11 +460,11 @@ static int send_via_shortcut(struct sk_buff *skb, struct mpoa_client *mpc)
 	in_cache_entry *entry;
 	struct iphdr *iph;
 	char *buff;
-	uint32_t ipaddr = 0;
+	__be32 ipaddr = 0;
 
 	static struct {
 		struct llc_snap_hdr hdr;
-		uint32_t tag;
+		__be32 tag;
 	} tagged_llc_snap_hdr = {
 		{0xaa, 0xaa, 0x03, {0x00, 0x00, 0x00}, {0x88, 0x4c}},
 		0
@@ -559,7 +559,7 @@ static int atm_mpoa_vcc_attach(struct atm_vcc *vcc, void __user *arg)
 	struct mpoa_client *mpc;
 	struct atmmpc_ioc ioc_data;
 	in_cache_entry *in_entry;
-	uint32_t ipaddr;
+	__be32 ipaddr;
 
 	bytes_left = copy_from_user(&ioc_data, arg, sizeof(struct atmmpc_ioc));
 	if (bytes_left != 0) {
@@ -638,7 +638,7 @@ static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb)
 	struct sk_buff *new_skb;
 	eg_cache_entry *eg;
 	struct mpoa_client *mpc;
-	uint32_t tag;
+	__be32 tag;
 	char *tmp;
 
 	ddprintk("mpoa: (%s) mpc_push:\n", dev->name);
@@ -683,7 +683,7 @@ static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb)
 	}
 
 	tmp = skb->data + sizeof(struct llc_snap_hdr);
-	tag = *(uint32_t *)tmp;
+	tag = *(__be32 *)tmp;
 
 	eg = mpc->eg_ops->get_by_tag(tag, mpc);
 	if (eg == NULL) {
@@ -1029,7 +1029,7 @@ static int mpoa_event_listener(struct notifier_block *mpoa_notifier, unsigned lo
 
 static void MPOA_trigger_rcvd(struct k_message *msg, struct mpoa_client *mpc)
 {
-	uint32_t dst_ip = msg->content.in_info.in_dst_ip;
+	__be32 dst_ip = msg->content.in_info.in_dst_ip;
 	in_cache_entry *entry;
 
 	entry = mpc->in_ops->get(dst_ip, mpc);
@@ -1066,7 +1066,7 @@ static void MPOA_trigger_rcvd(struct k_message *msg, struct mpoa_client *mpc)
  */
 static void check_qos_and_open_shortcut(struct k_message *msg, struct mpoa_client *client, in_cache_entry *entry)
 {
-	uint32_t dst_ip = msg->content.in_info.in_dst_ip;
+	__be32 dst_ip = msg->content.in_info.in_dst_ip;
 	struct atm_mpoa_qos *qos = atm_mpoa_search_qos(dst_ip);
 	eg_cache_entry *eg_entry = client->eg_ops->get_by_src_ip(dst_ip, client);
 
@@ -1102,7 +1102,7 @@ static void check_qos_and_open_shortcut(struct k_message *msg, struct mpoa_clien
 
 static void MPOA_res_reply_rcvd(struct k_message *msg, struct mpoa_client *mpc)
 {
-	uint32_t dst_ip = msg->content.in_info.in_dst_ip;
+	__be32 dst_ip = msg->content.in_info.in_dst_ip;
 	in_cache_entry *entry = mpc->in_ops->get(dst_ip, mpc);
 
 	dprintk("mpoa: (%s) MPOA_res_reply_rcvd: ip %u.%u.%u.%u\n", mpc->dev->name, NIPQUAD(dst_ip));
@@ -1148,8 +1148,8 @@ static void MPOA_res_reply_rcvd(struct k_message *msg, struct mpoa_client *mpc)
 
 static void ingress_purge_rcvd(struct k_message *msg, struct mpoa_client *mpc)
 {
-	uint32_t dst_ip = msg->content.in_info.in_dst_ip;
-	uint32_t mask = msg->ip_mask;
+	__be32 dst_ip = msg->content.in_info.in_dst_ip;
+	__be32 mask = msg->ip_mask;
 	in_cache_entry *entry = mpc->in_ops->get_with_mask(dst_ip, mpc, mask);
 
 	if(entry == NULL){
@@ -1173,7 +1173,7 @@ static void ingress_purge_rcvd(struct k_message *msg, struct mpoa_client *mpc)
 
 static void egress_purge_rcvd(struct k_message *msg, struct mpoa_client *mpc)
 {
-	uint32_t cache_id = msg->content.eg_info.cache_id;
+	__be32 cache_id = msg->content.eg_info.cache_id;
 	eg_cache_entry *entry = mpc->eg_ops->get_by_cache_id(cache_id, mpc);
 
 	if (entry == NULL) {
@@ -1322,13 +1322,12 @@ static void set_mps_mac_addr_rcvd(struct k_message *msg, struct mpoa_client *cli
 	if(client->number_of_mps_macs)
 		kfree(client->mps_macs);
 	client->number_of_mps_macs = 0;
-	client->mps_macs = kmalloc(ETH_ALEN,GFP_KERNEL);
+	client->mps_macs = kmemdup(msg->MPS_ctrl, ETH_ALEN, GFP_KERNEL);
 	if (client->mps_macs == NULL) {
 		printk("mpoa: set_mps_mac_addr_rcvd: out of memory\n");
 		return;
 	}
 	client->number_of_mps_macs = 1;
-	memcpy(client->mps_macs, msg->MPS_ctrl, ETH_ALEN);
 
 	return;
 }
diff --git a/net/atm/mpc.h b/net/atm/mpc.h
index 3c7981a229e8..51f460d005c3 100644
--- a/net/atm/mpc.h
+++ b/net/atm/mpc.h
@@ -36,14 +36,14 @@ struct mpoa_client {
 
 struct atm_mpoa_qos {
 	struct atm_mpoa_qos *next;
-	uint32_t ipaddr;
+	__be32 ipaddr;
 	struct atm_qos qos;
 };
 
 
 /* MPOA QoS operations */
-struct atm_mpoa_qos *atm_mpoa_add_qos(uint32_t dst_ip, struct atm_qos *qos);
-struct atm_mpoa_qos *atm_mpoa_search_qos(uint32_t dst_ip);
+struct atm_mpoa_qos *atm_mpoa_add_qos(__be32 dst_ip, struct atm_qos *qos);
+struct atm_mpoa_qos *atm_mpoa_search_qos(__be32 dst_ip);
 int atm_mpoa_delete_qos(struct atm_mpoa_qos *qos);
 
 /* Display QoS entries. This is for the procfs */
diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
index fbf13cdcf46e..697a081533b5 100644
--- a/net/atm/mpoa_caches.c
+++ b/net/atm/mpoa_caches.c
@@ -22,7 +22,7 @@
 #define ddprintk(format,args...)
 #endif
 
-static in_cache_entry *in_cache_get(uint32_t dst_ip,
+static in_cache_entry *in_cache_get(__be32 dst_ip,
 				    struct mpoa_client *client)
 {
 	in_cache_entry *entry;
@@ -42,9 +42,9 @@ static in_cache_entry *in_cache_get(uint32_t dst_ip,
 	return NULL;
 }
 
-static in_cache_entry *in_cache_get_with_mask(uint32_t dst_ip,
+static in_cache_entry *in_cache_get_with_mask(__be32 dst_ip,
 					      struct mpoa_client *client,
-					      uint32_t mask)
+					      __be32 mask)
 {
 	in_cache_entry *entry;
 
@@ -84,10 +84,10 @@ static in_cache_entry *in_cache_get_by_vcc(struct atm_vcc *vcc,
 	return NULL;
 }
 
-static in_cache_entry *in_cache_add_entry(uint32_t dst_ip,
+static in_cache_entry *in_cache_add_entry(__be32 dst_ip,
 					  struct mpoa_client *client)
 {
-	in_cache_entry* entry = kmalloc(sizeof(in_cache_entry), GFP_KERNEL);
+	in_cache_entry *entry = kzalloc(sizeof(in_cache_entry), GFP_KERNEL);
 
 	if (entry == NULL) {
 		printk("mpoa: mpoa_caches.c: new_in_cache_entry: out of memory\n");
@@ -95,7 +95,6 @@ static in_cache_entry *in_cache_add_entry(uint32_t dst_ip,
 	}
 
 	dprintk("mpoa: mpoa_caches.c: adding an ingress entry, ip = %u.%u.%u.%u\n", NIPQUAD(dst_ip));
-	memset(entry,0,sizeof(in_cache_entry));
 
 	atomic_set(&entry->use, 1);
 	dprintk("mpoa: mpoa_caches.c: new_in_cache_entry: about to lock\n");
@@ -319,7 +318,7 @@ static void in_destroy_cache(struct mpoa_client *mpc)
 	return;
 }
 
-static eg_cache_entry *eg_cache_get_by_cache_id(uint32_t cache_id, struct mpoa_client *mpc)
+static eg_cache_entry *eg_cache_get_by_cache_id(__be32 cache_id, struct mpoa_client *mpc)
 {
 	eg_cache_entry *entry;
 
@@ -339,7 +338,7 @@ static eg_cache_entry *eg_cache_get_by_cache_id(uint32_t cache_id, struct mpoa_c
 }
 
 /* This can be called from any context since it saves CPU flags */
-static eg_cache_entry *eg_cache_get_by_tag(uint32_t tag, struct mpoa_client *mpc)
+static eg_cache_entry *eg_cache_get_by_tag(__be32 tag, struct mpoa_client *mpc)
 {
 	unsigned long flags;
 	eg_cache_entry *entry;
@@ -380,7 +379,7 @@ static eg_cache_entry *eg_cache_get_by_vcc(struct atm_vcc *vcc, struct mpoa_clie
 	return NULL;
 }
 
-static eg_cache_entry *eg_cache_get_by_src_ip(uint32_t ipaddr, struct mpoa_client *mpc)
+static eg_cache_entry *eg_cache_get_by_src_ip(__be32 ipaddr, struct mpoa_client *mpc)
 {
 	eg_cache_entry *entry;
 
@@ -447,7 +446,7 @@ static void eg_cache_remove_entry(eg_cache_entry *entry,
 
 static eg_cache_entry *eg_cache_add_entry(struct k_message *msg, struct mpoa_client *client)
 {
-	eg_cache_entry *entry = kmalloc(sizeof(eg_cache_entry), GFP_KERNEL);
+	eg_cache_entry *entry = kzalloc(sizeof(eg_cache_entry), GFP_KERNEL);
 
 	if (entry == NULL) {
 		printk("mpoa: mpoa_caches.c: new_eg_cache_entry: out of memory\n");
@@ -455,7 +454,6 @@ static eg_cache_entry *eg_cache_add_entry(struct k_message *msg, struct mpoa_cli
 	}
 
 	dprintk("mpoa: mpoa_caches.c: adding an egress entry, ip = %u.%u.%u.%u, this should be our IP\n", NIPQUAD(msg->content.eg_info.eg_dst_ip));
-	memset(entry, 0, sizeof(eg_cache_entry));
 
 	atomic_set(&entry->use, 1);
 	dprintk("mpoa: mpoa_caches.c: new_eg_cache_entry: about to lock\n");
diff --git a/net/atm/mpoa_caches.h b/net/atm/mpoa_caches.h
index 6c9886a03d0b..84de977def2e 100644
--- a/net/atm/mpoa_caches.h
+++ b/net/atm/mpoa_caches.h
@@ -29,12 +29,12 @@ typedef struct in_cache_entry {
 } in_cache_entry;
 
 struct in_cache_ops{
-	in_cache_entry *(*add_entry)(uint32_t dst_ip,
+	in_cache_entry *(*add_entry)(__be32 dst_ip,
 				     struct mpoa_client *client);
-	in_cache_entry *(*get)(uint32_t dst_ip, struct mpoa_client *client);
-	in_cache_entry *(*get_with_mask)(uint32_t dst_ip,
+	in_cache_entry *(*get)(__be32 dst_ip, struct mpoa_client *client);
+	in_cache_entry *(*get_with_mask)(__be32 dst_ip,
 					 struct mpoa_client *client,
-					 uint32_t mask);
+					 __be32 mask);
 	in_cache_entry *(*get_by_vcc)(struct atm_vcc *vcc,
 				      struct mpoa_client *client);
 	void (*put)(in_cache_entry *entry);
@@ -56,17 +56,17 @@ typedef struct eg_cache_entry{
 	struct atm_vcc *shortcut;
 	uint32_t packets_rcvd;
 	uint16_t entry_state;
-	uint32_t latest_ip_addr; /* The src IP address of the last packet */
+	__be32 latest_ip_addr; /* The src IP address of the last packet */
 	struct eg_ctrl_info ctrl_info;
 	atomic_t use;
 } eg_cache_entry;
 
 struct eg_cache_ops{
 	eg_cache_entry *(*add_entry)(struct k_message *msg, struct mpoa_client *client);
-	eg_cache_entry *(*get_by_cache_id)(uint32_t cache_id, struct mpoa_client *client);
-	eg_cache_entry *(*get_by_tag)(uint32_t cache_id, struct mpoa_client *client);
+	eg_cache_entry *(*get_by_cache_id)(__be32 cache_id, struct mpoa_client *client);
+	eg_cache_entry *(*get_by_tag)(__be32 cache_id, struct mpoa_client *client);
 	eg_cache_entry *(*get_by_vcc)(struct atm_vcc *vcc, struct mpoa_client *client);
-	eg_cache_entry *(*get_by_src_ip)(uint32_t ipaddr, struct mpoa_client *client);
+	eg_cache_entry *(*get_by_src_ip)(__be32 ipaddr, struct mpoa_client *client);
 	void (*put)(eg_cache_entry *entry);
 	void (*remove_entry)(eg_cache_entry *entry, struct mpoa_client *client);
 	void (*update)(eg_cache_entry *entry, uint16_t holding_time);
diff --git a/net/atm/mpoa_proc.c b/net/atm/mpoa_proc.c
index d37b8911b3ab..3844c85d602f 100644
--- a/net/atm/mpoa_proc.c
+++ b/net/atm/mpoa_proc.c
@@ -231,14 +231,14 @@ static int parse_qos(const char *buff)
 	 */
 	unsigned char ip[4];
 	int tx_pcr, tx_sdu, rx_pcr, rx_sdu;
-	uint32_t ipaddr;
+	__be32 ipaddr;
 	struct atm_qos qos;
 
 	memset(&qos, 0, sizeof(struct atm_qos));
 
 	if (sscanf(buff, "del %hhu.%hhu.%hhu.%hhu",
 		   ip, ip+1, ip+2, ip+3) == 4) {
-		ipaddr = *(uint32_t *)ip;
+		ipaddr = *(__be32 *)ip;
 		return atm_mpoa_delete_qos(atm_mpoa_search_qos(ipaddr));
 	}
 
@@ -250,7 +250,7 @@ static int parse_qos(const char *buff)
 	    ip, ip+1, ip+2, ip+3, &tx_pcr, &tx_sdu, &rx_pcr, &rx_sdu) != 8)
 		return 0;
 
-	ipaddr = *(uint32_t *)ip;
+	ipaddr = *(__be32 *)ip;
 	qos.txtp.traffic_class = ATM_CBR;
 	qos.txtp.max_pcr = tx_pcr;
 	qos.txtp.max_sdu = tx_sdu;
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 000695c48583..6cabf6d8a751 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -906,13 +906,13 @@ struct sock *ax25_make_new(struct sock *osk, struct ax25_dev *ax25_dev)
 	ax25->source_addr = oax25->source_addr;
 
 	if (oax25->digipeat != NULL) {
-		if ((ax25->digipeat = kmalloc(sizeof(ax25_digi), GFP_ATOMIC)) == NULL) {
+		ax25->digipeat = kmemdup(oax25->digipeat, sizeof(ax25_digi),
+					 GFP_ATOMIC);
+		if (ax25->digipeat == NULL) {
 			sk_free(sk);
 			ax25_cb_put(ax25);
 			return NULL;
 		}
-
-		memcpy(ax25->digipeat, oax25->digipeat, sizeof(ax25_digi));
 	}
 
 	sk->sk_protinfo = ax25;
diff --git a/net/ax25/ax25_out.c b/net/ax25/ax25_out.c
index d7736e585336..f84047d1e8ce 100644
--- a/net/ax25/ax25_out.c
+++ b/net/ax25/ax25_out.c
@@ -70,11 +70,11 @@ ax25_cb *ax25_send_frame(struct sk_buff *skb, int paclen, ax25_address *src, ax2
 	ax25->dest_addr = *dest;
 
 	if (digi != NULL) {
-		if ((ax25->digipeat = kmalloc(sizeof(ax25_digi), GFP_ATOMIC)) == NULL) {
+		ax25->digipeat = kmemdup(digi, sizeof(*digi), GFP_ATOMIC);
+		if (ax25->digipeat == NULL) {
 			ax25_cb_put(ax25);
 			return NULL;
 		}
-		memcpy(ax25->digipeat, digi, sizeof(ax25_digi));
 	}
 
 	switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
diff --git a/net/ax25/ax25_route.c b/net/ax25/ax25_route.c
index 51b7bdaf27eb..8580356ace5c 100644
--- a/net/ax25/ax25_route.c
+++ b/net/ax25/ax25_route.c
@@ -432,11 +432,12 @@ int ax25_rt_autobind(ax25_cb *ax25, ax25_address *addr)
 	}
 
 	if (ax25_rt->digipeat != NULL) {
-		if ((ax25->digipeat = kmalloc(sizeof(ax25_digi), GFP_ATOMIC)) == NULL) {
+		ax25->digipeat = kmemdup(ax25_rt->digipeat, sizeof(ax25_digi),
+					 GFP_ATOMIC);
+		if (ax25->digipeat == NULL) {
 			err = -ENOMEM;
 			goto put;
 		}
-		memcpy(ax25->digipeat, ax25_rt->digipeat, sizeof(ax25_digi));
 		ax25_adjust_path(addr, ax25->digipeat);
 	}
 
diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c
index 867d42537979..d23a27f25d2f 100644
--- a/net/ax25/sysctl_net_ax25.c
+++ b/net/ax25/sysctl_net_ax25.c
@@ -209,7 +209,9 @@ void ax25_register_sysctl(void)
 	}
 
 	for (n = 0, ax25_dev = ax25_dev_list; ax25_dev != NULL; ax25_dev = ax25_dev->next) {
-		ctl_table *child = kmalloc(sizeof(ax25_param_table), GFP_ATOMIC);
+		struct ctl_table *child = kmemdup(ax25_param_table,
+						  sizeof(ax25_param_table),
+						  GFP_ATOMIC);
 		if (!child) {
 			while (n--)
 				kfree(ax25_table[n].child);
@@ -217,7 +219,6 @@ void ax25_register_sysctl(void)
 			spin_unlock_bh(&ax25_dev_lock);
 			return;
 		}
-		memcpy(child, ax25_param_table, sizeof(ax25_param_table));
 		ax25_table[n].child = ax25_dev->systable = child;
 		ax25_table[n].ctl_name = n + 1;
 		ax25_table[n].procname = ax25_dev->dev->name;
diff --git a/net/bluetooth/bnep/bnep.h b/net/bluetooth/bnep/bnep.h
index bbb1ed7097a9..0b6cd0e2528d 100644
--- a/net/bluetooth/bnep/bnep.h
+++ b/net/bluetooth/bnep/bnep.h
@@ -95,14 +95,14 @@ struct bnep_setup_conn_req {
 struct bnep_set_filter_req {
 	__u8 type;
 	__u8 ctrl;
-	__u16 len;
+	__be16 len;
 	__u8 list[0];
 } __attribute__((packed));
 
 struct bnep_control_rsp {
 	__u8 type;
 	__u8 ctrl;
-	__u16 resp;
+	__be16 resp;
 } __attribute__((packed));
 
 struct bnep_ext_hdr {
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 4d3424c2421c..7ba6470dc507 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -117,18 +117,18 @@ static int bnep_send_rsp(struct bnep_session *s, u8 ctrl, u16 resp)
 static inline void bnep_set_default_proto_filter(struct bnep_session *s)
 {
 	/* (IPv4, ARP) */
-	s->proto_filter[0].start = htons(0x0800);
-	s->proto_filter[0].end = htons(0x0806);
+	s->proto_filter[0].start = ETH_P_IP;
+	s->proto_filter[0].end = ETH_P_ARP;
 	/* (RARP, AppleTalk) */
-	s->proto_filter[1].start = htons(0x8035);
-	s->proto_filter[1].end = htons(0x80F3);
+	s->proto_filter[1].start = ETH_P_RARP;
+	s->proto_filter[1].end = ETH_P_AARP;
 	/* (IPX, IPv6) */
-	s->proto_filter[2].start = htons(0x8137);
-	s->proto_filter[2].end = htons(0x86DD);
+	s->proto_filter[2].start = ETH_P_IPX;
+	s->proto_filter[2].end = ETH_P_IPV6;
 }
 #endif
 
-static int bnep_ctrl_set_netfilter(struct bnep_session *s, u16 *data, int len)
+static int bnep_ctrl_set_netfilter(struct bnep_session *s, __be16 *data, int len)
 {
 	int n;
 
@@ -150,8 +150,8 @@ static int bnep_ctrl_set_netfilter(struct bnep_session *s, u16 *data, int len)
 		int i;
 
 		for (i = 0; i < n; i++) {
-			f[i].start = get_unaligned(data++);
-			f[i].end = get_unaligned(data++);
+			f[i].start = ntohs(get_unaligned(data++));
+			f[i].end = ntohs(get_unaligned(data++));
 
 			BT_DBG("proto filter start %d end %d",
 				f[i].start, f[i].end);
@@ -180,7 +180,7 @@ static int bnep_ctrl_set_mcfilter(struct bnep_session *s, u8 *data, int len)
 	if (len < 2)
 		return -EILSEQ;
 
-	n = ntohs(get_unaligned((u16 *) data));
+	n = ntohs(get_unaligned((__be16 *) data));
 	data += 2; len -= 2;
 
 	if (len < n)
@@ -332,7 +332,7 @@ static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
 	if (!skb_pull(skb, __bnep_rx_hlen[type & BNEP_TYPE_MASK]))
 		goto badframe;
 
-	s->eh.h_proto = get_unaligned((u16 *) (skb->data - 2));
+	s->eh.h_proto = get_unaligned((__be16 *) (skb->data - 2));
 
 	if (type & BNEP_EXT_HEADER) {
 		if (bnep_rx_extension(s, skb) < 0)
@@ -343,7 +343,7 @@ static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
 	if (ntohs(s->eh.h_proto) == 0x8100) {
 		if (!skb_pull(skb, 4))
 			goto badframe;
-		s->eh.h_proto = get_unaligned((u16 *) (skb->data - 2));
+		s->eh.h_proto = get_unaligned((__be16 *) (skb->data - 2));
 	}
 
 	/* We have to alloc new skb and copy data here :(. Because original skb
@@ -365,7 +365,7 @@ static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
 	case BNEP_COMPRESSED_SRC_ONLY:
 		memcpy(__skb_put(nskb, ETH_ALEN), s->eh.h_dest, ETH_ALEN);
 		memcpy(__skb_put(nskb, ETH_ALEN), skb->mac.raw, ETH_ALEN);
-		put_unaligned(s->eh.h_proto, (u16 *) __skb_put(nskb, 2));
+		put_unaligned(s->eh.h_proto, (__be16 *) __skb_put(nskb, 2));
 		break;
 
 	case BNEP_COMPRESSED_DST_ONLY:
@@ -375,7 +375,7 @@ static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
 
 	case BNEP_GENERAL:
 		memcpy(__skb_put(nskb, ETH_ALEN * 2), skb->mac.raw, ETH_ALEN * 2);
-		put_unaligned(s->eh.h_proto, (u16 *) __skb_put(nskb, 2));
+		put_unaligned(s->eh.h_proto, (__be16 *) __skb_put(nskb, 2));
 		break;
 	}
 
diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c
index 7f7b27db6a8f..67a002a9751a 100644
--- a/net/bluetooth/bnep/netdev.c
+++ b/net/bluetooth/bnep/netdev.c
@@ -158,14 +158,15 @@ static inline int bnep_net_mc_filter(struct sk_buff *skb, struct bnep_session *s
 static inline u16 bnep_net_eth_proto(struct sk_buff *skb)
 {
 	struct ethhdr *eh = (void *) skb->data;
+	u16 proto = ntohs(eh->h_proto);
 
-	if (ntohs(eh->h_proto) >= 1536)
-		return eh->h_proto;
+	if (proto >= 1536)
+		return proto;
 
-	if (get_unaligned((u16 *) skb->data) == 0xFFFF)
-		return htons(ETH_P_802_3);
+	if (get_unaligned((__be16 *) skb->data) == htons(0xFFFF))
+		return ETH_P_802_3;
 
-	return htons(ETH_P_802_2);
+	return ETH_P_802_2;
 }
 
 static inline int bnep_net_proto_filter(struct sk_buff *skb, struct bnep_session *s)
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index bbf78e6a7bc3..29a8fa4d3728 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -770,7 +770,7 @@ static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int fl
 	long timeo;
 	int err = 0;
 
-	lock_sock(sk);
+	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
 
 	if (sk->sk_state != BT_LISTEN) {
 		err = -EBADFD;
@@ -792,7 +792,7 @@ static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int fl
 
 		release_sock(sk);
 		timeo = schedule_timeout(timeo);
-		lock_sock(sk);
+		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
 
 		if (sk->sk_state != BT_LISTEN) {
 			err = -EBADFD;
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c index ddc4e9d5963e..278c8676906a 100644 --- a/net/bluetooth/rfcomm/core.c +++ b/net/bluetooth/rfcomm/core.c | |||
@@ -854,7 +854,7 @@ int rfcomm_send_rpn(struct rfcomm_session *s, int cr, u8 dlci, | |||
854 | rpn->flow_ctrl = flow_ctrl_settings; | 854 | rpn->flow_ctrl = flow_ctrl_settings; |
855 | rpn->xon_char = xon_char; | 855 | rpn->xon_char = xon_char; |
856 | rpn->xoff_char = xoff_char; | 856 | rpn->xoff_char = xoff_char; |
857 | rpn->param_mask = param_mask; | 857 | rpn->param_mask = cpu_to_le16(param_mask); |
858 | 858 | ||
859 | *ptr = __fcs(buf); ptr++; | 859 | *ptr = __fcs(buf); ptr++; |
860 | 860 | ||
@@ -1018,7 +1018,7 @@ static void rfcomm_make_uih(struct sk_buff *skb, u8 addr) | |||
1018 | 1018 | ||
1019 | if (len > 127) { | 1019 | if (len > 127) { |
1020 | hdr = (void *) skb_push(skb, 4); | 1020 | hdr = (void *) skb_push(skb, 4); |
1021 | put_unaligned(htobs(__len16(len)), (u16 *) &hdr->len); | 1021 | put_unaligned(htobs(__len16(len)), (__le16 *) &hdr->len); |
1022 | } else { | 1022 | } else { |
1023 | hdr = (void *) skb_push(skb, 3); | 1023 | hdr = (void *) skb_push(skb, 3); |
1024 | hdr->len = __len8(len); | 1024 | hdr->len = __len8(len); |
@@ -1343,7 +1343,7 @@ static int rfcomm_recv_rpn(struct rfcomm_session *s, int cr, int len, struct sk_ | |||
1343 | /* Check for sane values, ignore/accept bit_rate, 8 bits, 1 stop bit, | 1343 | /* Check for sane values, ignore/accept bit_rate, 8 bits, 1 stop bit, |
1344 | * no parity, no flow control lines, normal XON/XOFF chars */ | 1344 | * no parity, no flow control lines, normal XON/XOFF chars */ |
1345 | 1345 | ||
1346 | if (rpn->param_mask & RFCOMM_RPN_PM_BITRATE) { | 1346 | if (rpn->param_mask & cpu_to_le16(RFCOMM_RPN_PM_BITRATE)) { |
1347 | bit_rate = rpn->bit_rate; | 1347 | bit_rate = rpn->bit_rate; |
1348 | if (bit_rate != RFCOMM_RPN_BR_115200) { | 1348 | if (bit_rate != RFCOMM_RPN_BR_115200) { |
1349 | BT_DBG("RPN bit rate mismatch 0x%x", bit_rate); | 1349 | BT_DBG("RPN bit rate mismatch 0x%x", bit_rate); |
@@ -1352,7 +1352,7 @@ static int rfcomm_recv_rpn(struct rfcomm_session *s, int cr, int len, struct sk_ | |||
1352 | } | 1352 | } |
1353 | } | 1353 | } |
1354 | 1354 | ||
1355 | if (rpn->param_mask & RFCOMM_RPN_PM_DATA) { | 1355 | if (rpn->param_mask & cpu_to_le16(RFCOMM_RPN_PM_DATA)) { |
1356 | data_bits = __get_rpn_data_bits(rpn->line_settings); | 1356 | data_bits = __get_rpn_data_bits(rpn->line_settings); |
1357 | if (data_bits != RFCOMM_RPN_DATA_8) { | 1357 | if (data_bits != RFCOMM_RPN_DATA_8) { |
1358 | BT_DBG("RPN data bits mismatch 0x%x", data_bits); | 1358 | BT_DBG("RPN data bits mismatch 0x%x", data_bits); |
@@ -1361,7 +1361,7 @@ static int rfcomm_recv_rpn(struct rfcomm_session *s, int cr, int len, struct sk_ | |||
1361 | } | 1361 | } |
1362 | } | 1362 | } |
1363 | 1363 | ||
1364 | if (rpn->param_mask & RFCOMM_RPN_PM_STOP) { | 1364 | if (rpn->param_mask & cpu_to_le16(RFCOMM_RPN_PM_STOP)) { |
1365 | stop_bits = __get_rpn_stop_bits(rpn->line_settings); | 1365 | stop_bits = __get_rpn_stop_bits(rpn->line_settings); |
1366 | if (stop_bits != RFCOMM_RPN_STOP_1) { | 1366 | if (stop_bits != RFCOMM_RPN_STOP_1) { |
1367 | BT_DBG("RPN stop bits mismatch 0x%x", stop_bits); | 1367 | BT_DBG("RPN stop bits mismatch 0x%x", stop_bits); |
@@ -1370,7 +1370,7 @@ static int rfcomm_recv_rpn(struct rfcomm_session *s, int cr, int len, struct sk_ | |||
1370 | } | 1370 | } |
1371 | } | 1371 | } |
1372 | 1372 | ||
1373 | if (rpn->param_mask & RFCOMM_RPN_PM_PARITY) { | 1373 | if (rpn->param_mask & cpu_to_le16(RFCOMM_RPN_PM_PARITY)) { |
1374 | parity = __get_rpn_parity(rpn->line_settings); | 1374 | parity = __get_rpn_parity(rpn->line_settings); |
1375 | if (parity != RFCOMM_RPN_PARITY_NONE) { | 1375 | if (parity != RFCOMM_RPN_PARITY_NONE) { |
1376 | BT_DBG("RPN parity mismatch 0x%x", parity); | 1376 | BT_DBG("RPN parity mismatch 0x%x", parity); |
@@ -1379,7 +1379,7 @@ static int rfcomm_recv_rpn(struct rfcomm_session *s, int cr, int len, struct sk_ | |||
1379 | } | 1379 | } |
1380 | } | 1380 | } |
1381 | 1381 | ||
1382 | if (rpn->param_mask & RFCOMM_RPN_PM_FLOW) { | 1382 | if (rpn->param_mask & cpu_to_le16(RFCOMM_RPN_PM_FLOW)) { |
1383 | flow_ctrl = rpn->flow_ctrl; | 1383 | flow_ctrl = rpn->flow_ctrl; |
1384 | if (flow_ctrl != RFCOMM_RPN_FLOW_NONE) { | 1384 | if (flow_ctrl != RFCOMM_RPN_FLOW_NONE) { |
1385 | BT_DBG("RPN flow ctrl mismatch 0x%x", flow_ctrl); | 1385 | BT_DBG("RPN flow ctrl mismatch 0x%x", flow_ctrl); |
@@ -1388,7 +1388,7 @@ static int rfcomm_recv_rpn(struct rfcomm_session *s, int cr, int len, struct sk_ | |||
1388 | } | 1388 | } |
1389 | } | 1389 | } |
1390 | 1390 | ||
1391 | if (rpn->param_mask & RFCOMM_RPN_PM_XON) { | 1391 | if (rpn->param_mask & cpu_to_le16(RFCOMM_RPN_PM_XON)) { |
1392 | xon_char = rpn->xon_char; | 1392 | xon_char = rpn->xon_char; |
1393 | if (xon_char != RFCOMM_RPN_XON_CHAR) { | 1393 | if (xon_char != RFCOMM_RPN_XON_CHAR) { |
1394 | BT_DBG("RPN XON char mismatch 0x%x", xon_char); | 1394 | BT_DBG("RPN XON char mismatch 0x%x", xon_char); |
@@ -1397,7 +1397,7 @@ static int rfcomm_recv_rpn(struct rfcomm_session *s, int cr, int len, struct sk_ | |||
1397 | } | 1397 | } |
1398 | } | 1398 | } |
1399 | 1399 | ||
1400 | if (rpn->param_mask & RFCOMM_RPN_PM_XOFF) { | 1400 | if (rpn->param_mask & cpu_to_le16(RFCOMM_RPN_PM_XOFF)) { |
1401 | xoff_char = rpn->xoff_char; | 1401 | xoff_char = rpn->xoff_char; |
1402 | if (xoff_char != RFCOMM_RPN_XOFF_CHAR) { | 1402 | if (xoff_char != RFCOMM_RPN_XOFF_CHAR) { |
1403 | BT_DBG("RPN XOFF char mismatch 0x%x", xoff_char); | 1403 | BT_DBG("RPN XOFF char mismatch 0x%x", xoff_char); |
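
The RPN hunks above fix an endianness bug: param_mask is a little-endian on-wire field, so the host-order RFCOMM_RPN_PM_* constants have to be converted before masking. A minimal sketch of the pattern, with a hypothetical helper name standing in for the RFCOMM code:

	/* mask is a little-endian on-wire field; bit is a host-order constant */
	static int rpn_param_present(__le16 mask, u16 bit)
	{
		/* convert the constant rather than the field: no byte swap is
		 * needed on little-endian hosts and the __le16 annotation stays
		 * consistent for sparse */
		return (mask & cpu_to_le16(bit)) != 0;
	}
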
diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c index 4e4119a12139..4c61a7e0a86e 100644 --- a/net/bridge/br_ioctl.c +++ b/net/bridge/br_ioctl.c | |||
@@ -58,12 +58,13 @@ static int get_fdb_entries(struct net_bridge *br, void __user *userbuf, | |||
58 | { | 58 | { |
59 | int num; | 59 | int num; |
60 | void *buf; | 60 | void *buf; |
61 | size_t size = maxnum * sizeof(struct __fdb_entry); | 61 | size_t size; |
62 | 62 | ||
63 | if (size > PAGE_SIZE) { | 63 | /* Clamp size to PAGE_SIZE, test maxnum to avoid overflow */ |
64 | size = PAGE_SIZE; | 64 | if (maxnum > PAGE_SIZE/sizeof(struct __fdb_entry)) |
65 | maxnum = PAGE_SIZE/sizeof(struct __fdb_entry); | 65 | maxnum = PAGE_SIZE/sizeof(struct __fdb_entry); |
66 | } | 66 | |
67 | size = maxnum * sizeof(struct __fdb_entry); | ||
67 | 68 | ||
68 | buf = kmalloc(size, GFP_USER); | 69 | buf = kmalloc(size, GFP_USER); |
69 | if (!buf) | 70 | if (!buf) |
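
The get_fdb_entries() change above avoids an integer overflow: the old code multiplied the user-supplied maxnum first and only then compared against PAGE_SIZE, so a huge maxnum could wrap size. A generic sketch of the safe ordering (helper name is illustrative):

	/* Clamp the untrusted element count before multiplying, so the
	 * product can never exceed PAGE_SIZE or wrap around. */
	static size_t bounded_table_size(unsigned long maxnum, size_t elem_size)
	{
		if (maxnum > PAGE_SIZE / elem_size)
			maxnum = PAGE_SIZE / elem_size;
		return maxnum * elem_size;
	}
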
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c index ac181be13d83..ac47ba2ba028 100644 --- a/net/bridge/br_netfilter.c +++ b/net/bridge/br_netfilter.c | |||
@@ -40,7 +40,6 @@ | |||
40 | #include <net/route.h> | 40 | #include <net/route.h> |
41 | 41 | ||
42 | #include <asm/uaccess.h> | 42 | #include <asm/uaccess.h> |
43 | #include <asm/checksum.h> | ||
44 | #include "br_private.h" | 43 | #include "br_private.h" |
45 | #ifdef CONFIG_SYSCTL | 44 | #ifdef CONFIG_SYSCTL |
46 | #include <linux/sysctl.h> | 45 | #include <linux/sysctl.h> |
@@ -381,7 +380,7 @@ static int check_hbh_len(struct sk_buff *skb) | |||
381 | case IPV6_TLV_JUMBO: | 380 | case IPV6_TLV_JUMBO: |
382 | if (skb->nh.raw[off + 1] != 4 || (off & 3) != 2) | 381 | if (skb->nh.raw[off + 1] != 4 || (off & 3) != 2) |
383 | goto bad; | 382 | goto bad; |
384 | pkt_len = ntohl(*(u32 *) (skb->nh.raw + off + 2)); | 383 | pkt_len = ntohl(*(__be32 *) (skb->nh.raw + off + 2)); |
385 | if (pkt_len <= IPV6_MAXPLEN || | 384 | if (pkt_len <= IPV6_MAXPLEN || |
386 | skb->nh.ipv6h->payload_len) | 385 | skb->nh.ipv6h->payload_len) |
387 | goto bad; | 386 | goto bad; |
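
The check_hbh_len() hunk only changes the cast: the jumbo payload length sits in the packet in network byte order, so it is read through a __be32 pointer before ntohl(). A small sketch of the same idea, using memcpy() instead of a direct cast to also sidestep alignment (the helper name is made up):

	/* Read a 32-bit network-order field at an arbitrary offset. */
	static u32 read_net32(const unsigned char *p)
	{
		__be32 wire;

		memcpy(&wire, p, sizeof(wire));	/* no unaligned dereference */
		return ntohl(wire);
	}
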
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index 8f661195d09d..a9139682c49b 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c | |||
@@ -15,6 +15,18 @@ | |||
15 | #include <net/netlink.h> | 15 | #include <net/netlink.h> |
16 | #include "br_private.h" | 16 | #include "br_private.h" |
17 | 17 | ||
18 | static inline size_t br_nlmsg_size(void) | ||
19 | { | ||
20 | return NLMSG_ALIGN(sizeof(struct ifinfomsg)) | ||
21 | + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */ | ||
22 | + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */ | ||
23 | + nla_total_size(4) /* IFLA_MASTER */ | ||
24 | + nla_total_size(4) /* IFLA_MTU */ | ||
25 | + nla_total_size(4) /* IFLA_LINK */ | ||
26 | + nla_total_size(1) /* IFLA_OPERSTATE */ | ||
27 | + nla_total_size(1); /* IFLA_PROTINFO */ | ||
28 | } | ||
29 | |||
18 | /* | 30 | /* |
19 | * Create one netlink message for one interface | 31 | * Create one netlink message for one interface |
20 | * Contains port and master info as well as carrier and bridge state. | 32 | * Contains port and master info as well as carrier and bridge state. |
@@ -24,51 +36,43 @@ static int br_fill_ifinfo(struct sk_buff *skb, const struct net_bridge_port *por | |||
24 | { | 36 | { |
25 | const struct net_bridge *br = port->br; | 37 | const struct net_bridge *br = port->br; |
26 | const struct net_device *dev = port->dev; | 38 | const struct net_device *dev = port->dev; |
27 | struct ifinfomsg *r; | 39 | struct ifinfomsg *hdr; |
28 | struct nlmsghdr *nlh; | 40 | struct nlmsghdr *nlh; |
29 | unsigned char *b = skb->tail; | ||
30 | u32 mtu = dev->mtu; | ||
31 | u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN; | 41 | u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN; |
32 | u8 portstate = port->state; | ||
33 | 42 | ||
34 | pr_debug("br_fill_info event %d port %s master %s\n", | 43 | pr_debug("br_fill_info event %d port %s master %s\n", |
35 | event, dev->name, br->dev->name); | 44 | event, dev->name, br->dev->name); |
36 | 45 | ||
37 | nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*r), flags); | 46 | nlh = nlmsg_put(skb, pid, seq, event, sizeof(*hdr), flags); |
38 | r = NLMSG_DATA(nlh); | 47 | if (nlh == NULL) |
39 | r->ifi_family = AF_BRIDGE; | 48 | return -ENOBUFS; |
40 | r->__ifi_pad = 0; | ||
41 | r->ifi_type = dev->type; | ||
42 | r->ifi_index = dev->ifindex; | ||
43 | r->ifi_flags = dev_get_flags(dev); | ||
44 | r->ifi_change = 0; | ||
45 | 49 | ||
46 | RTA_PUT(skb, IFLA_IFNAME, strlen(dev->name)+1, dev->name); | 50 | hdr = nlmsg_data(nlh); |
51 | hdr->ifi_family = AF_BRIDGE; | ||
52 | hdr->__ifi_pad = 0; | ||
53 | hdr->ifi_type = dev->type; | ||
54 | hdr->ifi_index = dev->ifindex; | ||
55 | hdr->ifi_flags = dev_get_flags(dev); | ||
56 | hdr->ifi_change = 0; | ||
47 | 57 | ||
48 | RTA_PUT(skb, IFLA_MASTER, sizeof(int), &br->dev->ifindex); | 58 | NLA_PUT_STRING(skb, IFLA_IFNAME, dev->name); |
59 | NLA_PUT_U32(skb, IFLA_MASTER, br->dev->ifindex); | ||
60 | NLA_PUT_U32(skb, IFLA_MTU, dev->mtu); | ||
61 | NLA_PUT_U8(skb, IFLA_OPERSTATE, operstate); | ||
49 | 62 | ||
50 | if (dev->addr_len) | 63 | if (dev->addr_len) |
51 | RTA_PUT(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr); | 64 | NLA_PUT(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr); |
52 | 65 | ||
53 | RTA_PUT(skb, IFLA_MTU, sizeof(mtu), &mtu); | ||
54 | if (dev->ifindex != dev->iflink) | 66 | if (dev->ifindex != dev->iflink) |
55 | RTA_PUT(skb, IFLA_LINK, sizeof(int), &dev->iflink); | 67 | NLA_PUT_U32(skb, IFLA_LINK, dev->iflink); |
56 | |||
57 | |||
58 | RTA_PUT(skb, IFLA_OPERSTATE, sizeof(operstate), &operstate); | ||
59 | 68 | ||
60 | if (event == RTM_NEWLINK) | 69 | if (event == RTM_NEWLINK) |
61 | RTA_PUT(skb, IFLA_PROTINFO, sizeof(portstate), &portstate); | 70 | NLA_PUT_U8(skb, IFLA_PROTINFO, port->state); |
62 | |||
63 | nlh->nlmsg_len = skb->tail - b; | ||
64 | |||
65 | return skb->len; | ||
66 | 71 | ||
67 | nlmsg_failure: | 72 | return nlmsg_end(skb, nlh); |
68 | rtattr_failure: | ||
69 | 73 | ||
70 | skb_trim(skb, b - skb->data); | 74 | nla_put_failure: |
71 | return -EINVAL; | 75 | return nlmsg_cancel(skb, nlh); |
72 | } | 76 | } |
73 | 77 | ||
74 | /* | 78 | /* |
@@ -77,19 +81,16 @@ rtattr_failure: | |||
77 | void br_ifinfo_notify(int event, struct net_bridge_port *port) | 81 | void br_ifinfo_notify(int event, struct net_bridge_port *port) |
78 | { | 82 | { |
79 | struct sk_buff *skb; | 83 | struct sk_buff *skb; |
80 | int payload = sizeof(struct ifinfomsg) + 128; | ||
81 | int err = -ENOBUFS; | 84 | int err = -ENOBUFS; |
82 | 85 | ||
83 | pr_debug("bridge notify event=%d\n", event); | 86 | pr_debug("bridge notify event=%d\n", event); |
84 | skb = nlmsg_new(nlmsg_total_size(payload), GFP_ATOMIC); | 87 | skb = nlmsg_new(br_nlmsg_size(), GFP_ATOMIC); |
85 | if (skb == NULL) | 88 | if (skb == NULL) |
86 | goto errout; | 89 | goto errout; |
87 | 90 | ||
88 | err = br_fill_ifinfo(skb, port, 0, 0, event, 0); | 91 | err = br_fill_ifinfo(skb, port, 0, 0, event, 0); |
89 | if (err < 0) { | 92 | /* failure implies BUG in br_nlmsg_size() */ |
90 | kfree_skb(skb); | 93 | BUG_ON(err < 0); |
91 | goto errout; | ||
92 | } | ||
93 | 94 | ||
94 | err = rtnl_notify(skb, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC); | 95 | err = rtnl_notify(skb, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC); |
95 | errout: | 96 | errout: |
@@ -104,25 +105,18 @@ static int br_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) | |||
104 | { | 105 | { |
105 | struct net_device *dev; | 106 | struct net_device *dev; |
106 | int idx; | 107 | int idx; |
107 | int s_idx = cb->args[0]; | ||
108 | int err = 0; | ||
109 | 108 | ||
110 | read_lock(&dev_base_lock); | 109 | read_lock(&dev_base_lock); |
111 | for (dev = dev_base, idx = 0; dev; dev = dev->next) { | 110 | for (dev = dev_base, idx = 0; dev; dev = dev->next) { |
112 | struct net_bridge_port *p = dev->br_port; | ||
113 | |||
114 | /* not a bridge port */ | 111 | /* not a bridge port */ |
115 | if (!p) | 112 | if (dev->br_port == NULL || idx < cb->args[0]) |
116 | continue; | 113 | goto skip; |
117 | |||
118 | if (idx < s_idx) | ||
119 | goto cont; | ||
120 | 114 | ||
121 | err = br_fill_ifinfo(skb, p, NETLINK_CB(cb->skb).pid, | 115 | if (br_fill_ifinfo(skb, dev->br_port, NETLINK_CB(cb->skb).pid, |
122 | cb->nlh->nlmsg_seq, RTM_NEWLINK, NLM_F_MULTI); | 116 | cb->nlh->nlmsg_seq, RTM_NEWLINK, |
123 | if (err <= 0) | 117 | NLM_F_MULTI) < 0) |
124 | break; | 118 | break; |
125 | cont: | 119 | skip: |
126 | ++idx; | 120 | ++idx; |
127 | } | 121 | } |
128 | read_unlock(&dev_base_lock); | 122 | read_unlock(&dev_base_lock); |
@@ -138,26 +132,27 @@ cont: | |||
138 | */ | 132 | */ |
139 | static int br_rtm_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) | 133 | static int br_rtm_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) |
140 | { | 134 | { |
141 | struct rtattr **rta = arg; | 135 | struct ifinfomsg *ifm; |
142 | struct ifinfomsg *ifm = NLMSG_DATA(nlh); | 136 | struct nlattr *protinfo; |
143 | struct net_device *dev; | 137 | struct net_device *dev; |
144 | struct net_bridge_port *p; | 138 | struct net_bridge_port *p; |
145 | u8 new_state; | 139 | u8 new_state; |
146 | 140 | ||
141 | if (nlmsg_len(nlh) < sizeof(*ifm)) | ||
142 | return -EINVAL; | ||
143 | |||
144 | ifm = nlmsg_data(nlh); | ||
147 | if (ifm->ifi_family != AF_BRIDGE) | 145 | if (ifm->ifi_family != AF_BRIDGE) |
148 | return -EPFNOSUPPORT; | 146 | return -EPFNOSUPPORT; |
149 | 147 | ||
150 | /* Must pass valid state as PROTINFO */ | 148 | protinfo = nlmsg_find_attr(nlh, sizeof(*ifm), IFLA_PROTINFO); |
151 | if (rta[IFLA_PROTINFO-1]) { | 149 | if (!protinfo || nla_len(protinfo) < sizeof(u8)) |
152 | u8 *pstate = RTA_DATA(rta[IFLA_PROTINFO-1]); | ||
153 | new_state = *pstate; | ||
154 | } else | ||
155 | return -EINVAL; | 150 | return -EINVAL; |
156 | 151 | ||
152 | new_state = nla_get_u8(protinfo); | ||
157 | if (new_state > BR_STATE_BLOCKING) | 153 | if (new_state > BR_STATE_BLOCKING) |
158 | return -EINVAL; | 154 | return -EINVAL; |
159 | 155 | ||
160 | /* Find bridge port */ | ||
161 | dev = __dev_get_by_index(ifm->ifi_index); | 156 | dev = __dev_get_by_index(ifm->ifi_index); |
162 | if (!dev) | 157 | if (!dev) |
163 | return -ENODEV; | 158 | return -ENODEV; |
@@ -170,10 +165,8 @@ static int br_rtm_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) | |||
170 | if (p->br->stp_enabled) | 165 | if (p->br->stp_enabled) |
171 | return -EBUSY; | 166 | return -EBUSY; |
172 | 167 | ||
173 | if (!netif_running(dev)) | 168 | if (!netif_running(dev) || |
174 | return -ENETDOWN; | 169 | (!netif_carrier_ok(dev) && new_state != BR_STATE_DISABLED)) |
175 | |||
176 | if (!netif_carrier_ok(dev) && new_state != BR_STATE_DISABLED) | ||
177 | return -ENETDOWN; | 170 | return -ENETDOWN; |
178 | 171 | ||
179 | p->state = new_state; | 172 | p->state = new_state; |
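
The br_netlink.c rewrite moves from the old RTA_PUT()/NLMSG_NEW() macros to the nlmsg_put()/nla_put_*() helpers and precomputes the message size in br_nlmsg_size(). A stripped-down sketch of that fill pattern, keeping only one attribute for illustration:

	/* Build one RTM_NEWLINK message; caller allocated skb via nlmsg_new(). */
	static int fill_link_msg(struct sk_buff *skb, u32 pid, u32 seq,
				 int ifindex, u32 mtu)
	{
		struct nlmsghdr *nlh;
		struct ifinfomsg *hdr;

		nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*hdr), 0);
		if (nlh == NULL)
			return -ENOBUFS;

		hdr = nlmsg_data(nlh);
		memset(hdr, 0, sizeof(*hdr));		/* unused header fields stay zero */
		hdr->ifi_family = AF_BRIDGE;
		hdr->ifi_index = ifindex;

		NLA_PUT_U32(skb, IFLA_MTU, mtu);	/* jumps to nla_put_failure if skb is full */

		return nlmsg_end(skb, nlh);		/* fixes up nlmsg_len */

	nla_put_failure:
		return nlmsg_cancel(skb, nlh);		/* trims the partial message */
	}
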
diff --git a/net/bridge/netfilter/ebt_802_3.c b/net/bridge/netfilter/ebt_802_3.c index d42f63f5e9f8..9abbc09ccdc3 100644 --- a/net/bridge/netfilter/ebt_802_3.c +++ b/net/bridge/netfilter/ebt_802_3.c | |||
@@ -17,7 +17,7 @@ static int ebt_filter_802_3(const struct sk_buff *skb, const struct net_device * | |||
17 | { | 17 | { |
18 | struct ebt_802_3_info *info = (struct ebt_802_3_info *)data; | 18 | struct ebt_802_3_info *info = (struct ebt_802_3_info *)data; |
19 | struct ebt_802_3_hdr *hdr = ebt_802_3_hdr(skb); | 19 | struct ebt_802_3_hdr *hdr = ebt_802_3_hdr(skb); |
20 | uint16_t type = hdr->llc.ui.ctrl & IS_UI ? hdr->llc.ui.type : hdr->llc.ni.type; | 20 | __be16 type = hdr->llc.ui.ctrl & IS_UI ? hdr->llc.ui.type : hdr->llc.ni.type; |
21 | 21 | ||
22 | if (info->bitmask & EBT_802_3_SAP) { | 22 | if (info->bitmask & EBT_802_3_SAP) { |
23 | if (FWINV(info->sap != hdr->llc.ui.ssap, EBT_802_3_SAP)) | 23 | if (FWINV(info->sap != hdr->llc.ui.ssap, EBT_802_3_SAP)) |
diff --git a/net/bridge/netfilter/ebt_among.c b/net/bridge/netfilter/ebt_among.c index a614485828af..ce97c4285f9a 100644 --- a/net/bridge/netfilter/ebt_among.c +++ b/net/bridge/netfilter/ebt_among.c | |||
@@ -15,7 +15,7 @@ | |||
15 | #include <linux/module.h> | 15 | #include <linux/module.h> |
16 | 16 | ||
17 | static int ebt_mac_wormhash_contains(const struct ebt_mac_wormhash *wh, | 17 | static int ebt_mac_wormhash_contains(const struct ebt_mac_wormhash *wh, |
18 | const char *mac, uint32_t ip) | 18 | const char *mac, __be32 ip) |
19 | { | 19 | { |
20 | /* You may be puzzled as to how this code works. | 20 | /* You may be puzzled as to how this code works. |
21 | * Some tricks were used, refer to | 21 | * Some tricks were used, refer to |
@@ -70,7 +70,7 @@ static int ebt_mac_wormhash_check_integrity(const struct ebt_mac_wormhash | |||
70 | return 0; | 70 | return 0; |
71 | } | 71 | } |
72 | 72 | ||
73 | static int get_ip_dst(const struct sk_buff *skb, uint32_t *addr) | 73 | static int get_ip_dst(const struct sk_buff *skb, __be32 *addr) |
74 | { | 74 | { |
75 | if (eth_hdr(skb)->h_proto == htons(ETH_P_IP)) { | 75 | if (eth_hdr(skb)->h_proto == htons(ETH_P_IP)) { |
76 | struct iphdr _iph, *ih; | 76 | struct iphdr _iph, *ih; |
@@ -81,16 +81,16 @@ static int get_ip_dst(const struct sk_buff *skb, uint32_t *addr) | |||
81 | *addr = ih->daddr; | 81 | *addr = ih->daddr; |
82 | } else if (eth_hdr(skb)->h_proto == htons(ETH_P_ARP)) { | 82 | } else if (eth_hdr(skb)->h_proto == htons(ETH_P_ARP)) { |
83 | struct arphdr _arph, *ah; | 83 | struct arphdr _arph, *ah; |
84 | uint32_t buf, *bp; | 84 | __be32 buf, *bp; |
85 | 85 | ||
86 | ah = skb_header_pointer(skb, 0, sizeof(_arph), &_arph); | 86 | ah = skb_header_pointer(skb, 0, sizeof(_arph), &_arph); |
87 | if (ah == NULL || | 87 | if (ah == NULL || |
88 | ah->ar_pln != sizeof(uint32_t) || | 88 | ah->ar_pln != sizeof(__be32) || |
89 | ah->ar_hln != ETH_ALEN) | 89 | ah->ar_hln != ETH_ALEN) |
90 | return -1; | 90 | return -1; |
91 | bp = skb_header_pointer(skb, sizeof(struct arphdr) + | 91 | bp = skb_header_pointer(skb, sizeof(struct arphdr) + |
92 | 2 * ETH_ALEN + sizeof(uint32_t), | 92 | 2 * ETH_ALEN + sizeof(__be32), |
93 | sizeof(uint32_t), &buf); | 93 | sizeof(__be32), &buf); |
94 | if (bp == NULL) | 94 | if (bp == NULL) |
95 | return -1; | 95 | return -1; |
96 | *addr = *bp; | 96 | *addr = *bp; |
@@ -98,7 +98,7 @@ static int get_ip_dst(const struct sk_buff *skb, uint32_t *addr) | |||
98 | return 0; | 98 | return 0; |
99 | } | 99 | } |
100 | 100 | ||
101 | static int get_ip_src(const struct sk_buff *skb, uint32_t *addr) | 101 | static int get_ip_src(const struct sk_buff *skb, __be32 *addr) |
102 | { | 102 | { |
103 | if (eth_hdr(skb)->h_proto == htons(ETH_P_IP)) { | 103 | if (eth_hdr(skb)->h_proto == htons(ETH_P_IP)) { |
104 | struct iphdr _iph, *ih; | 104 | struct iphdr _iph, *ih; |
@@ -109,15 +109,15 @@ static int get_ip_src(const struct sk_buff *skb, uint32_t *addr) | |||
109 | *addr = ih->saddr; | 109 | *addr = ih->saddr; |
110 | } else if (eth_hdr(skb)->h_proto == htons(ETH_P_ARP)) { | 110 | } else if (eth_hdr(skb)->h_proto == htons(ETH_P_ARP)) { |
111 | struct arphdr _arph, *ah; | 111 | struct arphdr _arph, *ah; |
112 | uint32_t buf, *bp; | 112 | __be32 buf, *bp; |
113 | 113 | ||
114 | ah = skb_header_pointer(skb, 0, sizeof(_arph), &_arph); | 114 | ah = skb_header_pointer(skb, 0, sizeof(_arph), &_arph); |
115 | if (ah == NULL || | 115 | if (ah == NULL || |
116 | ah->ar_pln != sizeof(uint32_t) || | 116 | ah->ar_pln != sizeof(__be32) || |
117 | ah->ar_hln != ETH_ALEN) | 117 | ah->ar_hln != ETH_ALEN) |
118 | return -1; | 118 | return -1; |
119 | bp = skb_header_pointer(skb, sizeof(struct arphdr) + | 119 | bp = skb_header_pointer(skb, sizeof(struct arphdr) + |
120 | ETH_ALEN, sizeof(uint32_t), &buf); | 120 | ETH_ALEN, sizeof(__be32), &buf); |
121 | if (bp == NULL) | 121 | if (bp == NULL) |
122 | return -1; | 122 | return -1; |
123 | *addr = *bp; | 123 | *addr = *bp; |
@@ -133,7 +133,7 @@ static int ebt_filter_among(const struct sk_buff *skb, | |||
133 | struct ebt_among_info *info = (struct ebt_among_info *) data; | 133 | struct ebt_among_info *info = (struct ebt_among_info *) data; |
134 | const char *dmac, *smac; | 134 | const char *dmac, *smac; |
135 | const struct ebt_mac_wormhash *wh_dst, *wh_src; | 135 | const struct ebt_mac_wormhash *wh_dst, *wh_src; |
136 | uint32_t dip = 0, sip = 0; | 136 | __be32 dip = 0, sip = 0; |
137 | 137 | ||
138 | wh_dst = ebt_among_wh_dst(info); | 138 | wh_dst = ebt_among_wh_dst(info); |
139 | wh_src = ebt_among_wh_src(info); | 139 | wh_src = ebt_among_wh_src(info); |
diff --git a/net/bridge/netfilter/ebt_arp.c b/net/bridge/netfilter/ebt_arp.c index a6c81d9f73b8..9c599800a900 100644 --- a/net/bridge/netfilter/ebt_arp.c +++ b/net/bridge/netfilter/ebt_arp.c | |||
@@ -35,10 +35,10 @@ static int ebt_filter_arp(const struct sk_buff *skb, const struct net_device *in | |||
35 | return EBT_NOMATCH; | 35 | return EBT_NOMATCH; |
36 | 36 | ||
37 | if (info->bitmask & (EBT_ARP_SRC_IP | EBT_ARP_DST_IP)) { | 37 | if (info->bitmask & (EBT_ARP_SRC_IP | EBT_ARP_DST_IP)) { |
38 | uint32_t _addr, *ap; | 38 | __be32 _addr, *ap; |
39 | 39 | ||
40 | /* IPv4 addresses are always 4 bytes */ | 40 | /* IPv4 addresses are always 4 bytes */ |
41 | if (ah->ar_pln != sizeof(uint32_t)) | 41 | if (ah->ar_pln != sizeof(__be32)) |
42 | return EBT_NOMATCH; | 42 | return EBT_NOMATCH; |
43 | if (info->bitmask & EBT_ARP_SRC_IP) { | 43 | if (info->bitmask & EBT_ARP_SRC_IP) { |
44 | ap = skb_header_pointer(skb, sizeof(struct arphdr) + | 44 | ap = skb_header_pointer(skb, sizeof(struct arphdr) + |
@@ -53,7 +53,7 @@ static int ebt_filter_arp(const struct sk_buff *skb, const struct net_device *in | |||
53 | 53 | ||
54 | if (info->bitmask & EBT_ARP_DST_IP) { | 54 | if (info->bitmask & EBT_ARP_DST_IP) { |
55 | ap = skb_header_pointer(skb, sizeof(struct arphdr) + | 55 | ap = skb_header_pointer(skb, sizeof(struct arphdr) + |
56 | 2*ah->ar_hln+sizeof(uint32_t), | 56 | 2*ah->ar_hln+sizeof(__be32), |
57 | sizeof(_addr), &_addr); | 57 | sizeof(_addr), &_addr); |
58 | if (ap == NULL) | 58 | if (ap == NULL) |
59 | return EBT_NOMATCH; | 59 | return EBT_NOMATCH; |
diff --git a/net/bridge/netfilter/ebt_ip.c b/net/bridge/netfilter/ebt_ip.c index 65b665ce57b5..e4c642448e1b 100644 --- a/net/bridge/netfilter/ebt_ip.c +++ b/net/bridge/netfilter/ebt_ip.c | |||
@@ -20,8 +20,8 @@ | |||
20 | #include <linux/module.h> | 20 | #include <linux/module.h> |
21 | 21 | ||
22 | struct tcpudphdr { | 22 | struct tcpudphdr { |
23 | uint16_t src; | 23 | __be16 src; |
24 | uint16_t dst; | 24 | __be16 dst; |
25 | }; | 25 | }; |
26 | 26 | ||
27 | static int ebt_filter_ip(const struct sk_buff *skb, const struct net_device *in, | 27 | static int ebt_filter_ip(const struct sk_buff *skb, const struct net_device *in, |
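
The tcpudphdr change above is purely an annotation: port numbers sit in the packet in network byte order, so declaring them __be16 lets sparse catch any comparison against an unconverted host-order constant. For example (struct and function names here are illustrative):

	struct portpair {		/* mirrors the shape of tcpudphdr above */
		__be16 src;
		__be16 dst;
	};

	static int is_dns_query(const struct portpair *p)
	{
		return p->dst == htons(53);	/* constant converted to wire order */
	}
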
diff --git a/net/bridge/netfilter/ebt_log.c b/net/bridge/netfilter/ebt_log.c index 466ed3440b74..a184f879f253 100644 --- a/net/bridge/netfilter/ebt_log.c +++ b/net/bridge/netfilter/ebt_log.c | |||
@@ -38,8 +38,8 @@ static int ebt_log_check(const char *tablename, unsigned int hookmask, | |||
38 | 38 | ||
39 | struct tcpudphdr | 39 | struct tcpudphdr |
40 | { | 40 | { |
41 | uint16_t src; | 41 | __be16 src; |
42 | uint16_t dst; | 42 | __be16 dst; |
43 | }; | 43 | }; |
44 | 44 | ||
45 | struct arppayload | 45 | struct arppayload |
@@ -130,7 +130,7 @@ ebt_log_packet(unsigned int pf, unsigned int hooknum, | |||
130 | * then log the ARP payload */ | 130 | * then log the ARP payload */ |
131 | if (ah->ar_hrd == htons(1) && | 131 | if (ah->ar_hrd == htons(1) && |
132 | ah->ar_hln == ETH_ALEN && | 132 | ah->ar_hln == ETH_ALEN && |
133 | ah->ar_pln == sizeof(uint32_t)) { | 133 | ah->ar_pln == sizeof(__be32)) { |
134 | struct arppayload _arpp, *ap; | 134 | struct arppayload _arpp, *ap; |
135 | 135 | ||
136 | ap = skb_header_pointer(skb, sizeof(_arph), | 136 | ap = skb_header_pointer(skb, sizeof(_arph), |
diff --git a/net/bridge/netfilter/ebt_mark.c b/net/bridge/netfilter/ebt_mark.c index b54306a934e5..62d23c7b25e6 100644 --- a/net/bridge/netfilter/ebt_mark.c +++ b/net/bridge/netfilter/ebt_mark.c | |||
@@ -25,15 +25,15 @@ static int ebt_target_mark(struct sk_buff **pskb, unsigned int hooknr, | |||
25 | int action = info->target & -16; | 25 | int action = info->target & -16; |
26 | 26 | ||
27 | if (action == MARK_SET_VALUE) | 27 | if (action == MARK_SET_VALUE) |
28 | (*pskb)->nfmark = info->mark; | 28 | (*pskb)->mark = info->mark; |
29 | else if (action == MARK_OR_VALUE) | 29 | else if (action == MARK_OR_VALUE) |
30 | (*pskb)->nfmark |= info->mark; | 30 | (*pskb)->mark |= info->mark; |
31 | else if (action == MARK_AND_VALUE) | 31 | else if (action == MARK_AND_VALUE) |
32 | (*pskb)->nfmark &= info->mark; | 32 | (*pskb)->mark &= info->mark; |
33 | else | 33 | else |
34 | (*pskb)->nfmark ^= info->mark; | 34 | (*pskb)->mark ^= info->mark; |
35 | 35 | ||
36 | return info->target | -16; | 36 | return info->target | ~EBT_VERDICT_BITS; |
37 | } | 37 | } |
38 | 38 | ||
39 | static int ebt_target_mark_check(const char *tablename, unsigned int hookmask, | 39 | static int ebt_target_mark_check(const char *tablename, unsigned int hookmask, |
@@ -44,13 +44,13 @@ static int ebt_target_mark_check(const char *tablename, unsigned int hookmask, | |||
44 | 44 | ||
45 | if (datalen != EBT_ALIGN(sizeof(struct ebt_mark_t_info))) | 45 | if (datalen != EBT_ALIGN(sizeof(struct ebt_mark_t_info))) |
46 | return -EINVAL; | 46 | return -EINVAL; |
47 | tmp = info->target | -16; | 47 | tmp = info->target | ~EBT_VERDICT_BITS; |
48 | if (BASE_CHAIN && tmp == EBT_RETURN) | 48 | if (BASE_CHAIN && tmp == EBT_RETURN) |
49 | return -EINVAL; | 49 | return -EINVAL; |
50 | CLEAR_BASE_CHAIN_BIT; | 50 | CLEAR_BASE_CHAIN_BIT; |
51 | if (tmp < -NUM_STANDARD_TARGETS || tmp >= 0) | 51 | if (tmp < -NUM_STANDARD_TARGETS || tmp >= 0) |
52 | return -EINVAL; | 52 | return -EINVAL; |
53 | tmp = info->target & -16; | 53 | tmp = info->target & ~EBT_VERDICT_BITS; |
54 | if (tmp != MARK_SET_VALUE && tmp != MARK_OR_VALUE && | 54 | if (tmp != MARK_SET_VALUE && tmp != MARK_OR_VALUE && |
55 | tmp != MARK_AND_VALUE && tmp != MARK_XOR_VALUE) | 55 | tmp != MARK_AND_VALUE && tmp != MARK_XOR_VALUE) |
56 | return -EINVAL; | 56 | return -EINVAL; |
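
The ebt_mark change swaps the magic -16 for ~EBT_VERDICT_BITS, which spells out the packing of the target field: the low bits carry the verdict, the rest the MARK_* action. Assuming the usual ebtables definitions, splitting the field looks like this:

	/* info->target packs a MARK_* action and an ebtables verdict */
	int action  = info->target & ~EBT_VERDICT_BITS;	/* MARK_SET_VALUE, MARK_OR_VALUE, ... */
	int verdict = info->target | ~EBT_VERDICT_BITS;	/* EBT_ACCEPT, EBT_DROP, ... (negative) */
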
diff --git a/net/bridge/netfilter/ebt_mark_m.c b/net/bridge/netfilter/ebt_mark_m.c index a6413e4b4982..025869ee0b68 100644 --- a/net/bridge/netfilter/ebt_mark_m.c +++ b/net/bridge/netfilter/ebt_mark_m.c | |||
@@ -19,8 +19,8 @@ static int ebt_filter_mark(const struct sk_buff *skb, | |||
19 | struct ebt_mark_m_info *info = (struct ebt_mark_m_info *) data; | 19 | struct ebt_mark_m_info *info = (struct ebt_mark_m_info *) data; |
20 | 20 | ||
21 | if (info->bitmask & EBT_MARK_OR) | 21 | if (info->bitmask & EBT_MARK_OR) |
22 | return !(!!(skb->nfmark & info->mask) ^ info->invert); | 22 | return !(!!(skb->mark & info->mask) ^ info->invert); |
23 | return !(((skb->nfmark & info->mask) == info->mark) ^ info->invert); | 23 | return !(((skb->mark & info->mask) == info->mark) ^ info->invert); |
24 | } | 24 | } |
25 | 25 | ||
26 | static int ebt_mark_check(const char *tablename, unsigned int hookmask, | 26 | static int ebt_mark_check(const char *tablename, unsigned int hookmask, |
diff --git a/net/bridge/netfilter/ebt_snat.c b/net/bridge/netfilter/ebt_snat.c index cbb33e24ca8a..a50722182bfe 100644 --- a/net/bridge/netfilter/ebt_snat.c +++ b/net/bridge/netfilter/ebt_snat.c | |||
@@ -12,6 +12,8 @@ | |||
12 | #include <linux/netfilter_bridge/ebt_nat.h> | 12 | #include <linux/netfilter_bridge/ebt_nat.h> |
13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
14 | #include <net/sock.h> | 14 | #include <net/sock.h> |
15 | #include <linux/if_arp.h> | ||
16 | #include <net/arp.h> | ||
15 | 17 | ||
16 | static int ebt_target_snat(struct sk_buff **pskb, unsigned int hooknr, | 18 | static int ebt_target_snat(struct sk_buff **pskb, unsigned int hooknr, |
17 | const struct net_device *in, const struct net_device *out, | 19 | const struct net_device *in, const struct net_device *out, |
@@ -31,24 +33,43 @@ static int ebt_target_snat(struct sk_buff **pskb, unsigned int hooknr, | |||
31 | *pskb = nskb; | 33 | *pskb = nskb; |
32 | } | 34 | } |
33 | memcpy(eth_hdr(*pskb)->h_source, info->mac, ETH_ALEN); | 35 | memcpy(eth_hdr(*pskb)->h_source, info->mac, ETH_ALEN); |
34 | return info->target; | 36 | if (!(info->target & NAT_ARP_BIT) && |
37 | eth_hdr(*pskb)->h_proto == htons(ETH_P_ARP)) { | ||
38 | struct arphdr _ah, *ap; | ||
39 | |||
40 | ap = skb_header_pointer(*pskb, 0, sizeof(_ah), &_ah); | ||
41 | if (ap == NULL) | ||
42 | return EBT_DROP; | ||
43 | if (ap->ar_hln != ETH_ALEN) | ||
44 | goto out; | ||
45 | if (skb_store_bits(*pskb, sizeof(_ah), info->mac, ETH_ALEN)) | ||
46 | return EBT_DROP; | ||
47 | } | ||
48 | out: | ||
49 | return info->target | ~EBT_VERDICT_BITS; | ||
35 | } | 50 | } |
36 | 51 | ||
37 | static int ebt_target_snat_check(const char *tablename, unsigned int hookmask, | 52 | static int ebt_target_snat_check(const char *tablename, unsigned int hookmask, |
38 | const struct ebt_entry *e, void *data, unsigned int datalen) | 53 | const struct ebt_entry *e, void *data, unsigned int datalen) |
39 | { | 54 | { |
40 | struct ebt_nat_info *info = (struct ebt_nat_info *) data; | 55 | struct ebt_nat_info *info = (struct ebt_nat_info *) data; |
56 | int tmp; | ||
41 | 57 | ||
42 | if (datalen != EBT_ALIGN(sizeof(struct ebt_nat_info))) | 58 | if (datalen != EBT_ALIGN(sizeof(struct ebt_nat_info))) |
43 | return -EINVAL; | 59 | return -EINVAL; |
44 | if (BASE_CHAIN && info->target == EBT_RETURN) | 60 | tmp = info->target | ~EBT_VERDICT_BITS; |
61 | if (BASE_CHAIN && tmp == EBT_RETURN) | ||
45 | return -EINVAL; | 62 | return -EINVAL; |
46 | CLEAR_BASE_CHAIN_BIT; | 63 | CLEAR_BASE_CHAIN_BIT; |
47 | if (strcmp(tablename, "nat")) | 64 | if (strcmp(tablename, "nat")) |
48 | return -EINVAL; | 65 | return -EINVAL; |
49 | if (hookmask & ~(1 << NF_BR_POST_ROUTING)) | 66 | if (hookmask & ~(1 << NF_BR_POST_ROUTING)) |
50 | return -EINVAL; | 67 | return -EINVAL; |
51 | if (INVALID_TARGET) | 68 | |
69 | if (tmp < -NUM_STANDARD_TARGETS || tmp >= 0) | ||
70 | return -EINVAL; | ||
71 | tmp = info->target | EBT_VERDICT_BITS; | ||
72 | if ((tmp & ~NAT_ARP_BIT) != ~NAT_ARP_BIT) | ||
52 | return -EINVAL; | 73 | return -EINVAL; |
53 | return 0; | 74 | return 0; |
54 | } | 75 | } |
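
The ebt_snat extension above also rewrites the ARP sender hardware address, using skb_header_pointer() to read a possibly non-linear header into a stack copy and skb_store_bits() to write back without assuming linearity. A condensed sketch of that access pattern (helper name invented):

	/* Replace the ARP sender hardware address with new_mac. */
	static int arp_rewrite_sha(struct sk_buff *skb, const unsigned char *new_mac)
	{
		struct arphdr _ah, *ap;

		ap = skb_header_pointer(skb, 0, sizeof(_ah), &_ah);
		if (ap == NULL || ap->ar_hln != ETH_ALEN)
			return -EINVAL;
		/* the sender hardware address starts right after struct arphdr */
		return skb_store_bits(skb, sizeof(_ah), (void *)new_mac, ETH_ALEN);
	}
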
diff --git a/net/bridge/netfilter/ebt_ulog.c b/net/bridge/netfilter/ebt_ulog.c index 9f950db3b76f..c1af68b5a29c 100644 --- a/net/bridge/netfilter/ebt_ulog.c +++ b/net/bridge/netfilter/ebt_ulog.c | |||
@@ -168,7 +168,7 @@ static void ebt_ulog_packet(unsigned int hooknr, const struct sk_buff *skb, | |||
168 | if (ub->qlen == 1) | 168 | if (ub->qlen == 1) |
169 | skb_set_timestamp(ub->skb, &pm->stamp); | 169 | skb_set_timestamp(ub->skb, &pm->stamp); |
170 | pm->data_len = copy_len; | 170 | pm->data_len = copy_len; |
171 | pm->mark = skb->nfmark; | 171 | pm->mark = skb->mark; |
172 | pm->hook = hooknr; | 172 | pm->hook = hooknr; |
173 | if (uloginfo->prefix != NULL) | 173 | if (uloginfo->prefix != NULL) |
174 | strcpy(pm->prefix, uloginfo->prefix); | 174 | strcpy(pm->prefix, uloginfo->prefix); |
diff --git a/net/bridge/netfilter/ebt_vlan.c b/net/bridge/netfilter/ebt_vlan.c index a2b452862b73..7ee377622964 100644 --- a/net/bridge/netfilter/ebt_vlan.c +++ b/net/bridge/netfilter/ebt_vlan.c | |||
@@ -55,7 +55,7 @@ ebt_filter_vlan(const struct sk_buff *skb, | |||
55 | unsigned short id; /* VLAN ID, given from frame TCI */ | 55 | unsigned short id; /* VLAN ID, given from frame TCI */ |
56 | unsigned char prio; /* user_priority, given from frame TCI */ | 56 | unsigned char prio; /* user_priority, given from frame TCI */ |
57 | /* VLAN encapsulated Type/Length field, given from orig frame */ | 57 | /* VLAN encapsulated Type/Length field, given from orig frame */ |
58 | unsigned short encap; | 58 | __be16 encap; |
59 | 59 | ||
60 | fp = skb_header_pointer(skb, 0, sizeof(_frame), &_frame); | 60 | fp = skb_header_pointer(skb, 0, sizeof(_frame), &_frame); |
61 | if (fp == NULL) | 61 | if (fp == NULL) |
diff --git a/net/bridge/netfilter/ebtable_broute.c b/net/bridge/netfilter/ebtable_broute.c index 9a6e548e148b..d37ce0478938 100644 --- a/net/bridge/netfilter/ebtable_broute.c +++ b/net/bridge/netfilter/ebtable_broute.c | |||
@@ -23,7 +23,7 @@ static struct ebt_entries initial_chain = { | |||
23 | .policy = EBT_ACCEPT, | 23 | .policy = EBT_ACCEPT, |
24 | }; | 24 | }; |
25 | 25 | ||
26 | static struct ebt_replace initial_table = | 26 | static struct ebt_replace_kernel initial_table = |
27 | { | 27 | { |
28 | .name = "broute", | 28 | .name = "broute", |
29 | .valid_hooks = 1 << NF_BR_BROUTING, | 29 | .valid_hooks = 1 << NF_BR_BROUTING, |
diff --git a/net/bridge/netfilter/ebtable_filter.c b/net/bridge/netfilter/ebtable_filter.c index 3d5bd44f2395..127135ead2d5 100644 --- a/net/bridge/netfilter/ebtable_filter.c +++ b/net/bridge/netfilter/ebtable_filter.c | |||
@@ -30,7 +30,7 @@ static struct ebt_entries initial_chains[] = | |||
30 | }, | 30 | }, |
31 | }; | 31 | }; |
32 | 32 | ||
33 | static struct ebt_replace initial_table = | 33 | static struct ebt_replace_kernel initial_table = |
34 | { | 34 | { |
35 | .name = "filter", | 35 | .name = "filter", |
36 | .valid_hooks = FILTER_VALID_HOOKS, | 36 | .valid_hooks = FILTER_VALID_HOOKS, |
diff --git a/net/bridge/netfilter/ebtable_nat.c b/net/bridge/netfilter/ebtable_nat.c index 04dd42efda1d..9c50488b62eb 100644 --- a/net/bridge/netfilter/ebtable_nat.c +++ b/net/bridge/netfilter/ebtable_nat.c | |||
@@ -30,7 +30,7 @@ static struct ebt_entries initial_chains[] = | |||
30 | } | 30 | } |
31 | }; | 31 | }; |
32 | 32 | ||
33 | static struct ebt_replace initial_table = | 33 | static struct ebt_replace_kernel initial_table = |
34 | { | 34 | { |
35 | .name = "nat", | 35 | .name = "nat", |
36 | .valid_hooks = NAT_VALID_HOOKS, | 36 | .valid_hooks = NAT_VALID_HOOKS, |
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c index 9f85666f29f7..bee558a41800 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c | |||
@@ -338,10 +338,11 @@ ebt_check_match(struct ebt_entry_match *m, struct ebt_entry *e, | |||
338 | const char *name, unsigned int hookmask, unsigned int *cnt) | 338 | const char *name, unsigned int hookmask, unsigned int *cnt) |
339 | { | 339 | { |
340 | struct ebt_match *match; | 340 | struct ebt_match *match; |
341 | size_t left = ((char *)e + e->watchers_offset) - (char *)m; | ||
341 | int ret; | 342 | int ret; |
342 | 343 | ||
343 | if (((char *)m) + m->match_size + sizeof(struct ebt_entry_match) > | 344 | if (left < sizeof(struct ebt_entry_match) || |
344 | ((char *)e) + e->watchers_offset) | 345 | left - sizeof(struct ebt_entry_match) < m->match_size) |
345 | return -EINVAL; | 346 | return -EINVAL; |
346 | match = find_match_lock(m->u.name, &ret, &ebt_mutex); | 347 | match = find_match_lock(m->u.name, &ret, &ebt_mutex); |
347 | if (!match) | 348 | if (!match) |
@@ -367,10 +368,11 @@ ebt_check_watcher(struct ebt_entry_watcher *w, struct ebt_entry *e, | |||
367 | const char *name, unsigned int hookmask, unsigned int *cnt) | 368 | const char *name, unsigned int hookmask, unsigned int *cnt) |
368 | { | 369 | { |
369 | struct ebt_watcher *watcher; | 370 | struct ebt_watcher *watcher; |
371 | size_t left = ((char *)e + e->target_offset) - (char *)w; | ||
370 | int ret; | 372 | int ret; |
371 | 373 | ||
372 | if (((char *)w) + w->watcher_size + sizeof(struct ebt_entry_watcher) > | 374 | if (left < sizeof(struct ebt_entry_watcher) || |
373 | ((char *)e) + e->target_offset) | 375 | left - sizeof(struct ebt_entry_watcher) < w->watcher_size) |
374 | return -EINVAL; | 376 | return -EINVAL; |
375 | watcher = find_watcher_lock(w->u.name, &ret, &ebt_mutex); | 377 | watcher = find_watcher_lock(w->u.name, &ret, &ebt_mutex); |
376 | if (!watcher) | 378 | if (!watcher) |
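
Both hunks above replace a length check that could overflow (adding two untrusted sizes to a pointer before comparing) with one that first computes the space left and then compares piecewise. The same idea in a generic, self-contained form (names are illustrative):

	/* item points somewhere inside [base, base + total); hdr_len is a
	 * trusted header size, payload_len an untrusted length field. */
	static int item_fits(const char *base, size_t total, const char *item,
			     size_t hdr_len, size_t payload_len)
	{
		size_t left = base + total - item;	/* space remaining at item */

		/* subtracting only after the first test keeps left - hdr_len
		 * from wrapping, so a huge payload_len cannot pass the check */
		return left >= hdr_len && left - hdr_len >= payload_len;
	}
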
@@ -391,35 +393,91 @@ ebt_check_watcher(struct ebt_entry_watcher *w, struct ebt_entry *e, | |||
391 | return 0; | 393 | return 0; |
392 | } | 394 | } |
393 | 395 | ||
396 | static int ebt_verify_pointers(struct ebt_replace *repl, | ||
397 | struct ebt_table_info *newinfo) | ||
398 | { | ||
399 | unsigned int limit = repl->entries_size; | ||
400 | unsigned int valid_hooks = repl->valid_hooks; | ||
401 | unsigned int offset = 0; | ||
402 | int i; | ||
403 | |||
404 | for (i = 0; i < NF_BR_NUMHOOKS; i++) | ||
405 | newinfo->hook_entry[i] = NULL; | ||
406 | |||
407 | newinfo->entries_size = repl->entries_size; | ||
408 | newinfo->nentries = repl->nentries; | ||
409 | |||
410 | while (offset < limit) { | ||
411 | size_t left = limit - offset; | ||
412 | struct ebt_entry *e = (void *)newinfo->entries + offset; | ||
413 | |||
414 | if (left < sizeof(unsigned int)) | ||
415 | break; | ||
416 | |||
417 | for (i = 0; i < NF_BR_NUMHOOKS; i++) { | ||
418 | if ((valid_hooks & (1 << i)) == 0) | ||
419 | continue; | ||
420 | if ((char __user *)repl->hook_entry[i] == | ||
421 | repl->entries + offset) | ||
422 | break; | ||
423 | } | ||
424 | |||
425 | if (i != NF_BR_NUMHOOKS || !(e->bitmask & EBT_ENTRY_OR_ENTRIES)) { | ||
426 | if (e->bitmask != 0) { | ||
427 | /* we make userspace set this right, | ||
428 | so there is no misunderstanding */ | ||
429 | BUGPRINT("EBT_ENTRY_OR_ENTRIES shouldn't be set " | ||
430 | "in distinguisher\n"); | ||
431 | return -EINVAL; | ||
432 | } | ||
433 | if (i != NF_BR_NUMHOOKS) | ||
434 | newinfo->hook_entry[i] = (struct ebt_entries *)e; | ||
435 | if (left < sizeof(struct ebt_entries)) | ||
436 | break; | ||
437 | offset += sizeof(struct ebt_entries); | ||
438 | } else { | ||
439 | if (left < sizeof(struct ebt_entry)) | ||
440 | break; | ||
441 | if (left < e->next_offset) | ||
442 | break; | ||
443 | offset += e->next_offset; | ||
444 | } | ||
445 | } | ||
446 | if (offset != limit) { | ||
447 | BUGPRINT("entries_size too small\n"); | ||
448 | return -EINVAL; | ||
449 | } | ||
450 | |||
451 | /* check if all valid hooks have a chain */ | ||
452 | for (i = 0; i < NF_BR_NUMHOOKS; i++) { | ||
453 | if (!newinfo->hook_entry[i] && | ||
454 | (valid_hooks & (1 << i))) { | ||
455 | BUGPRINT("Valid hook without chain\n"); | ||
456 | return -EINVAL; | ||
457 | } | ||
458 | } | ||
459 | return 0; | ||
460 | } | ||
461 | |||
394 | /* | 462 | /* |
395 | * this one is very careful, as it is the first function | 463 | * this one is very careful, as it is the first function |
396 | * to parse the userspace data | 464 | * to parse the userspace data |
397 | */ | 465 | */ |
398 | static inline int | 466 | static inline int |
399 | ebt_check_entry_size_and_hooks(struct ebt_entry *e, | 467 | ebt_check_entry_size_and_hooks(struct ebt_entry *e, |
400 | struct ebt_table_info *newinfo, char *base, char *limit, | 468 | struct ebt_table_info *newinfo, |
401 | struct ebt_entries **hook_entries, unsigned int *n, unsigned int *cnt, | 469 | unsigned int *n, unsigned int *cnt, |
402 | unsigned int *totalcnt, unsigned int *udc_cnt, unsigned int valid_hooks) | 470 | unsigned int *totalcnt, unsigned int *udc_cnt) |
403 | { | 471 | { |
404 | int i; | 472 | int i; |
405 | 473 | ||
406 | for (i = 0; i < NF_BR_NUMHOOKS; i++) { | 474 | for (i = 0; i < NF_BR_NUMHOOKS; i++) { |
407 | if ((valid_hooks & (1 << i)) == 0) | 475 | if ((void *)e == (void *)newinfo->hook_entry[i]) |
408 | continue; | ||
409 | if ( (char *)hook_entries[i] - base == | ||
410 | (char *)e - newinfo->entries) | ||
411 | break; | 476 | break; |
412 | } | 477 | } |
413 | /* beginning of a new chain | 478 | /* beginning of a new chain |
414 | if i == NF_BR_NUMHOOKS it must be a user defined chain */ | 479 | if i == NF_BR_NUMHOOKS it must be a user defined chain */ |
415 | if (i != NF_BR_NUMHOOKS || !(e->bitmask & EBT_ENTRY_OR_ENTRIES)) { | 480 | if (i != NF_BR_NUMHOOKS || !e->bitmask) { |
416 | if ((e->bitmask & EBT_ENTRY_OR_ENTRIES) != 0) { | ||
417 | /* we make userspace set this right, | ||
418 | so there is no misunderstanding */ | ||
419 | BUGPRINT("EBT_ENTRY_OR_ENTRIES shouldn't be set " | ||
420 | "in distinguisher\n"); | ||
421 | return -EINVAL; | ||
422 | } | ||
423 | /* this checks if the previous chain has as many entries | 481 | /* this checks if the previous chain has as many entries |
424 | as it said it has */ | 482 | as it said it has */ |
425 | if (*n != *cnt) { | 483 | if (*n != *cnt) { |
@@ -427,12 +485,6 @@ ebt_check_entry_size_and_hooks(struct ebt_entry *e, | |||
427 | "in the chain\n"); | 485 | "in the chain\n"); |
428 | return -EINVAL; | 486 | return -EINVAL; |
429 | } | 487 | } |
430 | /* before we look at the struct, be sure it is not too big */ | ||
431 | if ((char *)hook_entries[i] + sizeof(struct ebt_entries) | ||
432 | > limit) { | ||
433 | BUGPRINT("entries_size too small\n"); | ||
434 | return -EINVAL; | ||
435 | } | ||
436 | if (((struct ebt_entries *)e)->policy != EBT_DROP && | 488 | if (((struct ebt_entries *)e)->policy != EBT_DROP && |
437 | ((struct ebt_entries *)e)->policy != EBT_ACCEPT) { | 489 | ((struct ebt_entries *)e)->policy != EBT_ACCEPT) { |
438 | /* only RETURN from udc */ | 490 | /* only RETURN from udc */ |
@@ -444,8 +496,6 @@ ebt_check_entry_size_and_hooks(struct ebt_entry *e, | |||
444 | } | 496 | } |
445 | if (i == NF_BR_NUMHOOKS) /* it's a user defined chain */ | 497 | if (i == NF_BR_NUMHOOKS) /* it's a user defined chain */ |
446 | (*udc_cnt)++; | 498 | (*udc_cnt)++; |
447 | else | ||
448 | newinfo->hook_entry[i] = (struct ebt_entries *)e; | ||
449 | if (((struct ebt_entries *)e)->counter_offset != *totalcnt) { | 499 | if (((struct ebt_entries *)e)->counter_offset != *totalcnt) { |
450 | BUGPRINT("counter_offset != totalcnt"); | 500 | BUGPRINT("counter_offset != totalcnt"); |
451 | return -EINVAL; | 501 | return -EINVAL; |
@@ -466,7 +516,6 @@ ebt_check_entry_size_and_hooks(struct ebt_entry *e, | |||
466 | BUGPRINT("target size too small\n"); | 516 | BUGPRINT("target size too small\n"); |
467 | return -EINVAL; | 517 | return -EINVAL; |
468 | } | 518 | } |
469 | |||
470 | (*cnt)++; | 519 | (*cnt)++; |
471 | (*totalcnt)++; | 520 | (*totalcnt)++; |
472 | return 0; | 521 | return 0; |
@@ -485,17 +534,14 @@ struct ebt_cl_stack | |||
485 | */ | 534 | */ |
486 | static inline int | 535 | static inline int |
487 | ebt_get_udc_positions(struct ebt_entry *e, struct ebt_table_info *newinfo, | 536 | ebt_get_udc_positions(struct ebt_entry *e, struct ebt_table_info *newinfo, |
488 | struct ebt_entries **hook_entries, unsigned int *n, unsigned int valid_hooks, | 537 | unsigned int *n, struct ebt_cl_stack *udc) |
489 | struct ebt_cl_stack *udc) | ||
490 | { | 538 | { |
491 | int i; | 539 | int i; |
492 | 540 | ||
493 | /* we're only interested in chain starts */ | 541 | /* we're only interested in chain starts */ |
494 | if (e->bitmask & EBT_ENTRY_OR_ENTRIES) | 542 | if (e->bitmask) |
495 | return 0; | 543 | return 0; |
496 | for (i = 0; i < NF_BR_NUMHOOKS; i++) { | 544 | for (i = 0; i < NF_BR_NUMHOOKS; i++) { |
497 | if ((valid_hooks & (1 << i)) == 0) | ||
498 | continue; | ||
499 | if (newinfo->hook_entry[i] == (struct ebt_entries *)e) | 545 | if (newinfo->hook_entry[i] == (struct ebt_entries *)e) |
500 | break; | 546 | break; |
501 | } | 547 | } |
@@ -541,7 +587,7 @@ ebt_cleanup_entry(struct ebt_entry *e, unsigned int *cnt) | |||
541 | { | 587 | { |
542 | struct ebt_entry_target *t; | 588 | struct ebt_entry_target *t; |
543 | 589 | ||
544 | if ((e->bitmask & EBT_ENTRY_OR_ENTRIES) == 0) | 590 | if (e->bitmask == 0) |
545 | return 0; | 591 | return 0; |
546 | /* we're done */ | 592 | /* we're done */ |
547 | if (cnt && (*cnt)-- == 0) | 593 | if (cnt && (*cnt)-- == 0) |
@@ -558,16 +604,17 @@ ebt_cleanup_entry(struct ebt_entry *e, unsigned int *cnt) | |||
558 | 604 | ||
559 | static inline int | 605 | static inline int |
560 | ebt_check_entry(struct ebt_entry *e, struct ebt_table_info *newinfo, | 606 | ebt_check_entry(struct ebt_entry *e, struct ebt_table_info *newinfo, |
561 | const char *name, unsigned int *cnt, unsigned int valid_hooks, | 607 | const char *name, unsigned int *cnt, |
562 | struct ebt_cl_stack *cl_s, unsigned int udc_cnt) | 608 | struct ebt_cl_stack *cl_s, unsigned int udc_cnt) |
563 | { | 609 | { |
564 | struct ebt_entry_target *t; | 610 | struct ebt_entry_target *t; |
565 | struct ebt_target *target; | 611 | struct ebt_target *target; |
566 | unsigned int i, j, hook = 0, hookmask = 0; | 612 | unsigned int i, j, hook = 0, hookmask = 0; |
613 | size_t gap = e->next_offset - e->target_offset; | ||
567 | int ret; | 614 | int ret; |
568 | 615 | ||
569 | /* don't mess with the struct ebt_entries */ | 616 | /* don't mess with the struct ebt_entries */ |
570 | if ((e->bitmask & EBT_ENTRY_OR_ENTRIES) == 0) | 617 | if (e->bitmask == 0) |
571 | return 0; | 618 | return 0; |
572 | 619 | ||
573 | if (e->bitmask & ~EBT_F_MASK) { | 620 | if (e->bitmask & ~EBT_F_MASK) { |
@@ -584,7 +631,7 @@ ebt_check_entry(struct ebt_entry *e, struct ebt_table_info *newinfo, | |||
584 | } | 631 | } |
585 | /* what hook do we belong to? */ | 632 | /* what hook do we belong to? */ |
586 | for (i = 0; i < NF_BR_NUMHOOKS; i++) { | 633 | for (i = 0; i < NF_BR_NUMHOOKS; i++) { |
587 | if ((valid_hooks & (1 << i)) == 0) | 634 | if (!newinfo->hook_entry[i]) |
588 | continue; | 635 | continue; |
589 | if ((char *)newinfo->hook_entry[i] < (char *)e) | 636 | if ((char *)newinfo->hook_entry[i] < (char *)e) |
590 | hook = i; | 637 | hook = i; |
@@ -625,8 +672,7 @@ ebt_check_entry(struct ebt_entry *e, struct ebt_table_info *newinfo, | |||
625 | 672 | ||
626 | t->u.target = target; | 673 | t->u.target = target; |
627 | if (t->u.target == &ebt_standard_target) { | 674 | if (t->u.target == &ebt_standard_target) { |
628 | if (e->target_offset + sizeof(struct ebt_standard_target) > | 675 | if (gap < sizeof(struct ebt_standard_target)) { |
629 | e->next_offset) { | ||
630 | BUGPRINT("Standard target size too big\n"); | 676 | BUGPRINT("Standard target size too big\n"); |
631 | ret = -EFAULT; | 677 | ret = -EFAULT; |
632 | goto cleanup_watchers; | 678 | goto cleanup_watchers; |
@@ -637,8 +683,7 @@ ebt_check_entry(struct ebt_entry *e, struct ebt_table_info *newinfo, | |||
637 | ret = -EFAULT; | 683 | ret = -EFAULT; |
638 | goto cleanup_watchers; | 684 | goto cleanup_watchers; |
639 | } | 685 | } |
640 | } else if ((e->target_offset + t->target_size + | 686 | } else if (t->target_size > gap - sizeof(struct ebt_entry_target) || |
641 | sizeof(struct ebt_entry_target) > e->next_offset) || | ||
642 | (t->u.target->check && | 687 | (t->u.target->check && |
643 | t->u.target->check(name, hookmask, e, t->data, t->target_size) != 0)){ | 688 | t->u.target->check(name, hookmask, e, t->data, t->target_size) != 0)){ |
644 | module_put(t->u.target->me); | 689 | module_put(t->u.target->me); |
@@ -708,7 +753,9 @@ static int check_chainloops(struct ebt_entries *chain, struct ebt_cl_stack *cl_s | |||
708 | BUGPRINT("loop\n"); | 753 | BUGPRINT("loop\n"); |
709 | return -1; | 754 | return -1; |
710 | } | 755 | } |
711 | /* this can't be 0, so the above test is correct */ | 756 | if (cl_s[i].hookmask & (1 << hooknr)) |
757 | goto letscontinue; | ||
758 | /* this can't be 0, so the loop test is correct */ | ||
712 | cl_s[i].cs.n = pos + 1; | 759 | cl_s[i].cs.n = pos + 1; |
713 | pos = 0; | 760 | pos = 0; |
714 | cl_s[i].cs.e = ((void *)e + e->next_offset); | 761 | cl_s[i].cs.e = ((void *)e + e->next_offset); |
@@ -728,42 +775,35 @@ letscontinue: | |||
728 | } | 775 | } |
729 | 776 | ||
730 | /* do the parsing of the table/chains/entries/matches/watchers/targets, heh */ | 777 | /* do the parsing of the table/chains/entries/matches/watchers/targets, heh */ |
731 | static int translate_table(struct ebt_replace *repl, | 778 | static int translate_table(char *name, struct ebt_table_info *newinfo) |
732 | struct ebt_table_info *newinfo) | ||
733 | { | 779 | { |
734 | unsigned int i, j, k, udc_cnt; | 780 | unsigned int i, j, k, udc_cnt; |
735 | int ret; | 781 | int ret; |
736 | struct ebt_cl_stack *cl_s = NULL; /* used in the checking for chain loops */ | 782 | struct ebt_cl_stack *cl_s = NULL; /* used in the checking for chain loops */ |
737 | 783 | ||
738 | i = 0; | 784 | i = 0; |
739 | while (i < NF_BR_NUMHOOKS && !(repl->valid_hooks & (1 << i))) | 785 | while (i < NF_BR_NUMHOOKS && !newinfo->hook_entry[i]) |
740 | i++; | 786 | i++; |
741 | if (i == NF_BR_NUMHOOKS) { | 787 | if (i == NF_BR_NUMHOOKS) { |
742 | BUGPRINT("No valid hooks specified\n"); | 788 | BUGPRINT("No valid hooks specified\n"); |
743 | return -EINVAL; | 789 | return -EINVAL; |
744 | } | 790 | } |
745 | if (repl->hook_entry[i] != (struct ebt_entries *)repl->entries) { | 791 | if (newinfo->hook_entry[i] != (struct ebt_entries *)newinfo->entries) { |
746 | BUGPRINT("Chains don't start at beginning\n"); | 792 | BUGPRINT("Chains don't start at beginning\n"); |
747 | return -EINVAL; | 793 | return -EINVAL; |
748 | } | 794 | } |
749 | /* make sure chains are ordered after each other in same order | 795 | /* make sure chains are ordered after each other in same order |
750 | as their corresponding hooks */ | 796 | as their corresponding hooks */ |
751 | for (j = i + 1; j < NF_BR_NUMHOOKS; j++) { | 797 | for (j = i + 1; j < NF_BR_NUMHOOKS; j++) { |
752 | if (!(repl->valid_hooks & (1 << j))) | 798 | if (!newinfo->hook_entry[j]) |
753 | continue; | 799 | continue; |
754 | if ( repl->hook_entry[j] <= repl->hook_entry[i] ) { | 800 | if (newinfo->hook_entry[j] <= newinfo->hook_entry[i]) { |
755 | BUGPRINT("Hook order must be followed\n"); | 801 | BUGPRINT("Hook order must be followed\n"); |
756 | return -EINVAL; | 802 | return -EINVAL; |
757 | } | 803 | } |
758 | i = j; | 804 | i = j; |
759 | } | 805 | } |
760 | 806 | ||
761 | for (i = 0; i < NF_BR_NUMHOOKS; i++) | ||
762 | newinfo->hook_entry[i] = NULL; | ||
763 | |||
764 | newinfo->entries_size = repl->entries_size; | ||
765 | newinfo->nentries = repl->nentries; | ||
766 | |||
767 | /* do some early checkings and initialize some things */ | 807 | /* do some early checkings and initialize some things */ |
768 | i = 0; /* holds the expected nr. of entries for the chain */ | 808 | i = 0; /* holds the expected nr. of entries for the chain */ |
769 | j = 0; /* holds the up to now counted entries for the chain */ | 809 | j = 0; /* holds the up to now counted entries for the chain */ |
@@ -771,9 +811,8 @@ static int translate_table(struct ebt_replace *repl, | |||
771 | newinfo->nentries afterwards */ | 811 | newinfo->nentries afterwards */ |
772 | udc_cnt = 0; /* will hold the nr. of user defined chains (udc) */ | 812 | udc_cnt = 0; /* will hold the nr. of user defined chains (udc) */ |
773 | ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size, | 813 | ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size, |
774 | ebt_check_entry_size_and_hooks, newinfo, repl->entries, | 814 | ebt_check_entry_size_and_hooks, newinfo, |
775 | repl->entries + repl->entries_size, repl->hook_entry, &i, &j, &k, | 815 | &i, &j, &k, &udc_cnt); |
776 | &udc_cnt, repl->valid_hooks); | ||
777 | 816 | ||
778 | if (ret != 0) | 817 | if (ret != 0) |
779 | return ret; | 818 | return ret; |
@@ -788,15 +827,6 @@ static int translate_table(struct ebt_replace *repl, | |||
788 | return -EINVAL; | 827 | return -EINVAL; |
789 | } | 828 | } |
790 | 829 | ||
791 | /* check if all valid hooks have a chain */ | ||
792 | for (i = 0; i < NF_BR_NUMHOOKS; i++) { | ||
793 | if (newinfo->hook_entry[i] == NULL && | ||
794 | (repl->valid_hooks & (1 << i))) { | ||
795 | BUGPRINT("Valid hook without chain\n"); | ||
796 | return -EINVAL; | ||
797 | } | ||
798 | } | ||
799 | |||
800 | /* get the location of the udc, put them in an array | 830 | /* get the location of the udc, put them in an array |
801 | while we're at it, allocate the chainstack */ | 831 | while we're at it, allocate the chainstack */ |
802 | if (udc_cnt) { | 832 | if (udc_cnt) { |
@@ -824,8 +854,7 @@ static int translate_table(struct ebt_replace *repl, | |||
824 | return -ENOMEM; | 854 | return -ENOMEM; |
825 | i = 0; /* the i'th udc */ | 855 | i = 0; /* the i'th udc */ |
826 | EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size, | 856 | EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size, |
827 | ebt_get_udc_positions, newinfo, repl->hook_entry, &i, | 857 | ebt_get_udc_positions, newinfo, &i, cl_s); |
828 | repl->valid_hooks, cl_s); | ||
829 | /* sanity check */ | 858 | /* sanity check */ |
830 | if (i != udc_cnt) { | 859 | if (i != udc_cnt) { |
831 | BUGPRINT("i != udc_cnt\n"); | 860 | BUGPRINT("i != udc_cnt\n"); |
@@ -836,7 +865,7 @@ static int translate_table(struct ebt_replace *repl, | |||
836 | 865 | ||
837 | /* Check for loops */ | 866 | /* Check for loops */ |
838 | for (i = 0; i < NF_BR_NUMHOOKS; i++) | 867 | for (i = 0; i < NF_BR_NUMHOOKS; i++) |
839 | if (repl->valid_hooks & (1 << i)) | 868 | if (newinfo->hook_entry[i]) |
840 | if (check_chainloops(newinfo->hook_entry[i], | 869 | if (check_chainloops(newinfo->hook_entry[i], |
841 | cl_s, udc_cnt, i, newinfo->entries)) { | 870 | cl_s, udc_cnt, i, newinfo->entries)) { |
842 | vfree(cl_s); | 871 | vfree(cl_s); |
@@ -856,8 +885,7 @@ static int translate_table(struct ebt_replace *repl, | |||
856 | /* used to know what we need to clean up if something goes wrong */ | 885 | /* used to know what we need to clean up if something goes wrong */ |
857 | i = 0; | 886 | i = 0; |
858 | ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size, | 887 | ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size, |
859 | ebt_check_entry, newinfo, repl->name, &i, repl->valid_hooks, | 888 | ebt_check_entry, newinfo, name, &i, cl_s, udc_cnt); |
860 | cl_s, udc_cnt); | ||
861 | if (ret != 0) { | 889 | if (ret != 0) { |
862 | EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size, | 890 | EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size, |
863 | ebt_cleanup_entry, &i); | 891 | ebt_cleanup_entry, &i); |
@@ -954,7 +982,11 @@ static int do_replace(void __user *user, unsigned int len) | |||
954 | 982 | ||
955 | /* this can get initialized by translate_table() */ | 983 | /* this can get initialized by translate_table() */ |
956 | newinfo->chainstack = NULL; | 984 | newinfo->chainstack = NULL; |
957 | ret = translate_table(&tmp, newinfo); | 985 | ret = ebt_verify_pointers(&tmp, newinfo); |
986 | if (ret != 0) | ||
987 | goto free_counterstmp; | ||
988 | |||
989 | ret = translate_table(tmp.name, newinfo); | ||
958 | 990 | ||
959 | if (ret != 0) | 991 | if (ret != 0) |
960 | goto free_counterstmp; | 992 | goto free_counterstmp; |
@@ -1125,35 +1157,47 @@ int ebt_register_table(struct ebt_table *table) | |||
1125 | { | 1157 | { |
1126 | struct ebt_table_info *newinfo; | 1158 | struct ebt_table_info *newinfo; |
1127 | struct ebt_table *t; | 1159 | struct ebt_table *t; |
1160 | struct ebt_replace_kernel *repl; | ||
1128 | int ret, i, countersize; | 1161 | int ret, i, countersize; |
1162 | void *p; | ||
1129 | 1163 | ||
1130 | if (!table || !table->table ||!table->table->entries || | 1164 | if (!table || !(repl = table->table) || !repl->entries || |
1131 | table->table->entries_size == 0 || | 1165 | repl->entries_size == 0 || |
1132 | table->table->counters || table->private) { | 1166 | repl->counters || table->private) { |
1133 | BUGPRINT("Bad table data for ebt_register_table!!!\n"); | 1167 | BUGPRINT("Bad table data for ebt_register_table!!!\n"); |
1134 | return -EINVAL; | 1168 | return -EINVAL; |
1135 | } | 1169 | } |
1136 | 1170 | ||
1137 | countersize = COUNTER_OFFSET(table->table->nentries) * | 1171 | countersize = COUNTER_OFFSET(repl->nentries) * |
1138 | (highest_possible_processor_id()+1); | 1172 | (highest_possible_processor_id()+1); |
1139 | newinfo = vmalloc(sizeof(*newinfo) + countersize); | 1173 | newinfo = vmalloc(sizeof(*newinfo) + countersize); |
1140 | ret = -ENOMEM; | 1174 | ret = -ENOMEM; |
1141 | if (!newinfo) | 1175 | if (!newinfo) |
1142 | return -ENOMEM; | 1176 | return -ENOMEM; |
1143 | 1177 | ||
1144 | newinfo->entries = vmalloc(table->table->entries_size); | 1178 | p = vmalloc(repl->entries_size); |
1145 | if (!(newinfo->entries)) | 1179 | if (!p) |
1146 | goto free_newinfo; | 1180 | goto free_newinfo; |
1147 | 1181 | ||
1148 | memcpy(newinfo->entries, table->table->entries, | 1182 | memcpy(p, repl->entries, repl->entries_size); |
1149 | table->table->entries_size); | 1183 | newinfo->entries = p; |
1184 | |||
1185 | newinfo->entries_size = repl->entries_size; | ||
1186 | newinfo->nentries = repl->nentries; | ||
1150 | 1187 | ||
1151 | if (countersize) | 1188 | if (countersize) |
1152 | memset(newinfo->counters, 0, countersize); | 1189 | memset(newinfo->counters, 0, countersize); |
1153 | 1190 | ||
1154 | /* fill in newinfo and parse the entries */ | 1191 | /* fill in newinfo and parse the entries */ |
1155 | newinfo->chainstack = NULL; | 1192 | newinfo->chainstack = NULL; |
1156 | ret = translate_table(table->table, newinfo); | 1193 | for (i = 0; i < NF_BR_NUMHOOKS; i++) { |
1194 | if ((repl->valid_hooks & (1 << i)) == 0) | ||
1195 | newinfo->hook_entry[i] = NULL; | ||
1196 | else | ||
1197 | newinfo->hook_entry[i] = p + | ||
1198 | ((char *)repl->hook_entry[i] - repl->entries); | ||
1199 | } | ||
1200 | ret = translate_table(repl->name, newinfo); | ||
1157 | if (ret != 0) { | 1201 | if (ret != 0) { |
1158 | BUGPRINT("Translate_table failed\n"); | 1202 | BUGPRINT("Translate_table failed\n"); |
1159 | goto free_chainstack; | 1203 | goto free_chainstack; |
@@ -1277,33 +1321,33 @@ free_tmp: | |||
1277 | } | 1321 | } |
1278 | 1322 | ||
1279 | static inline int ebt_make_matchname(struct ebt_entry_match *m, | 1323 | static inline int ebt_make_matchname(struct ebt_entry_match *m, |
1280 | char *base, char *ubase) | 1324 | char *base, char __user *ubase) |
1281 | { | 1325 | { |
1282 | char *hlp = ubase - base + (char *)m; | 1326 | char __user *hlp = ubase + ((char *)m - base); |
1283 | if (copy_to_user(hlp, m->u.match->name, EBT_FUNCTION_MAXNAMELEN)) | 1327 | if (copy_to_user(hlp, m->u.match->name, EBT_FUNCTION_MAXNAMELEN)) |
1284 | return -EFAULT; | 1328 | return -EFAULT; |
1285 | return 0; | 1329 | return 0; |
1286 | } | 1330 | } |
1287 | 1331 | ||
1288 | static inline int ebt_make_watchername(struct ebt_entry_watcher *w, | 1332 | static inline int ebt_make_watchername(struct ebt_entry_watcher *w, |
1289 | char *base, char *ubase) | 1333 | char *base, char __user *ubase) |
1290 | { | 1334 | { |
1291 | char *hlp = ubase - base + (char *)w; | 1335 | char __user *hlp = ubase + ((char *)w - base); |
1292 | if (copy_to_user(hlp , w->u.watcher->name, EBT_FUNCTION_MAXNAMELEN)) | 1336 | if (copy_to_user(hlp , w->u.watcher->name, EBT_FUNCTION_MAXNAMELEN)) |
1293 | return -EFAULT; | 1337 | return -EFAULT; |
1294 | return 0; | 1338 | return 0; |
1295 | } | 1339 | } |
1296 | 1340 | ||
1297 | static inline int ebt_make_names(struct ebt_entry *e, char *base, char *ubase) | 1341 | static inline int ebt_make_names(struct ebt_entry *e, char *base, char __user *ubase) |
1298 | { | 1342 | { |
1299 | int ret; | 1343 | int ret; |
1300 | char *hlp; | 1344 | char __user *hlp; |
1301 | struct ebt_entry_target *t; | 1345 | struct ebt_entry_target *t; |
1302 | 1346 | ||
1303 | if ((e->bitmask & EBT_ENTRY_OR_ENTRIES) == 0) | 1347 | if (e->bitmask == 0) |
1304 | return 0; | 1348 | return 0; |
1305 | 1349 | ||
1306 | hlp = ubase - base + (char *)e + e->target_offset; | 1350 | hlp = ubase + (((char *)e + e->target_offset) - base); |
1307 | t = (struct ebt_entry_target *)(((char *)e) + e->target_offset); | 1351 | t = (struct ebt_entry_target *)(((char *)e) + e->target_offset); |
1308 | 1352 | ||
1309 | ret = EBT_MATCH_ITERATE(e, ebt_make_matchname, base, ubase); | 1353 | ret = EBT_MATCH_ITERATE(e, ebt_make_matchname, base, ubase); |
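
The ebt_make_*name() changes rewrite the pointer arithmetic so the result keeps its __user annotation: the offset is computed inside the kernel copy and then applied to the userspace base. Roughly (helper name and arguments invented):

	/* kobj lives inside the kernel copy starting at kbase; ubase is the
	 * matching userspace buffer.  Copy name to the mirrored position. */
	static int put_name_at_offset(const char *kbase, const void *kobj,
				      char __user *ubase, const char *name, size_t len)
	{
		char __user *dst = ubase + ((const char *)kobj - kbase);

		return copy_to_user(dst, name, len) ? -EFAULT : 0;
	}
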
diff --git a/net/core/Makefile b/net/core/Makefile index 119568077dab..73272d506e93 100644 --- a/net/core/Makefile +++ b/net/core/Makefile | |||
@@ -12,7 +12,6 @@ obj-y += dev.o ethtool.o dev_mcast.o dst.o netevent.o \ | |||
12 | 12 | ||
13 | obj-$(CONFIG_XFRM) += flow.o | 13 | obj-$(CONFIG_XFRM) += flow.o |
14 | obj-$(CONFIG_SYSFS) += net-sysfs.o | 14 | obj-$(CONFIG_SYSFS) += net-sysfs.o |
15 | obj-$(CONFIG_NET_DIVERT) += dv.o | ||
16 | obj-$(CONFIG_NET_PKTGEN) += pktgen.o | 15 | obj-$(CONFIG_NET_PKTGEN) += pktgen.o |
17 | obj-$(CONFIG_WIRELESS_EXT) += wireless.o | 16 | obj-$(CONFIG_WIRELESS_EXT) += wireless.o |
18 | obj-$(CONFIG_NETPOLL) += netpoll.o | 17 | obj-$(CONFIG_NETPOLL) += netpoll.o |
diff --git a/net/core/datagram.c b/net/core/datagram.c index f558c61aecc7..797fdd4352ce 100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c | |||
@@ -321,7 +321,7 @@ fault: | |||
321 | 321 | ||
322 | static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset, | 322 | static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset, |
323 | u8 __user *to, int len, | 323 | u8 __user *to, int len, |
324 | unsigned int *csump) | 324 | __wsum *csump) |
325 | { | 325 | { |
326 | int start = skb_headlen(skb); | 326 | int start = skb_headlen(skb); |
327 | int pos = 0; | 327 | int pos = 0; |
@@ -350,7 +350,7 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset, | |||
350 | 350 | ||
351 | end = start + skb_shinfo(skb)->frags[i].size; | 351 | end = start + skb_shinfo(skb)->frags[i].size; |
352 | if ((copy = end - offset) > 0) { | 352 | if ((copy = end - offset) > 0) { |
353 | unsigned int csum2; | 353 | __wsum csum2; |
354 | int err = 0; | 354 | int err = 0; |
355 | u8 *vaddr; | 355 | u8 *vaddr; |
356 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | 356 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
@@ -386,7 +386,7 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset, | |||
386 | 386 | ||
387 | end = start + list->len; | 387 | end = start + list->len; |
388 | if ((copy = end - offset) > 0) { | 388 | if ((copy = end - offset) > 0) { |
389 | unsigned int csum2 = 0; | 389 | __wsum csum2 = 0; |
390 | if (copy > len) | 390 | if (copy > len) |
391 | copy = len; | 391 | copy = len; |
392 | if (skb_copy_and_csum_datagram(list, | 392 | if (skb_copy_and_csum_datagram(list, |
@@ -411,11 +411,11 @@ fault: | |||
411 | return -EFAULT; | 411 | return -EFAULT; |
412 | } | 412 | } |
413 | 413 | ||
414 | unsigned int __skb_checksum_complete(struct sk_buff *skb) | 414 | __sum16 __skb_checksum_complete(struct sk_buff *skb) |
415 | { | 415 | { |
416 | unsigned int sum; | 416 | __sum16 sum; |
417 | 417 | ||
418 | sum = (u16)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum)); | 418 | sum = csum_fold(skb_checksum(skb, 0, skb->len, skb->csum)); |
419 | if (likely(!sum)) { | 419 | if (likely(!sum)) { |
420 | if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE)) | 420 | if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE)) |
421 | netdev_rx_csum_fault(skb->dev); | 421 | netdev_rx_csum_fault(skb->dev); |
@@ -441,7 +441,7 @@ EXPORT_SYMBOL(__skb_checksum_complete); | |||
441 | int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb, | 441 | int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb, |
442 | int hlen, struct iovec *iov) | 442 | int hlen, struct iovec *iov) |
443 | { | 443 | { |
444 | unsigned int csum; | 444 | __wsum csum; |
445 | int chunk = skb->len - hlen; | 445 | int chunk = skb->len - hlen; |
446 | 446 | ||
447 | /* Skip filled elements. | 447 | /* Skip filled elements. |
@@ -460,7 +460,7 @@ int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb, | |||
460 | if (skb_copy_and_csum_datagram(skb, hlen, iov->iov_base, | 460 | if (skb_copy_and_csum_datagram(skb, hlen, iov->iov_base, |
461 | chunk, &csum)) | 461 | chunk, &csum)) |
462 | goto fault; | 462 | goto fault; |
463 | if ((unsigned short)csum_fold(csum)) | 463 | if (csum_fold(csum)) |
464 | goto csum_error; | 464 | goto csum_error; |
465 | if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE)) | 465 | if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE)) |
466 | netdev_rx_csum_fault(skb->dev); | 466 | netdev_rx_csum_fault(skb->dev); |
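[Editorial note on the datagram.c hunk above] The __wsum/__sum16 annotations distinguish the 32-bit wide checksum accumulator from the final folded 16-bit value, so sparse can flag mixed-up uses; __skb_checksum_complete() now returns __sum16 and tests the folded result directly. A userspace sketch of the conventional fold (the kernel's csum_fold() is arch-specific but computes the same result):

#include <assert.h>
#include <stdint.h>

/* Fold a 32-bit one's-complement accumulator into a 16-bit checksum. */
static uint16_t fold(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);   /* fold the carries once */
	sum = (sum & 0xffff) + (sum >> 16);   /* and once more for any overflow */
	return (uint16_t)~sum;
}

int main(void)
{
	/* A correct checksum over data plus its own checksum folds to zero,
	 * which is what the "if (likely(!sum))" test above relies on. */
	uint32_t acc = 0x1234 + 0xabcd;
	uint16_t csum = fold(acc);

	assert(fold(acc + csum) == 0);
	return 0;
}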
diff --git a/net/core/dev.c b/net/core/dev.c index 81c426adcd1e..59d058a3b504 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -98,7 +98,6 @@ | |||
98 | #include <linux/seq_file.h> | 98 | #include <linux/seq_file.h> |
99 | #include <linux/stat.h> | 99 | #include <linux/stat.h> |
100 | #include <linux/if_bridge.h> | 100 | #include <linux/if_bridge.h> |
101 | #include <linux/divert.h> | ||
102 | #include <net/dst.h> | 101 | #include <net/dst.h> |
103 | #include <net/pkt_sched.h> | 102 | #include <net/pkt_sched.h> |
104 | #include <net/checksum.h> | 103 | #include <net/checksum.h> |
@@ -1170,7 +1169,7 @@ EXPORT_SYMBOL(netif_device_attach); | |||
1170 | */ | 1169 | */ |
1171 | int skb_checksum_help(struct sk_buff *skb) | 1170 | int skb_checksum_help(struct sk_buff *skb) |
1172 | { | 1171 | { |
1173 | unsigned int csum; | 1172 | __wsum csum; |
1174 | int ret = 0, offset = skb->h.raw - skb->data; | 1173 | int ret = 0, offset = skb->h.raw - skb->data; |
1175 | 1174 | ||
1176 | if (skb->ip_summed == CHECKSUM_COMPLETE) | 1175 | if (skb->ip_summed == CHECKSUM_COMPLETE) |
@@ -1192,9 +1191,9 @@ int skb_checksum_help(struct sk_buff *skb) | |||
1192 | 1191 | ||
1193 | offset = skb->tail - skb->h.raw; | 1192 | offset = skb->tail - skb->h.raw; |
1194 | BUG_ON(offset <= 0); | 1193 | BUG_ON(offset <= 0); |
1195 | BUG_ON(skb->csum + 2 > offset); | 1194 | BUG_ON(skb->csum_offset + 2 > offset); |
1196 | 1195 | ||
1197 | *(u16*)(skb->h.raw + skb->csum) = csum_fold(csum); | 1196 | *(__sum16*)(skb->h.raw + skb->csum_offset) = csum_fold(csum); |
1198 | 1197 | ||
1199 | out_set_summed: | 1198 | out_set_summed: |
1200 | skb->ip_summed = CHECKSUM_NONE; | 1199 | skb->ip_summed = CHECKSUM_NONE; |
@@ -1216,7 +1215,7 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features) | |||
1216 | { | 1215 | { |
1217 | struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); | 1216 | struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); |
1218 | struct packet_type *ptype; | 1217 | struct packet_type *ptype; |
1219 | int type = skb->protocol; | 1218 | __be16 type = skb->protocol; |
1220 | int err; | 1219 | int err; |
1221 | 1220 | ||
1222 | BUG_ON(skb_shinfo(skb)->frag_list); | 1221 | BUG_ON(skb_shinfo(skb)->frag_list); |
@@ -1767,7 +1766,7 @@ int netif_receive_skb(struct sk_buff *skb) | |||
1767 | struct packet_type *ptype, *pt_prev; | 1766 | struct packet_type *ptype, *pt_prev; |
1768 | struct net_device *orig_dev; | 1767 | struct net_device *orig_dev; |
1769 | int ret = NET_RX_DROP; | 1768 | int ret = NET_RX_DROP; |
1770 | unsigned short type; | 1769 | __be16 type; |
1771 | 1770 | ||
1772 | /* if we've gotten here through NAPI, check netpoll */ | 1771 | /* if we've gotten here through NAPI, check netpoll */ |
1773 | if (skb->dev->poll && netpoll_rx(skb)) | 1772 | if (skb->dev->poll && netpoll_rx(skb)) |
@@ -1827,8 +1826,6 @@ int netif_receive_skb(struct sk_buff *skb) | |||
1827 | ncls: | 1826 | ncls: |
1828 | #endif | 1827 | #endif |
1829 | 1828 | ||
1830 | handle_diverter(skb); | ||
1831 | |||
1832 | if (handle_bridge(&skb, &pt_prev, &ret, orig_dev)) | 1829 | if (handle_bridge(&skb, &pt_prev, &ret, orig_dev)) |
1833 | goto out; | 1830 | goto out; |
1834 | 1831 | ||
@@ -2898,10 +2895,6 @@ int register_netdevice(struct net_device *dev) | |||
2898 | spin_lock_init(&dev->ingress_lock); | 2895 | spin_lock_init(&dev->ingress_lock); |
2899 | #endif | 2896 | #endif |
2900 | 2897 | ||
2901 | ret = alloc_divert_blk(dev); | ||
2902 | if (ret) | ||
2903 | goto out; | ||
2904 | |||
2905 | dev->iflink = -1; | 2898 | dev->iflink = -1; |
2906 | 2899 | ||
2907 | /* Init, if this function is available */ | 2900 | /* Init, if this function is available */ |
@@ -2910,13 +2903,13 @@ int register_netdevice(struct net_device *dev) | |||
2910 | if (ret) { | 2903 | if (ret) { |
2911 | if (ret > 0) | 2904 | if (ret > 0) |
2912 | ret = -EIO; | 2905 | ret = -EIO; |
2913 | goto out_err; | 2906 | goto out; |
2914 | } | 2907 | } |
2915 | } | 2908 | } |
2916 | 2909 | ||
2917 | if (!dev_valid_name(dev->name)) { | 2910 | if (!dev_valid_name(dev->name)) { |
2918 | ret = -EINVAL; | 2911 | ret = -EINVAL; |
2919 | goto out_err; | 2912 | goto out; |
2920 | } | 2913 | } |
2921 | 2914 | ||
2922 | dev->ifindex = dev_new_index(); | 2915 | dev->ifindex = dev_new_index(); |
@@ -2930,7 +2923,7 @@ int register_netdevice(struct net_device *dev) | |||
2930 | = hlist_entry(p, struct net_device, name_hlist); | 2923 | = hlist_entry(p, struct net_device, name_hlist); |
2931 | if (!strncmp(d->name, dev->name, IFNAMSIZ)) { | 2924 | if (!strncmp(d->name, dev->name, IFNAMSIZ)) { |
2932 | ret = -EEXIST; | 2925 | ret = -EEXIST; |
2933 | goto out_err; | 2926 | goto out; |
2934 | } | 2927 | } |
2935 | } | 2928 | } |
2936 | 2929 | ||
@@ -2974,7 +2967,7 @@ int register_netdevice(struct net_device *dev) | |||
2974 | 2967 | ||
2975 | ret = netdev_register_sysfs(dev); | 2968 | ret = netdev_register_sysfs(dev); |
2976 | if (ret) | 2969 | if (ret) |
2977 | goto out_err; | 2970 | goto out; |
2978 | dev->reg_state = NETREG_REGISTERED; | 2971 | dev->reg_state = NETREG_REGISTERED; |
2979 | 2972 | ||
2980 | /* | 2973 | /* |
@@ -3001,9 +2994,6 @@ int register_netdevice(struct net_device *dev) | |||
3001 | 2994 | ||
3002 | out: | 2995 | out: |
3003 | return ret; | 2996 | return ret; |
3004 | out_err: | ||
3005 | free_divert_blk(dev); | ||
3006 | goto out; | ||
3007 | } | 2997 | } |
3008 | 2998 | ||
3009 | /** | 2999 | /** |
@@ -3035,15 +3025,6 @@ int register_netdev(struct net_device *dev) | |||
3035 | goto out; | 3025 | goto out; |
3036 | } | 3026 | } |
3037 | 3027 | ||
3038 | /* | ||
3039 | * Back compatibility hook. Kill this one in 2.5 | ||
3040 | */ | ||
3041 | if (dev->name[0] == 0 || dev->name[0] == ' ') { | ||
3042 | err = dev_alloc_name(dev, "eth%d"); | ||
3043 | if (err < 0) | ||
3044 | goto out; | ||
3045 | } | ||
3046 | |||
3047 | err = register_netdevice(dev); | 3028 | err = register_netdevice(dev); |
3048 | out: | 3029 | out: |
3049 | rtnl_unlock(); | 3030 | rtnl_unlock(); |
@@ -3329,8 +3310,6 @@ int unregister_netdevice(struct net_device *dev) | |||
3329 | /* Notifier chain MUST detach us from master device. */ | 3310 | /* Notifier chain MUST detach us from master device. */ |
3330 | BUG_TRAP(!dev->master); | 3311 | BUG_TRAP(!dev->master); |
3331 | 3312 | ||
3332 | free_divert_blk(dev); | ||
3333 | |||
3334 | /* Finish processing unregister after unlock */ | 3313 | /* Finish processing unregister after unlock */ |
3335 | net_set_todo(dev); | 3314 | net_set_todo(dev); |
3336 | 3315 | ||
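[Editorial note on the dev.c hunks above] Besides dropping the diverter hooks, skb_checksum_help() now writes the folded result at skb->csum_offset within the transport header, stored as a __sum16, instead of overloading skb->csum as both running sum and output location. A tiny sketch of writing a 16-bit checksum at a given offset into a packet buffer (the buffer and offset are illustrative; 16 happens to be the TCP checksum offset):

#include <assert.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	unsigned char hdr[20] = { 0 };
	size_t csum_offset = 16;        /* e.g. offsetof(struct tcphdr, check) */
	uint16_t folded = 0x41fe;       /* some folded checksum value */

	/* memcpy avoids an unaligned store; the kernel code casts instead. */
	memcpy(hdr + csum_offset, &folded, sizeof(folded));
	assert(memcmp(hdr + csum_offset, &folded, sizeof(folded)) == 0);
	return 0;
}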
diff --git a/net/core/dv.c b/net/core/dv.c deleted file mode 100644 index 29ee77f15932..000000000000 --- a/net/core/dv.c +++ /dev/null | |||
@@ -1,546 +0,0 @@ | |||
1 | /* | ||
2 | * INET An implementation of the TCP/IP protocol suite for the LINUX | ||
3 | * operating system. INET is implemented using the BSD Socket | ||
4 | * interface as the means of communication with the user level. | ||
5 | * | ||
6 | * Generic frame diversion | ||
7 | * | ||
8 | * Authors: | ||
9 | * Benoit LOCHER: initial integration within the kernel with support for ethernet | ||
10 | * Dave Miller: improvement on the code (correctness, performance and source files) | ||
11 | * | ||
12 | */ | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/types.h> | ||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/sched.h> | ||
17 | #include <linux/string.h> | ||
18 | #include <linux/mm.h> | ||
19 | #include <linux/socket.h> | ||
20 | #include <linux/in.h> | ||
21 | #include <linux/inet.h> | ||
22 | #include <linux/ip.h> | ||
23 | #include <linux/udp.h> | ||
24 | #include <linux/netdevice.h> | ||
25 | #include <linux/etherdevice.h> | ||
26 | #include <linux/skbuff.h> | ||
27 | #include <linux/capability.h> | ||
28 | #include <linux/errno.h> | ||
29 | #include <linux/init.h> | ||
30 | #include <net/dst.h> | ||
31 | #include <net/arp.h> | ||
32 | #include <net/sock.h> | ||
33 | #include <net/ipv6.h> | ||
34 | #include <net/ip.h> | ||
35 | #include <asm/uaccess.h> | ||
36 | #include <asm/system.h> | ||
37 | #include <asm/checksum.h> | ||
38 | #include <linux/divert.h> | ||
39 | #include <linux/sockios.h> | ||
40 | |||
41 | const char sysctl_divert_version[32]="0.46"; /* Current version */ | ||
42 | |||
43 | static int __init dv_init(void) | ||
44 | { | ||
45 | return 0; | ||
46 | } | ||
47 | module_init(dv_init); | ||
48 | |||
49 | /* | ||
50 | * Allocate a divert_blk for a device. This must be an ethernet nic. | ||
51 | */ | ||
52 | int alloc_divert_blk(struct net_device *dev) | ||
53 | { | ||
54 | int alloc_size = (sizeof(struct divert_blk) + 3) & ~3; | ||
55 | |||
56 | dev->divert = NULL; | ||
57 | if (dev->type == ARPHRD_ETHER) { | ||
58 | dev->divert = kzalloc(alloc_size, GFP_KERNEL); | ||
59 | if (dev->divert == NULL) { | ||
60 | printk(KERN_INFO "divert: unable to allocate divert_blk for %s\n", | ||
61 | dev->name); | ||
62 | return -ENOMEM; | ||
63 | } | ||
64 | dev_hold(dev); | ||
65 | } | ||
66 | |||
67 | return 0; | ||
68 | } | ||
69 | |||
70 | /* | ||
71 | * Free a divert_blk allocated by the above function, if it was | ||
72 | * allocated on that device. | ||
73 | */ | ||
74 | void free_divert_blk(struct net_device *dev) | ||
75 | { | ||
76 | if (dev->divert) { | ||
77 | kfree(dev->divert); | ||
78 | dev->divert=NULL; | ||
79 | dev_put(dev); | ||
80 | } | ||
81 | } | ||
82 | |||
83 | /* | ||
84 | * Adds a tcp/udp (source or dest) port to an array | ||
85 | */ | ||
86 | static int add_port(u16 ports[], u16 port) | ||
87 | { | ||
88 | int i; | ||
89 | |||
90 | if (port == 0) | ||
91 | return -EINVAL; | ||
92 | |||
93 | /* Storing directly in network format for performance, | ||
94 | * thanks Dave :) | ||
95 | */ | ||
96 | port = htons(port); | ||
97 | |||
98 | for (i = 0; i < MAX_DIVERT_PORTS; i++) { | ||
99 | if (ports[i] == port) | ||
100 | return -EALREADY; | ||
101 | } | ||
102 | |||
103 | for (i = 0; i < MAX_DIVERT_PORTS; i++) { | ||
104 | if (ports[i] == 0) { | ||
105 | ports[i] = port; | ||
106 | return 0; | ||
107 | } | ||
108 | } | ||
109 | |||
110 | return -ENOBUFS; | ||
111 | } | ||
112 | |||
113 | /* | ||
114 | * Removes a port from an array tcp/udp (source or dest) | ||
115 | */ | ||
116 | static int remove_port(u16 ports[], u16 port) | ||
117 | { | ||
118 | int i; | ||
119 | |||
120 | if (port == 0) | ||
121 | return -EINVAL; | ||
122 | |||
123 | /* Storing directly in network format for performance, | ||
124 | * thanks Dave ! | ||
125 | */ | ||
126 | port = htons(port); | ||
127 | |||
128 | for (i = 0; i < MAX_DIVERT_PORTS; i++) { | ||
129 | if (ports[i] == port) { | ||
130 | ports[i] = 0; | ||
131 | return 0; | ||
132 | } | ||
133 | } | ||
134 | |||
135 | return -EINVAL; | ||
136 | } | ||
137 | |||
138 | /* Some basic sanity checks on the arguments passed to divert_ioctl() */ | ||
139 | static int check_args(struct divert_cf *div_cf, struct net_device **dev) | ||
140 | { | ||
141 | char devname[32]; | ||
142 | int ret; | ||
143 | |||
144 | if (dev == NULL) | ||
145 | return -EFAULT; | ||
146 | |||
147 | /* GETVERSION: all other args are unused */ | ||
148 | if (div_cf->cmd == DIVCMD_GETVERSION) | ||
149 | return 0; | ||
150 | |||
151 | /* Network device index should reasonably be between 0 and 1000 :) */ | ||
152 | if (div_cf->dev_index < 0 || div_cf->dev_index > 1000) | ||
153 | return -EINVAL; | ||
154 | |||
155 | /* Let's try to find the ifname */ | ||
156 | sprintf(devname, "eth%d", div_cf->dev_index); | ||
157 | *dev = dev_get_by_name(devname); | ||
158 | |||
159 | /* dev should NOT be null */ | ||
160 | if (*dev == NULL) | ||
161 | return -EINVAL; | ||
162 | |||
163 | ret = 0; | ||
164 | |||
165 | /* user issuing the ioctl must be a super one :) */ | ||
166 | if (!capable(CAP_SYS_ADMIN)) { | ||
167 | ret = -EPERM; | ||
168 | goto out; | ||
169 | } | ||
170 | |||
171 | /* Device must have a divert_blk member NOT null */ | ||
172 | if ((*dev)->divert == NULL) | ||
173 | ret = -EINVAL; | ||
174 | out: | ||
175 | dev_put(*dev); | ||
176 | return ret; | ||
177 | } | ||
178 | |||
179 | /* | ||
180 | * control function of the diverter | ||
181 | */ | ||
182 | #if 0 | ||
183 | #define DVDBG(a) \ | ||
184 | printk(KERN_DEBUG "divert_ioctl() line %d %s\n", __LINE__, (a)) | ||
185 | #else | ||
186 | #define DVDBG(a) | ||
187 | #endif | ||
188 | |||
189 | int divert_ioctl(unsigned int cmd, struct divert_cf __user *arg) | ||
190 | { | ||
191 | struct divert_cf div_cf; | ||
192 | struct divert_blk *div_blk; | ||
193 | struct net_device *dev; | ||
194 | int ret; | ||
195 | |||
196 | switch (cmd) { | ||
197 | case SIOCGIFDIVERT: | ||
198 | DVDBG("SIOCGIFDIVERT, copy_from_user"); | ||
199 | if (copy_from_user(&div_cf, arg, sizeof(struct divert_cf))) | ||
200 | return -EFAULT; | ||
201 | DVDBG("before check_args"); | ||
202 | ret = check_args(&div_cf, &dev); | ||
203 | if (ret) | ||
204 | return ret; | ||
205 | DVDBG("after checkargs"); | ||
206 | div_blk = dev->divert; | ||
207 | |||
208 | DVDBG("befre switch()"); | ||
209 | switch (div_cf.cmd) { | ||
210 | case DIVCMD_GETSTATUS: | ||
211 | /* Now, just give the user the raw divert block | ||
212 | * for him to play with :) | ||
213 | */ | ||
214 | if (copy_to_user(div_cf.arg1.ptr, dev->divert, | ||
215 | sizeof(struct divert_blk))) | ||
216 | return -EFAULT; | ||
217 | break; | ||
218 | |||
219 | case DIVCMD_GETVERSION: | ||
220 | DVDBG("GETVERSION: checking ptr"); | ||
221 | if (div_cf.arg1.ptr == NULL) | ||
222 | return -EINVAL; | ||
223 | DVDBG("GETVERSION: copying data to userland"); | ||
224 | if (copy_to_user(div_cf.arg1.ptr, | ||
225 | sysctl_divert_version, 32)) | ||
226 | return -EFAULT; | ||
227 | DVDBG("GETVERSION: data copied"); | ||
228 | break; | ||
229 | |||
230 | default: | ||
231 | return -EINVAL; | ||
232 | } | ||
233 | |||
234 | break; | ||
235 | |||
236 | case SIOCSIFDIVERT: | ||
237 | if (copy_from_user(&div_cf, arg, sizeof(struct divert_cf))) | ||
238 | return -EFAULT; | ||
239 | |||
240 | ret = check_args(&div_cf, &dev); | ||
241 | if (ret) | ||
242 | return ret; | ||
243 | |||
244 | div_blk = dev->divert; | ||
245 | |||
246 | switch(div_cf.cmd) { | ||
247 | case DIVCMD_RESET: | ||
248 | div_blk->divert = 0; | ||
249 | div_blk->protos = DIVERT_PROTO_NONE; | ||
250 | memset(div_blk->tcp_dst, 0, | ||
251 | MAX_DIVERT_PORTS * sizeof(u16)); | ||
252 | memset(div_blk->tcp_src, 0, | ||
253 | MAX_DIVERT_PORTS * sizeof(u16)); | ||
254 | memset(div_blk->udp_dst, 0, | ||
255 | MAX_DIVERT_PORTS * sizeof(u16)); | ||
256 | memset(div_blk->udp_src, 0, | ||
257 | MAX_DIVERT_PORTS * sizeof(u16)); | ||
258 | return 0; | ||
259 | |||
260 | case DIVCMD_DIVERT: | ||
261 | switch(div_cf.arg1.int32) { | ||
262 | case DIVARG1_ENABLE: | ||
263 | if (div_blk->divert) | ||
264 | return -EALREADY; | ||
265 | div_blk->divert = 1; | ||
266 | break; | ||
267 | |||
268 | case DIVARG1_DISABLE: | ||
269 | if (!div_blk->divert) | ||
270 | return -EALREADY; | ||
271 | div_blk->divert = 0; | ||
272 | break; | ||
273 | |||
274 | default: | ||
275 | return -EINVAL; | ||
276 | } | ||
277 | |||
278 | break; | ||
279 | |||
280 | case DIVCMD_IP: | ||
281 | switch(div_cf.arg1.int32) { | ||
282 | case DIVARG1_ENABLE: | ||
283 | if (div_blk->protos & DIVERT_PROTO_IP) | ||
284 | return -EALREADY; | ||
285 | div_blk->protos |= DIVERT_PROTO_IP; | ||
286 | break; | ||
287 | |||
288 | case DIVARG1_DISABLE: | ||
289 | if (!(div_blk->protos & DIVERT_PROTO_IP)) | ||
290 | return -EALREADY; | ||
291 | div_blk->protos &= ~DIVERT_PROTO_IP; | ||
292 | break; | ||
293 | |||
294 | default: | ||
295 | return -EINVAL; | ||
296 | } | ||
297 | |||
298 | break; | ||
299 | |||
300 | case DIVCMD_TCP: | ||
301 | switch(div_cf.arg1.int32) { | ||
302 | case DIVARG1_ENABLE: | ||
303 | if (div_blk->protos & DIVERT_PROTO_TCP) | ||
304 | return -EALREADY; | ||
305 | div_blk->protos |= DIVERT_PROTO_TCP; | ||
306 | break; | ||
307 | |||
308 | case DIVARG1_DISABLE: | ||
309 | if (!(div_blk->protos & DIVERT_PROTO_TCP)) | ||
310 | return -EALREADY; | ||
311 | div_blk->protos &= ~DIVERT_PROTO_TCP; | ||
312 | break; | ||
313 | |||
314 | default: | ||
315 | return -EINVAL; | ||
316 | } | ||
317 | |||
318 | break; | ||
319 | |||
320 | case DIVCMD_TCPDST: | ||
321 | switch(div_cf.arg1.int32) { | ||
322 | case DIVARG1_ADD: | ||
323 | return add_port(div_blk->tcp_dst, | ||
324 | div_cf.arg2.uint16); | ||
325 | |||
326 | case DIVARG1_REMOVE: | ||
327 | return remove_port(div_blk->tcp_dst, | ||
328 | div_cf.arg2.uint16); | ||
329 | |||
330 | default: | ||
331 | return -EINVAL; | ||
332 | } | ||
333 | |||
334 | break; | ||
335 | |||
336 | case DIVCMD_TCPSRC: | ||
337 | switch(div_cf.arg1.int32) { | ||
338 | case DIVARG1_ADD: | ||
339 | return add_port(div_blk->tcp_src, | ||
340 | div_cf.arg2.uint16); | ||
341 | |||
342 | case DIVARG1_REMOVE: | ||
343 | return remove_port(div_blk->tcp_src, | ||
344 | div_cf.arg2.uint16); | ||
345 | |||
346 | default: | ||
347 | return -EINVAL; | ||
348 | } | ||
349 | |||
350 | break; | ||
351 | |||
352 | case DIVCMD_UDP: | ||
353 | switch(div_cf.arg1.int32) { | ||
354 | case DIVARG1_ENABLE: | ||
355 | if (div_blk->protos & DIVERT_PROTO_UDP) | ||
356 | return -EALREADY; | ||
357 | div_blk->protos |= DIVERT_PROTO_UDP; | ||
358 | break; | ||
359 | |||
360 | case DIVARG1_DISABLE: | ||
361 | if (!(div_blk->protos & DIVERT_PROTO_UDP)) | ||
362 | return -EALREADY; | ||
363 | div_blk->protos &= ~DIVERT_PROTO_UDP; | ||
364 | break; | ||
365 | |||
366 | default: | ||
367 | return -EINVAL; | ||
368 | } | ||
369 | |||
370 | break; | ||
371 | |||
372 | case DIVCMD_UDPDST: | ||
373 | switch(div_cf.arg1.int32) { | ||
374 | case DIVARG1_ADD: | ||
375 | return add_port(div_blk->udp_dst, | ||
376 | div_cf.arg2.uint16); | ||
377 | |||
378 | case DIVARG1_REMOVE: | ||
379 | return remove_port(div_blk->udp_dst, | ||
380 | div_cf.arg2.uint16); | ||
381 | |||
382 | default: | ||
383 | return -EINVAL; | ||
384 | } | ||
385 | |||
386 | break; | ||
387 | |||
388 | case DIVCMD_UDPSRC: | ||
389 | switch(div_cf.arg1.int32) { | ||
390 | case DIVARG1_ADD: | ||
391 | return add_port(div_blk->udp_src, | ||
392 | div_cf.arg2.uint16); | ||
393 | |||
394 | case DIVARG1_REMOVE: | ||
395 | return remove_port(div_blk->udp_src, | ||
396 | div_cf.arg2.uint16); | ||
397 | |||
398 | default: | ||
399 | return -EINVAL; | ||
400 | } | ||
401 | |||
402 | break; | ||
403 | |||
404 | case DIVCMD_ICMP: | ||
405 | switch(div_cf.arg1.int32) { | ||
406 | case DIVARG1_ENABLE: | ||
407 | if (div_blk->protos & DIVERT_PROTO_ICMP) | ||
408 | return -EALREADY; | ||
409 | div_blk->protos |= DIVERT_PROTO_ICMP; | ||
410 | break; | ||
411 | |||
412 | case DIVARG1_DISABLE: | ||
413 | if (!(div_blk->protos & DIVERT_PROTO_ICMP)) | ||
414 | return -EALREADY; | ||
415 | div_blk->protos &= ~DIVERT_PROTO_ICMP; | ||
416 | break; | ||
417 | |||
418 | default: | ||
419 | return -EINVAL; | ||
420 | } | ||
421 | |||
422 | break; | ||
423 | |||
424 | default: | ||
425 | return -EINVAL; | ||
426 | } | ||
427 | |||
428 | break; | ||
429 | |||
430 | default: | ||
431 | return -EINVAL; | ||
432 | } | ||
433 | |||
434 | return 0; | ||
435 | } | ||
436 | |||
437 | |||
438 | /* | ||
439 | * Check if packet should have its dest mac address set to the box itself | ||
440 | * for diversion | ||
441 | */ | ||
442 | |||
443 | #define ETH_DIVERT_FRAME(skb) \ | ||
444 | memcpy(eth_hdr(skb), skb->dev->dev_addr, ETH_ALEN); \ | ||
445 | skb->pkt_type=PACKET_HOST | ||
446 | |||
447 | void divert_frame(struct sk_buff *skb) | ||
448 | { | ||
449 | struct ethhdr *eth = eth_hdr(skb); | ||
450 | struct iphdr *iph; | ||
451 | struct tcphdr *tcph; | ||
452 | struct udphdr *udph; | ||
453 | struct divert_blk *divert = skb->dev->divert; | ||
454 | int i, src, dst; | ||
455 | unsigned char *skb_data_end = skb->data + skb->len; | ||
456 | |||
457 | /* Packet is already aimed at us, return */ | ||
458 | if (!compare_ether_addr(eth->h_dest, skb->dev->dev_addr)) | ||
459 | return; | ||
460 | |||
461 | /* proto is not IP, do nothing */ | ||
462 | if (eth->h_proto != htons(ETH_P_IP)) | ||
463 | return; | ||
464 | |||
465 | /* Divert all IP frames ? */ | ||
466 | if (divert->protos & DIVERT_PROTO_IP) { | ||
467 | ETH_DIVERT_FRAME(skb); | ||
468 | return; | ||
469 | } | ||
470 | |||
471 | /* Check for possible (maliciously) malformed IP frame (thanks Dave) */ | ||
472 | iph = (struct iphdr *) skb->data; | ||
473 | if (((iph->ihl<<2)+(unsigned char*)(iph)) >= skb_data_end) { | ||
474 | printk(KERN_INFO "divert: malformed IP packet !\n"); | ||
475 | return; | ||
476 | } | ||
477 | |||
478 | switch (iph->protocol) { | ||
479 | /* Divert all ICMP frames ? */ | ||
480 | case IPPROTO_ICMP: | ||
481 | if (divert->protos & DIVERT_PROTO_ICMP) { | ||
482 | ETH_DIVERT_FRAME(skb); | ||
483 | return; | ||
484 | } | ||
485 | break; | ||
486 | |||
487 | /* Divert all TCP frames ? */ | ||
488 | case IPPROTO_TCP: | ||
489 | if (divert->protos & DIVERT_PROTO_TCP) { | ||
490 | ETH_DIVERT_FRAME(skb); | ||
491 | return; | ||
492 | } | ||
493 | |||
494 | /* Check for possible (maliciously) malformed IP | ||
495 | * frame (thanx Dave) | ||
496 | */ | ||
497 | tcph = (struct tcphdr *) | ||
498 | (((unsigned char *)iph) + (iph->ihl<<2)); | ||
499 | if (((unsigned char *)(tcph+1)) >= skb_data_end) { | ||
500 | printk(KERN_INFO "divert: malformed TCP packet !\n"); | ||
501 | return; | ||
502 | } | ||
503 | |||
504 | /* Divert some tcp dst/src ports only ?*/ | ||
505 | for (i = 0; i < MAX_DIVERT_PORTS; i++) { | ||
506 | dst = divert->tcp_dst[i]; | ||
507 | src = divert->tcp_src[i]; | ||
508 | if ((dst && dst == tcph->dest) || | ||
509 | (src && src == tcph->source)) { | ||
510 | ETH_DIVERT_FRAME(skb); | ||
511 | return; | ||
512 | } | ||
513 | } | ||
514 | break; | ||
515 | |||
516 | /* Divert all UDP frames ? */ | ||
517 | case IPPROTO_UDP: | ||
518 | if (divert->protos & DIVERT_PROTO_UDP) { | ||
519 | ETH_DIVERT_FRAME(skb); | ||
520 | return; | ||
521 | } | ||
522 | |||
523 | /* Check for possible (maliciously) malformed IP | ||
524 | * packet (thanks Dave) | ||
525 | */ | ||
526 | udph = (struct udphdr *) | ||
527 | (((unsigned char *)iph) + (iph->ihl<<2)); | ||
528 | if (((unsigned char *)(udph+1)) >= skb_data_end) { | ||
529 | printk(KERN_INFO | ||
530 | "divert: malformed UDP packet !\n"); | ||
531 | return; | ||
532 | } | ||
533 | |||
534 | /* Divert some udp dst/src ports only ? */ | ||
535 | for (i = 0; i < MAX_DIVERT_PORTS; i++) { | ||
536 | dst = divert->udp_dst[i]; | ||
537 | src = divert->udp_src[i]; | ||
538 | if ((dst && dst == udph->dest) || | ||
539 | (src && src == udph->source)) { | ||
540 | ETH_DIVERT_FRAME(skb); | ||
541 | return; | ||
542 | } | ||
543 | } | ||
544 | break; | ||
545 | } | ||
546 | } | ||
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c index 6b0e63cacd93..1df6cd4568d3 100644 --- a/net/core/fib_rules.c +++ b/net/core/fib_rules.c | |||
@@ -107,6 +107,22 @@ out: | |||
107 | 107 | ||
108 | EXPORT_SYMBOL_GPL(fib_rules_unregister); | 108 | EXPORT_SYMBOL_GPL(fib_rules_unregister); |
109 | 109 | ||
110 | static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops, | ||
111 | struct flowi *fl, int flags) | ||
112 | { | ||
113 | int ret = 0; | ||
114 | |||
115 | if (rule->ifindex && (rule->ifindex != fl->iif)) | ||
116 | goto out; | ||
117 | |||
118 | if ((rule->mark ^ fl->mark) & rule->mark_mask) | ||
119 | goto out; | ||
120 | |||
121 | ret = ops->match(rule, fl, flags); | ||
122 | out: | ||
123 | return (rule->flags & FIB_RULE_INVERT) ? !ret : ret; | ||
124 | } | ||
125 | |||
110 | int fib_rules_lookup(struct fib_rules_ops *ops, struct flowi *fl, | 126 | int fib_rules_lookup(struct fib_rules_ops *ops, struct flowi *fl, |
111 | int flags, struct fib_lookup_arg *arg) | 127 | int flags, struct fib_lookup_arg *arg) |
112 | { | 128 | { |
@@ -116,10 +132,7 @@ int fib_rules_lookup(struct fib_rules_ops *ops, struct flowi *fl, | |||
116 | rcu_read_lock(); | 132 | rcu_read_lock(); |
117 | 133 | ||
118 | list_for_each_entry_rcu(rule, ops->rules_list, list) { | 134 | list_for_each_entry_rcu(rule, ops->rules_list, list) { |
119 | if (rule->ifindex && (rule->ifindex != fl->iif)) | 135 | if (!fib_rule_match(rule, ops, fl, flags)) |
120 | continue; | ||
121 | |||
122 | if (!ops->match(rule, fl, flags)) | ||
123 | continue; | 136 | continue; |
124 | 137 | ||
125 | err = ops->action(rule, fl, flags, arg); | 138 | err = ops->action(rule, fl, flags, arg); |
@@ -179,6 +192,18 @@ int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) | |||
179 | rule->ifindex = dev->ifindex; | 192 | rule->ifindex = dev->ifindex; |
180 | } | 193 | } |
181 | 194 | ||
195 | if (tb[FRA_FWMARK]) { | ||
196 | rule->mark = nla_get_u32(tb[FRA_FWMARK]); | ||
197 | if (rule->mark) | ||
198 | /* compatibility: if the mark value is non-zero all bits | ||
199 | * are compared unless a mask is explicitly specified. | ||
200 | */ | ||
201 | rule->mark_mask = 0xFFFFFFFF; | ||
202 | } | ||
203 | |||
204 | if (tb[FRA_FWMASK]) | ||
205 | rule->mark_mask = nla_get_u32(tb[FRA_FWMASK]); | ||
206 | |||
182 | rule->action = frh->action; | 207 | rule->action = frh->action; |
183 | rule->flags = frh->flags; | 208 | rule->flags = frh->flags; |
184 | rule->table = frh_get_table(frh, tb); | 209 | rule->table = frh_get_table(frh, tb); |
@@ -250,6 +275,14 @@ int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) | |||
250 | nla_strcmp(tb[FRA_IFNAME], rule->ifname)) | 275 | nla_strcmp(tb[FRA_IFNAME], rule->ifname)) |
251 | continue; | 276 | continue; |
252 | 277 | ||
278 | if (tb[FRA_FWMARK] && | ||
279 | (rule->mark != nla_get_u32(tb[FRA_FWMARK]))) | ||
280 | continue; | ||
281 | |||
282 | if (tb[FRA_FWMASK] && | ||
283 | (rule->mark_mask != nla_get_u32(tb[FRA_FWMASK]))) | ||
284 | continue; | ||
285 | |||
253 | if (!ops->compare(rule, frh, tb)) | 286 | if (!ops->compare(rule, frh, tb)) |
254 | continue; | 287 | continue; |
255 | 288 | ||
@@ -273,6 +306,22 @@ errout: | |||
273 | return err; | 306 | return err; |
274 | } | 307 | } |
275 | 308 | ||
309 | static inline size_t fib_rule_nlmsg_size(struct fib_rules_ops *ops, | ||
310 | struct fib_rule *rule) | ||
311 | { | ||
312 | size_t payload = NLMSG_ALIGN(sizeof(struct fib_rule_hdr)) | ||
313 | + nla_total_size(IFNAMSIZ) /* FRA_IFNAME */ | ||
314 | + nla_total_size(4) /* FRA_PRIORITY */ | ||
315 | + nla_total_size(4) /* FRA_TABLE */ | ||
316 | + nla_total_size(4) /* FRA_FWMARK */ | ||
317 | + nla_total_size(4); /* FRA_FWMASK */ | ||
318 | |||
319 | if (ops->nlmsg_payload) | ||
320 | payload += ops->nlmsg_payload(rule); | ||
321 | |||
322 | return payload; | ||
323 | } | ||
324 | |||
276 | static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule, | 325 | static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule, |
277 | u32 pid, u32 seq, int type, int flags, | 326 | u32 pid, u32 seq, int type, int flags, |
278 | struct fib_rules_ops *ops) | 327 | struct fib_rules_ops *ops) |
@@ -298,6 +347,12 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule, | |||
298 | if (rule->pref) | 347 | if (rule->pref) |
299 | NLA_PUT_U32(skb, FRA_PRIORITY, rule->pref); | 348 | NLA_PUT_U32(skb, FRA_PRIORITY, rule->pref); |
300 | 349 | ||
350 | if (rule->mark) | ||
351 | NLA_PUT_U32(skb, FRA_FWMARK, rule->mark); | ||
352 | |||
353 | if (rule->mark_mask || rule->mark) | ||
354 | NLA_PUT_U32(skb, FRA_FWMASK, rule->mark_mask); | ||
355 | |||
301 | if (ops->fill(rule, skb, nlh, frh) < 0) | 356 | if (ops->fill(rule, skb, nlh, frh) < 0) |
302 | goto nla_put_failure; | 357 | goto nla_put_failure; |
303 | 358 | ||
@@ -345,15 +400,13 @@ static void notify_rule_change(int event, struct fib_rule *rule, | |||
345 | struct sk_buff *skb; | 400 | struct sk_buff *skb; |
346 | int err = -ENOBUFS; | 401 | int err = -ENOBUFS; |
347 | 402 | ||
348 | skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); | 403 | skb = nlmsg_new(fib_rule_nlmsg_size(ops, rule), GFP_KERNEL); |
349 | if (skb == NULL) | 404 | if (skb == NULL) |
350 | goto errout; | 405 | goto errout; |
351 | 406 | ||
352 | err = fib_nl_fill_rule(skb, rule, pid, nlh->nlmsg_seq, event, 0, ops); | 407 | err = fib_nl_fill_rule(skb, rule, pid, nlh->nlmsg_seq, event, 0, ops); |
353 | if (err < 0) { | 408 | /* failure implies BUG in fib_rule_nlmsg_size() */ |
354 | kfree_skb(skb); | 409 | BUG_ON(err < 0); |
355 | goto errout; | ||
356 | } | ||
357 | 410 | ||
358 | err = rtnl_notify(skb, pid, ops->nlgroup, nlh, GFP_KERNEL); | 411 | err = rtnl_notify(skb, pid, ops->nlgroup, nlh, GFP_KERNEL); |
359 | errout: | 412 | errout: |
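[Editorial note on the fib_rules.c hunk above] fib_rule_match() accepts a rule when (rule->mark ^ fl->mark) & rule->mark_mask is zero, i.e. when the packet mark agrees with the rule mark on every bit selected by the mask; per the compatibility comment, a non-zero FRA_FWMARK with no explicit FRA_FWMASK implies an all-ones mask. A small userspace check of that predicate, with hypothetical mark values:

#include <assert.h>
#include <stdint.h>

static int mark_matches(uint32_t rule_mark, uint32_t mask, uint32_t pkt_mark)
{
	return ((rule_mark ^ pkt_mark) & mask) == 0;
}

int main(void)
{
	assert(mark_matches(0x5, 0xffffffff, 0x5));    /* exact match */
	assert(!mark_matches(0x5, 0xffffffff, 0x4));   /* differs in a masked bit */
	assert(mark_matches(0x5, 0x0000000f, 0xa5));   /* only the low nibble is compared */
	assert(mark_matches(0x0, 0x00000000, 0x1234)); /* zero mask matches everything */
	return 0;
}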
diff --git a/net/core/filter.c b/net/core/filter.c index 6732782a5a40..0df843b667f4 100644 --- a/net/core/filter.c +++ b/net/core/filter.c | |||
@@ -178,7 +178,7 @@ unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int | |||
178 | load_w: | 178 | load_w: |
179 | ptr = load_pointer(skb, k, 4, &tmp); | 179 | ptr = load_pointer(skb, k, 4, &tmp); |
180 | if (ptr != NULL) { | 180 | if (ptr != NULL) { |
181 | A = ntohl(get_unaligned((u32 *)ptr)); | 181 | A = ntohl(get_unaligned((__be32 *)ptr)); |
182 | continue; | 182 | continue; |
183 | } | 183 | } |
184 | break; | 184 | break; |
@@ -187,7 +187,7 @@ load_w: | |||
187 | load_h: | 187 | load_h: |
188 | ptr = load_pointer(skb, k, 2, &tmp); | 188 | ptr = load_pointer(skb, k, 2, &tmp); |
189 | if (ptr != NULL) { | 189 | if (ptr != NULL) { |
190 | A = ntohs(get_unaligned((u16 *)ptr)); | 190 | A = ntohs(get_unaligned((__be16 *)ptr)); |
191 | continue; | 191 | continue; |
192 | } | 192 | } |
193 | break; | 193 | break; |
@@ -261,7 +261,7 @@ load_b: | |||
261 | */ | 261 | */ |
262 | switch (k-SKF_AD_OFF) { | 262 | switch (k-SKF_AD_OFF) { |
263 | case SKF_AD_PROTOCOL: | 263 | case SKF_AD_PROTOCOL: |
264 | A = htons(skb->protocol); | 264 | A = ntohs(skb->protocol); |
265 | continue; | 265 | continue; |
266 | case SKF_AD_PKTTYPE: | 266 | case SKF_AD_PKTTYPE: |
267 | A = skb->pkt_type; | 267 | A = skb->pkt_type; |
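[Editorial note on the filter.c hunk above] These changes only adjust type annotations: packet bytes are loaded as __be32/__be16 and converted with ntohl()/ntohs(), and skb->protocol (already a __be16) is converted with ntohs() rather than htons() — numerically both are byte swaps, so behaviour is unchanged, but ntohs() is the conversion sparse expects. A userspace sketch of a host-order load from a big-endian, possibly unaligned field, using a hypothetical packet buffer:

#include <assert.h>
#include <stdint.h>

static uint32_t load_be32(const unsigned char *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

int main(void)
{
	unsigned char pkt[] = { 0x00, 0x00, 0x08, 0x00 };  /* ends in 0x0800 = ETH_P_IP */

	assert(load_be32(pkt) == 0x0800);
	return 0;
}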
diff --git a/net/core/iovec.c b/net/core/iovec.c index 65e4b56fbc77..04b249c40b5b 100644 --- a/net/core/iovec.c +++ b/net/core/iovec.c | |||
@@ -158,9 +158,9 @@ int memcpy_fromiovecend(unsigned char *kdata, struct iovec *iov, int offset, | |||
158 | * call to this function will be unaligned also. | 158 | * call to this function will be unaligned also. |
159 | */ | 159 | */ |
160 | int csum_partial_copy_fromiovecend(unsigned char *kdata, struct iovec *iov, | 160 | int csum_partial_copy_fromiovecend(unsigned char *kdata, struct iovec *iov, |
161 | int offset, unsigned int len, int *csump) | 161 | int offset, unsigned int len, __wsum *csump) |
162 | { | 162 | { |
163 | int csum = *csump; | 163 | __wsum csum = *csump; |
164 | int partial_cnt = 0, err = 0; | 164 | int partial_cnt = 0, err = 0; |
165 | 165 | ||
166 | /* Skip over the finished iovecs */ | 166 | /* Skip over the finished iovecs */ |
diff --git a/net/core/neighbour.c b/net/core/neighbour.c index b4b478353b27..ba509a4a8e92 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c | |||
@@ -1266,10 +1266,9 @@ void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p, | |||
1266 | struct neigh_parms *neigh_parms_alloc(struct net_device *dev, | 1266 | struct neigh_parms *neigh_parms_alloc(struct net_device *dev, |
1267 | struct neigh_table *tbl) | 1267 | struct neigh_table *tbl) |
1268 | { | 1268 | { |
1269 | struct neigh_parms *p = kmalloc(sizeof(*p), GFP_KERNEL); | 1269 | struct neigh_parms *p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL); |
1270 | 1270 | ||
1271 | if (p) { | 1271 | if (p) { |
1272 | memcpy(p, &tbl->parms, sizeof(*p)); | ||
1273 | p->tbl = tbl; | 1272 | p->tbl = tbl; |
1274 | atomic_set(&p->refcnt, 1); | 1273 | atomic_set(&p->refcnt, 1); |
1275 | INIT_RCU_HEAD(&p->rcu_head); | 1274 | INIT_RCU_HEAD(&p->rcu_head); |
@@ -2410,20 +2409,27 @@ static struct file_operations neigh_stat_seq_fops = { | |||
2410 | #endif /* CONFIG_PROC_FS */ | 2409 | #endif /* CONFIG_PROC_FS */ |
2411 | 2410 | ||
2412 | #ifdef CONFIG_ARPD | 2411 | #ifdef CONFIG_ARPD |
2412 | static inline size_t neigh_nlmsg_size(void) | ||
2413 | { | ||
2414 | return NLMSG_ALIGN(sizeof(struct ndmsg)) | ||
2415 | + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */ | ||
2416 | + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */ | ||
2417 | + nla_total_size(sizeof(struct nda_cacheinfo)) | ||
2418 | + nla_total_size(4); /* NDA_PROBES */ | ||
2419 | } | ||
2420 | |||
2413 | static void __neigh_notify(struct neighbour *n, int type, int flags) | 2421 | static void __neigh_notify(struct neighbour *n, int type, int flags) |
2414 | { | 2422 | { |
2415 | struct sk_buff *skb; | 2423 | struct sk_buff *skb; |
2416 | int err = -ENOBUFS; | 2424 | int err = -ENOBUFS; |
2417 | 2425 | ||
2418 | skb = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC); | 2426 | skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC); |
2419 | if (skb == NULL) | 2427 | if (skb == NULL) |
2420 | goto errout; | 2428 | goto errout; |
2421 | 2429 | ||
2422 | err = neigh_fill_info(skb, n, 0, 0, type, flags); | 2430 | err = neigh_fill_info(skb, n, 0, 0, type, flags); |
2423 | if (err < 0) { | 2431 | /* failure implies BUG in neigh_nlmsg_size() */ |
2424 | kfree_skb(skb); | 2432 | BUG_ON(err < 0); |
2425 | goto errout; | ||
2426 | } | ||
2427 | 2433 | ||
2428 | err = rtnl_notify(skb, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC); | 2434 | err = rtnl_notify(skb, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC); |
2429 | errout: | 2435 | errout: |
@@ -2618,14 +2624,14 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p, | |||
2618 | int p_id, int pdev_id, char *p_name, | 2624 | int p_id, int pdev_id, char *p_name, |
2619 | proc_handler *handler, ctl_handler *strategy) | 2625 | proc_handler *handler, ctl_handler *strategy) |
2620 | { | 2626 | { |
2621 | struct neigh_sysctl_table *t = kmalloc(sizeof(*t), GFP_KERNEL); | 2627 | struct neigh_sysctl_table *t = kmemdup(&neigh_sysctl_template, |
2628 | sizeof(*t), GFP_KERNEL); | ||
2622 | const char *dev_name_source = NULL; | 2629 | const char *dev_name_source = NULL; |
2623 | char *dev_name = NULL; | 2630 | char *dev_name = NULL; |
2624 | int err = 0; | 2631 | int err = 0; |
2625 | 2632 | ||
2626 | if (!t) | 2633 | if (!t) |
2627 | return -ENOBUFS; | 2634 | return -ENOBUFS; |
2628 | memcpy(t, &neigh_sysctl_template, sizeof(*t)); | ||
2629 | t->neigh_vars[0].data = &p->mcast_probes; | 2635 | t->neigh_vars[0].data = &p->mcast_probes; |
2630 | t->neigh_vars[1].data = &p->ucast_probes; | 2636 | t->neigh_vars[1].data = &p->ucast_probes; |
2631 | t->neigh_vars[2].data = &p->app_probes; | 2637 | t->neigh_vars[2].data = &p->app_probes; |
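[Editorial note on the neighbour.c hunks above] neigh_parms_alloc() and neigh_sysctl_register() switch from kmalloc() followed by memcpy() to kmemdup(), which allocates and copies the template in one call. A userspace equivalent of the pattern (memdup is an illustrative name, not a kernel API):

#include <stdlib.h>
#include <string.h>

static void *memdup(const void *src, size_t len)
{
	void *p = malloc(len);          /* kmemdup: kmalloc(len, gfp)... */

	if (p)
		memcpy(p, src, len);    /* ...then copy the template over */
	return p;
}

int main(void)
{
	const char tmpl[] = "template";
	char *copy = memdup(tmpl, sizeof(tmpl));

	free(copy);
	return 0;
}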
diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 63f24c914ddb..b3c559b9ac35 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c | |||
@@ -34,18 +34,12 @@ | |||
34 | #define MAX_UDP_CHUNK 1460 | 34 | #define MAX_UDP_CHUNK 1460 |
35 | #define MAX_SKBS 32 | 35 | #define MAX_SKBS 32 |
36 | #define MAX_QUEUE_DEPTH (MAX_SKBS / 2) | 36 | #define MAX_QUEUE_DEPTH (MAX_SKBS / 2) |
37 | #define MAX_RETRIES 20000 | ||
38 | 37 | ||
39 | static DEFINE_SPINLOCK(skb_list_lock); | 38 | static struct sk_buff_head skb_pool; |
40 | static int nr_skbs; | ||
41 | static struct sk_buff *skbs; | ||
42 | |||
43 | static DEFINE_SPINLOCK(queue_lock); | ||
44 | static int queue_depth; | ||
45 | static struct sk_buff *queue_head, *queue_tail; | ||
46 | 39 | ||
47 | static atomic_t trapped; | 40 | static atomic_t trapped; |
48 | 41 | ||
42 | #define USEC_PER_POLL 50 | ||
49 | #define NETPOLL_RX_ENABLED 1 | 43 | #define NETPOLL_RX_ENABLED 1 |
50 | #define NETPOLL_RX_DROP 2 | 44 | #define NETPOLL_RX_DROP 2 |
51 | 45 | ||
@@ -58,52 +52,34 @@ static void arp_reply(struct sk_buff *skb); | |||
58 | 52 | ||
59 | static void queue_process(struct work_struct *work) | 53 | static void queue_process(struct work_struct *work) |
60 | { | 54 | { |
61 | unsigned long flags; | 55 | struct netpoll_info *npinfo = |
56 | container_of(work, struct netpoll_info, tx_work.work); | ||
62 | struct sk_buff *skb; | 57 | struct sk_buff *skb; |
63 | 58 | ||
64 | while (queue_head) { | 59 | while ((skb = skb_dequeue(&npinfo->txq))) { |
65 | spin_lock_irqsave(&queue_lock, flags); | 60 | struct net_device *dev = skb->dev; |
66 | |||
67 | skb = queue_head; | ||
68 | queue_head = skb->next; | ||
69 | if (skb == queue_tail) | ||
70 | queue_head = NULL; | ||
71 | |||
72 | queue_depth--; | ||
73 | |||
74 | spin_unlock_irqrestore(&queue_lock, flags); | ||
75 | |||
76 | dev_queue_xmit(skb); | ||
77 | } | ||
78 | } | ||
79 | 61 | ||
80 | static DECLARE_WORK(send_queue, queue_process); | 62 | if (!netif_device_present(dev) || !netif_running(dev)) { |
63 | __kfree_skb(skb); | ||
64 | continue; | ||
65 | } | ||
81 | 66 | ||
82 | void netpoll_queue(struct sk_buff *skb) | 67 | netif_tx_lock_bh(dev); |
83 | { | 68 | if (netif_queue_stopped(dev) || |
84 | unsigned long flags; | 69 | dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) { |
70 | skb_queue_head(&npinfo->txq, skb); | ||
71 | netif_tx_unlock_bh(dev); | ||
85 | 72 | ||
86 | if (queue_depth == MAX_QUEUE_DEPTH) { | 73 | schedule_delayed_work(&npinfo->tx_work, HZ/10); |
87 | __kfree_skb(skb); | 74 | return; |
88 | return; | 75 | } |
89 | } | 76 | } |
90 | |||
91 | spin_lock_irqsave(&queue_lock, flags); | ||
92 | if (!queue_head) | ||
93 | queue_head = skb; | ||
94 | else | ||
95 | queue_tail->next = skb; | ||
96 | queue_tail = skb; | ||
97 | queue_depth++; | ||
98 | spin_unlock_irqrestore(&queue_lock, flags); | ||
99 | |||
100 | schedule_work(&send_queue); | ||
101 | } | 77 | } |
102 | 78 | ||
103 | static int checksum_udp(struct sk_buff *skb, struct udphdr *uh, | 79 | static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh, |
104 | unsigned short ulen, u32 saddr, u32 daddr) | 80 | unsigned short ulen, __be32 saddr, __be32 daddr) |
105 | { | 81 | { |
106 | unsigned int psum; | 82 | __wsum psum; |
107 | 83 | ||
108 | if (uh->check == 0 || skb->ip_summed == CHECKSUM_UNNECESSARY) | 84 | if (uh->check == 0 || skb->ip_summed == CHECKSUM_UNNECESSARY) |
109 | return 0; | 85 | return 0; |
@@ -111,7 +87,7 @@ static int checksum_udp(struct sk_buff *skb, struct udphdr *uh, | |||
111 | psum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0); | 87 | psum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0); |
112 | 88 | ||
113 | if (skb->ip_summed == CHECKSUM_COMPLETE && | 89 | if (skb->ip_summed == CHECKSUM_COMPLETE && |
114 | !(u16)csum_fold(csum_add(psum, skb->csum))) | 90 | !csum_fold(csum_add(psum, skb->csum))) |
115 | return 0; | 91 | return 0; |
116 | 92 | ||
117 | skb->csum = psum; | 93 | skb->csum = psum; |
@@ -167,12 +143,11 @@ static void service_arp_queue(struct netpoll_info *npi) | |||
167 | arp_reply(skb); | 143 | arp_reply(skb); |
168 | skb = skb_dequeue(&npi->arp_tx); | 144 | skb = skb_dequeue(&npi->arp_tx); |
169 | } | 145 | } |
170 | return; | ||
171 | } | 146 | } |
172 | 147 | ||
173 | void netpoll_poll(struct netpoll *np) | 148 | void netpoll_poll(struct netpoll *np) |
174 | { | 149 | { |
175 | if(!np->dev || !netif_running(np->dev) || !np->dev->poll_controller) | 150 | if (!np->dev || !netif_running(np->dev) || !np->dev->poll_controller) |
176 | return; | 151 | return; |
177 | 152 | ||
178 | /* Process pending work on NIC */ | 153 | /* Process pending work on NIC */ |
@@ -190,17 +165,15 @@ static void refill_skbs(void) | |||
190 | struct sk_buff *skb; | 165 | struct sk_buff *skb; |
191 | unsigned long flags; | 166 | unsigned long flags; |
192 | 167 | ||
193 | spin_lock_irqsave(&skb_list_lock, flags); | 168 | spin_lock_irqsave(&skb_pool.lock, flags); |
194 | while (nr_skbs < MAX_SKBS) { | 169 | while (skb_pool.qlen < MAX_SKBS) { |
195 | skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC); | 170 | skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC); |
196 | if (!skb) | 171 | if (!skb) |
197 | break; | 172 | break; |
198 | 173 | ||
199 | skb->next = skbs; | 174 | __skb_queue_tail(&skb_pool, skb); |
200 | skbs = skb; | ||
201 | nr_skbs++; | ||
202 | } | 175 | } |
203 | spin_unlock_irqrestore(&skb_list_lock, flags); | 176 | spin_unlock_irqrestore(&skb_pool.lock, flags); |
204 | } | 177 | } |
205 | 178 | ||
206 | static void zap_completion_queue(void) | 179 | static void zap_completion_queue(void) |
@@ -219,7 +192,7 @@ static void zap_completion_queue(void) | |||
219 | while (clist != NULL) { | 192 | while (clist != NULL) { |
220 | struct sk_buff *skb = clist; | 193 | struct sk_buff *skb = clist; |
221 | clist = clist->next; | 194 | clist = clist->next; |
222 | if(skb->destructor) | 195 | if (skb->destructor) |
223 | dev_kfree_skb_any(skb); /* put this one back */ | 196 | dev_kfree_skb_any(skb); /* put this one back */ |
224 | else | 197 | else |
225 | __kfree_skb(skb); | 198 | __kfree_skb(skb); |
@@ -229,38 +202,25 @@ static void zap_completion_queue(void) | |||
229 | put_cpu_var(softnet_data); | 202 | put_cpu_var(softnet_data); |
230 | } | 203 | } |
231 | 204 | ||
232 | static struct sk_buff * find_skb(struct netpoll *np, int len, int reserve) | 205 | static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve) |
233 | { | 206 | { |
234 | int once = 1, count = 0; | 207 | int count = 0; |
235 | unsigned long flags; | 208 | struct sk_buff *skb; |
236 | struct sk_buff *skb = NULL; | ||
237 | 209 | ||
238 | zap_completion_queue(); | 210 | zap_completion_queue(); |
211 | refill_skbs(); | ||
239 | repeat: | 212 | repeat: |
240 | if (nr_skbs < MAX_SKBS) | ||
241 | refill_skbs(); | ||
242 | 213 | ||
243 | skb = alloc_skb(len, GFP_ATOMIC); | 214 | skb = alloc_skb(len, GFP_ATOMIC); |
215 | if (!skb) | ||
216 | skb = skb_dequeue(&skb_pool); | ||
244 | 217 | ||
245 | if (!skb) { | 218 | if (!skb) { |
246 | spin_lock_irqsave(&skb_list_lock, flags); | 219 | if (++count < 10) { |
247 | skb = skbs; | 220 | netpoll_poll(np); |
248 | if (skb) { | 221 | goto repeat; |
249 | skbs = skb->next; | ||
250 | skb->next = NULL; | ||
251 | nr_skbs--; | ||
252 | } | 222 | } |
253 | spin_unlock_irqrestore(&skb_list_lock, flags); | 223 | return NULL; |
254 | } | ||
255 | |||
256 | if(!skb) { | ||
257 | count++; | ||
258 | if (once && (count == 1000000)) { | ||
259 | printk("out of netpoll skbs!\n"); | ||
260 | once = 0; | ||
261 | } | ||
262 | netpoll_poll(np); | ||
263 | goto repeat; | ||
264 | } | 224 | } |
265 | 225 | ||
266 | atomic_set(&skb->users, 1); | 226 | atomic_set(&skb->users, 1); |
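[Editorial note on the find_skb()/refill_skbs() hunk above] The hand-rolled skb list is replaced by a standard sk_buff_head pool: find_skb() prefers a fresh alloc_skb(), dips into the pre-filled pool when that fails, and gives up with NULL after ten attempts instead of spinning with a one-million-iteration warning counter. A toy userspace version of the pool-with-fallback idea (pool size and names are illustrative):

#include <stdlib.h>

#define POOL_MAX 32

static void *pool[POOL_MAX];
static int pool_len;

/* Top the pool up to POOL_MAX spare buffers, mirroring refill_skbs(). */
static void refill_pool(size_t size)
{
	while (pool_len < POOL_MAX) {
		void *buf = malloc(size);

		if (!buf)
			break;
		pool[pool_len++] = buf;
	}
}

/* Prefer a fresh allocation; dip into the pool only when that fails. */
static void *get_buf(size_t size)
{
	void *buf = malloc(size);

	if (!buf && pool_len > 0)
		buf = pool[--pool_len];
	return buf;
}

int main(void)
{
	refill_pool(256);
	free(get_buf(256));
	return 0;
}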
@@ -270,50 +230,40 @@ repeat: | |||
270 | 230 | ||
271 | static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb) | 231 | static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb) |
272 | { | 232 | { |
273 | int status; | 233 | int status = NETDEV_TX_BUSY; |
274 | struct netpoll_info *npinfo; | 234 | unsigned long tries; |
275 | 235 | struct net_device *dev = np->dev; | |
276 | if (!np || !np->dev || !netif_running(np->dev)) { | 236 | struct netpoll_info *npinfo = np->dev->npinfo; |
277 | __kfree_skb(skb); | 237 | |
278 | return; | 238 | if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) { |
279 | } | 239 | __kfree_skb(skb); |
280 | 240 | return; | |
281 | npinfo = np->dev->npinfo; | 241 | } |
282 | 242 | ||
283 | /* avoid recursion */ | 243 | /* don't get messages out of order, and no recursion */ |
284 | if (npinfo->poll_owner == smp_processor_id() || | 244 | if (skb_queue_len(&npinfo->txq) == 0 && |
285 | np->dev->xmit_lock_owner == smp_processor_id()) { | 245 | npinfo->poll_owner != smp_processor_id() && |
286 | if (np->drop) | 246 | netif_tx_trylock(dev)) { |
287 | np->drop(skb); | 247 | /* try until next clock tick */ |
288 | else | 248 | for (tries = jiffies_to_usecs(1)/USEC_PER_POLL; tries > 0; --tries) { |
289 | __kfree_skb(skb); | 249 | if (!netif_queue_stopped(dev)) |
290 | return; | 250 | status = dev->hard_start_xmit(skb, dev); |
291 | } | ||
292 | |||
293 | do { | ||
294 | npinfo->tries--; | ||
295 | netif_tx_lock(np->dev); | ||
296 | 251 | ||
297 | /* | 252 | if (status == NETDEV_TX_OK) |
298 | * network drivers do not expect to be called if the queue is | 253 | break; |
299 | * stopped. | ||
300 | */ | ||
301 | status = NETDEV_TX_BUSY; | ||
302 | if (!netif_queue_stopped(np->dev)) | ||
303 | status = np->dev->hard_start_xmit(skb, np->dev); | ||
304 | 254 | ||
305 | netif_tx_unlock(np->dev); | 255 | /* tickle device maybe there is some cleanup */ |
256 | netpoll_poll(np); | ||
306 | 257 | ||
307 | /* success */ | 258 | udelay(USEC_PER_POLL); |
308 | if(!status) { | ||
309 | npinfo->tries = MAX_RETRIES; /* reset */ | ||
310 | return; | ||
311 | } | 259 | } |
260 | netif_tx_unlock(dev); | ||
261 | } | ||
312 | 262 | ||
313 | /* transmit busy */ | 263 | if (status != NETDEV_TX_OK) { |
314 | netpoll_poll(np); | 264 | skb_queue_tail(&npinfo->txq, skb); |
315 | udelay(50); | 265 | schedule_delayed_work(&npinfo->tx_work,0); |
316 | } while (npinfo->tries > 0); | 266 | } |
317 | } | 267 | } |
318 | 268 | ||
319 | void netpoll_send_udp(struct netpoll *np, const char *msg, int len) | 269 | void netpoll_send_udp(struct netpoll *np, const char *msg, int len) |
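[Editorial note on the netpoll_send_skb() hunk above] The rewritten path tries the driver's hard_start_xmit() directly — only when nothing is already queued and the tx lock can be taken — for roughly one clock tick's worth of 50 µs polls, then parks the packet on npinfo->txq for the delayed work queue_process() to retry later. A userspace sketch of that bounded-retry-then-defer shape, with stand-in hooks (try_transmit, poll_device, defer_to_queue are hypothetical):

#include <stdbool.h>
#include <stdio.h>

#define USEC_PER_POLL 50

static bool queue_stopped(void)     { return false; }
static int  try_transmit(void)      { return 0; }   /* 0 stands in for NETDEV_TX_OK */
static void poll_device(void)       { }             /* reclaim completed descriptors */
static void defer_to_queue(void)    { puts("deferred to work queue"); }

static void send_with_retry(unsigned int tick_usecs)
{
	unsigned int tries;
	int status = -1;                                /* stands in for NETDEV_TX_BUSY */

	for (tries = tick_usecs / USEC_PER_POLL; tries > 0; --tries) {
		if (!queue_stopped())
			status = try_transmit();
		if (status == 0)
			return;                         /* sent: done */
		poll_device();                          /* give the NIC a chance to drain */
	}
	defer_to_queue();                               /* still busy: hand off */
}

int main(void)
{
	send_with_retry(1000);                          /* e.g. a 1 ms tick */
	return 0;
}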
@@ -345,7 +295,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len) | |||
345 | udp_len, IPPROTO_UDP, | 295 | udp_len, IPPROTO_UDP, |
346 | csum_partial((unsigned char *)udph, udp_len, 0)); | 296 | csum_partial((unsigned char *)udph, udp_len, 0)); |
347 | if (udph->check == 0) | 297 | if (udph->check == 0) |
348 | udph->check = -1; | 298 | udph->check = CSUM_MANGLED_0; |
349 | 299 | ||
350 | skb->nh.iph = iph = (struct iphdr *)skb_push(skb, sizeof(*iph)); | 300 | skb->nh.iph = iph = (struct iphdr *)skb_push(skb, sizeof(*iph)); |
351 | 301 | ||
@@ -379,7 +329,7 @@ static void arp_reply(struct sk_buff *skb) | |||
379 | struct arphdr *arp; | 329 | struct arphdr *arp; |
380 | unsigned char *arp_ptr; | 330 | unsigned char *arp_ptr; |
381 | int size, type = ARPOP_REPLY, ptype = ETH_P_ARP; | 331 | int size, type = ARPOP_REPLY, ptype = ETH_P_ARP; |
382 | u32 sip, tip; | 332 | __be32 sip, tip; |
383 | struct sk_buff *send_skb; | 333 | struct sk_buff *send_skb; |
384 | struct netpoll *np = NULL; | 334 | struct netpoll *np = NULL; |
385 | 335 | ||
@@ -431,8 +381,8 @@ static void arp_reply(struct sk_buff *skb) | |||
431 | 381 | ||
432 | if (np->dev->hard_header && | 382 | if (np->dev->hard_header && |
433 | np->dev->hard_header(send_skb, skb->dev, ptype, | 383 | np->dev->hard_header(send_skb, skb->dev, ptype, |
434 | np->remote_mac, np->local_mac, | 384 | np->remote_mac, np->local_mac, |
435 | send_skb->len) < 0) { | 385 | send_skb->len) < 0) { |
436 | kfree_skb(send_skb); | 386 | kfree_skb(send_skb); |
437 | return; | 387 | return; |
438 | } | 388 | } |
@@ -470,7 +420,6 @@ int __netpoll_rx(struct sk_buff *skb) | |||
470 | struct netpoll_info *npi = skb->dev->npinfo; | 420 | struct netpoll_info *npi = skb->dev->npinfo; |
471 | struct netpoll *np = npi->rx_np; | 421 | struct netpoll *np = npi->rx_np; |
472 | 422 | ||
473 | |||
474 | if (!np) | 423 | if (!np) |
475 | goto out; | 424 | goto out; |
476 | if (skb->dev->type != ARPHRD_ETHER) | 425 | if (skb->dev->type != ARPHRD_ETHER) |
@@ -543,47 +492,47 @@ int netpoll_parse_options(struct netpoll *np, char *opt) | |||
543 | { | 492 | { |
544 | char *cur=opt, *delim; | 493 | char *cur=opt, *delim; |
545 | 494 | ||
546 | if(*cur != '@') { | 495 | if (*cur != '@') { |
547 | if ((delim = strchr(cur, '@')) == NULL) | 496 | if ((delim = strchr(cur, '@')) == NULL) |
548 | goto parse_failed; | 497 | goto parse_failed; |
549 | *delim=0; | 498 | *delim = 0; |
550 | np->local_port=simple_strtol(cur, NULL, 10); | 499 | np->local_port = simple_strtol(cur, NULL, 10); |
551 | cur=delim; | 500 | cur = delim; |
552 | } | 501 | } |
553 | cur++; | 502 | cur++; |
554 | printk(KERN_INFO "%s: local port %d\n", np->name, np->local_port); | 503 | printk(KERN_INFO "%s: local port %d\n", np->name, np->local_port); |
555 | 504 | ||
556 | if(*cur != '/') { | 505 | if (*cur != '/') { |
557 | if ((delim = strchr(cur, '/')) == NULL) | 506 | if ((delim = strchr(cur, '/')) == NULL) |
558 | goto parse_failed; | 507 | goto parse_failed; |
559 | *delim=0; | 508 | *delim = 0; |
560 | np->local_ip=ntohl(in_aton(cur)); | 509 | np->local_ip = ntohl(in_aton(cur)); |
561 | cur=delim; | 510 | cur = delim; |
562 | 511 | ||
563 | printk(KERN_INFO "%s: local IP %d.%d.%d.%d\n", | 512 | printk(KERN_INFO "%s: local IP %d.%d.%d.%d\n", |
564 | np->name, HIPQUAD(np->local_ip)); | 513 | np->name, HIPQUAD(np->local_ip)); |
565 | } | 514 | } |
566 | cur++; | 515 | cur++; |
567 | 516 | ||
568 | if ( *cur != ',') { | 517 | if (*cur != ',') { |
569 | /* parse out dev name */ | 518 | /* parse out dev name */ |
570 | if ((delim = strchr(cur, ',')) == NULL) | 519 | if ((delim = strchr(cur, ',')) == NULL) |
571 | goto parse_failed; | 520 | goto parse_failed; |
572 | *delim=0; | 521 | *delim = 0; |
573 | strlcpy(np->dev_name, cur, sizeof(np->dev_name)); | 522 | strlcpy(np->dev_name, cur, sizeof(np->dev_name)); |
574 | cur=delim; | 523 | cur = delim; |
575 | } | 524 | } |
576 | cur++; | 525 | cur++; |
577 | 526 | ||
578 | printk(KERN_INFO "%s: interface %s\n", np->name, np->dev_name); | 527 | printk(KERN_INFO "%s: interface %s\n", np->name, np->dev_name); |
579 | 528 | ||
580 | if ( *cur != '@' ) { | 529 | if (*cur != '@') { |
581 | /* dst port */ | 530 | /* dst port */ |
582 | if ((delim = strchr(cur, '@')) == NULL) | 531 | if ((delim = strchr(cur, '@')) == NULL) |
583 | goto parse_failed; | 532 | goto parse_failed; |
584 | *delim=0; | 533 | *delim = 0; |
585 | np->remote_port=simple_strtol(cur, NULL, 10); | 534 | np->remote_port = simple_strtol(cur, NULL, 10); |
586 | cur=delim; | 535 | cur = delim; |
587 | } | 536 | } |
588 | cur++; | 537 | cur++; |
589 | printk(KERN_INFO "%s: remote port %d\n", np->name, np->remote_port); | 538 | printk(KERN_INFO "%s: remote port %d\n", np->name, np->remote_port); |
@@ -591,42 +540,41 @@ int netpoll_parse_options(struct netpoll *np, char *opt) | |||
591 | /* dst ip */ | 540 | /* dst ip */ |
592 | if ((delim = strchr(cur, '/')) == NULL) | 541 | if ((delim = strchr(cur, '/')) == NULL) |
593 | goto parse_failed; | 542 | goto parse_failed; |
594 | *delim=0; | 543 | *delim = 0; |
595 | np->remote_ip=ntohl(in_aton(cur)); | 544 | np->remote_ip = ntohl(in_aton(cur)); |
596 | cur=delim+1; | 545 | cur = delim + 1; |
597 | 546 | ||
598 | printk(KERN_INFO "%s: remote IP %d.%d.%d.%d\n", | 547 | printk(KERN_INFO "%s: remote IP %d.%d.%d.%d\n", |
599 | np->name, HIPQUAD(np->remote_ip)); | 548 | np->name, HIPQUAD(np->remote_ip)); |
600 | 549 | ||
601 | if( *cur != 0 ) | 550 | if (*cur != 0) { |
602 | { | ||
603 | /* MAC address */ | 551 | /* MAC address */ |
604 | if ((delim = strchr(cur, ':')) == NULL) | 552 | if ((delim = strchr(cur, ':')) == NULL) |
605 | goto parse_failed; | 553 | goto parse_failed; |
606 | *delim=0; | 554 | *delim = 0; |
607 | np->remote_mac[0]=simple_strtol(cur, NULL, 16); | 555 | np->remote_mac[0] = simple_strtol(cur, NULL, 16); |
608 | cur=delim+1; | 556 | cur = delim + 1; |
609 | if ((delim = strchr(cur, ':')) == NULL) | 557 | if ((delim = strchr(cur, ':')) == NULL) |
610 | goto parse_failed; | 558 | goto parse_failed; |
611 | *delim=0; | 559 | *delim = 0; |
612 | np->remote_mac[1]=simple_strtol(cur, NULL, 16); | 560 | np->remote_mac[1] = simple_strtol(cur, NULL, 16); |
613 | cur=delim+1; | 561 | cur = delim + 1; |
614 | if ((delim = strchr(cur, ':')) == NULL) | 562 | if ((delim = strchr(cur, ':')) == NULL) |
615 | goto parse_failed; | 563 | goto parse_failed; |
616 | *delim=0; | 564 | *delim = 0; |
617 | np->remote_mac[2]=simple_strtol(cur, NULL, 16); | 565 | np->remote_mac[2] = simple_strtol(cur, NULL, 16); |
618 | cur=delim+1; | 566 | cur = delim + 1; |
619 | if ((delim = strchr(cur, ':')) == NULL) | 567 | if ((delim = strchr(cur, ':')) == NULL) |
620 | goto parse_failed; | 568 | goto parse_failed; |
621 | *delim=0; | 569 | *delim = 0; |
622 | np->remote_mac[3]=simple_strtol(cur, NULL, 16); | 570 | np->remote_mac[3] = simple_strtol(cur, NULL, 16); |
623 | cur=delim+1; | 571 | cur = delim + 1; |
624 | if ((delim = strchr(cur, ':')) == NULL) | 572 | if ((delim = strchr(cur, ':')) == NULL) |
625 | goto parse_failed; | 573 | goto parse_failed; |
626 | *delim=0; | 574 | *delim = 0; |
627 | np->remote_mac[4]=simple_strtol(cur, NULL, 16); | 575 | np->remote_mac[4] = simple_strtol(cur, NULL, 16); |
628 | cur=delim+1; | 576 | cur = delim + 1; |
629 | np->remote_mac[5]=simple_strtol(cur, NULL, 16); | 577 | np->remote_mac[5] = simple_strtol(cur, NULL, 16); |
630 | } | 578 | } |
631 | 579 | ||
632 | printk(KERN_INFO "%s: remote ethernet address " | 580 | printk(KERN_INFO "%s: remote ethernet address " |
@@ -653,34 +601,44 @@ int netpoll_setup(struct netpoll *np) | |||
653 | struct in_device *in_dev; | 601 | struct in_device *in_dev; |
654 | struct netpoll_info *npinfo; | 602 | struct netpoll_info *npinfo; |
655 | unsigned long flags; | 603 | unsigned long flags; |
604 | int err; | ||
656 | 605 | ||
657 | if (np->dev_name) | 606 | if (np->dev_name) |
658 | ndev = dev_get_by_name(np->dev_name); | 607 | ndev = dev_get_by_name(np->dev_name); |
659 | if (!ndev) { | 608 | if (!ndev) { |
660 | printk(KERN_ERR "%s: %s doesn't exist, aborting.\n", | 609 | printk(KERN_ERR "%s: %s doesn't exist, aborting.\n", |
661 | np->name, np->dev_name); | 610 | np->name, np->dev_name); |
662 | return -1; | 611 | return -ENODEV; |
663 | } | 612 | } |
664 | 613 | ||
665 | np->dev = ndev; | 614 | np->dev = ndev; |
666 | if (!ndev->npinfo) { | 615 | if (!ndev->npinfo) { |
667 | npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL); | 616 | npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL); |
668 | if (!npinfo) | 617 | if (!npinfo) { |
618 | err = -ENOMEM; | ||
669 | goto release; | 619 | goto release; |
620 | } | ||
670 | 621 | ||
671 | npinfo->rx_flags = 0; | 622 | npinfo->rx_flags = 0; |
672 | npinfo->rx_np = NULL; | 623 | npinfo->rx_np = NULL; |
673 | spin_lock_init(&npinfo->poll_lock); | 624 | spin_lock_init(&npinfo->poll_lock); |
674 | npinfo->poll_owner = -1; | 625 | npinfo->poll_owner = -1; |
675 | npinfo->tries = MAX_RETRIES; | 626 | |
676 | spin_lock_init(&npinfo->rx_lock); | 627 | spin_lock_init(&npinfo->rx_lock); |
677 | skb_queue_head_init(&npinfo->arp_tx); | 628 | skb_queue_head_init(&npinfo->arp_tx); |
678 | } else | 629 | skb_queue_head_init(&npinfo->txq); |
630 | INIT_DELAYED_WORK(&npinfo->tx_work, queue_process); | ||
631 | |||
632 | atomic_set(&npinfo->refcnt, 1); | ||
633 | } else { | ||
679 | npinfo = ndev->npinfo; | 634 | npinfo = ndev->npinfo; |
635 | atomic_inc(&npinfo->refcnt); | ||
636 | } | ||
680 | 637 | ||
681 | if (!ndev->poll_controller) { | 638 | if (!ndev->poll_controller) { |
682 | printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n", | 639 | printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n", |
683 | np->name, np->dev_name); | 640 | np->name, np->dev_name); |
641 | err = -ENOTSUPP; | ||
684 | goto release; | 642 | goto release; |
685 | } | 643 | } |
686 | 644 | ||
@@ -691,13 +649,14 @@ int netpoll_setup(struct netpoll *np) | |||
691 | np->name, np->dev_name); | 649 | np->name, np->dev_name); |
692 | 650 | ||
693 | rtnl_lock(); | 651 | rtnl_lock(); |
694 | if (dev_change_flags(ndev, ndev->flags | IFF_UP) < 0) { | 652 | err = dev_open(ndev); |
653 | rtnl_unlock(); | ||
654 | |||
655 | if (err) { | ||
695 | printk(KERN_ERR "%s: failed to open %s\n", | 656 | printk(KERN_ERR "%s: failed to open %s\n", |
696 | np->name, np->dev_name); | 657 | np->name, ndev->name); |
697 | rtnl_unlock(); | ||
698 | goto release; | 658 | goto release; |
699 | } | 659 | } |
700 | rtnl_unlock(); | ||
701 | 660 | ||
702 | atleast = jiffies + HZ/10; | 661 | atleast = jiffies + HZ/10; |
703 | atmost = jiffies + 4*HZ; | 662 | atmost = jiffies + 4*HZ; |
@@ -735,6 +694,7 @@ int netpoll_setup(struct netpoll *np) | |||
735 | rcu_read_unlock(); | 694 | rcu_read_unlock(); |
736 | printk(KERN_ERR "%s: no IP address for %s, aborting\n", | 695 | printk(KERN_ERR "%s: no IP address for %s, aborting\n", |
737 | np->name, np->dev_name); | 696 | np->name, np->dev_name); |
697 | err = -EDESTADDRREQ; | ||
738 | goto release; | 698 | goto release; |
739 | } | 699 | } |
740 | 700 | ||
@@ -767,9 +727,16 @@ int netpoll_setup(struct netpoll *np) | |||
767 | kfree(npinfo); | 727 | kfree(npinfo); |
768 | np->dev = NULL; | 728 | np->dev = NULL; |
769 | dev_put(ndev); | 729 | dev_put(ndev); |
770 | return -1; | 730 | return err; |
771 | } | 731 | } |
772 | 732 | ||
733 | static int __init netpoll_init(void) | ||
734 | { | ||
735 | skb_queue_head_init(&skb_pool); | ||
736 | return 0; | ||
737 | } | ||
738 | core_initcall(netpoll_init); | ||
739 | |||
773 | void netpoll_cleanup(struct netpoll *np) | 740 | void netpoll_cleanup(struct netpoll *np) |
774 | { | 741 | { |
775 | struct netpoll_info *npinfo; | 742 | struct netpoll_info *npinfo; |
@@ -777,12 +744,25 @@ void netpoll_cleanup(struct netpoll *np) | |||
777 | 744 | ||
778 | if (np->dev) { | 745 | if (np->dev) { |
779 | npinfo = np->dev->npinfo; | 746 | npinfo = np->dev->npinfo; |
780 | if (npinfo && npinfo->rx_np == np) { | 747 | if (npinfo) { |
781 | spin_lock_irqsave(&npinfo->rx_lock, flags); | 748 | if (npinfo->rx_np == np) { |
782 | npinfo->rx_np = NULL; | 749 | spin_lock_irqsave(&npinfo->rx_lock, flags); |
783 | npinfo->rx_flags &= ~NETPOLL_RX_ENABLED; | 750 | npinfo->rx_np = NULL; |
784 | spin_unlock_irqrestore(&npinfo->rx_lock, flags); | 751 | npinfo->rx_flags &= ~NETPOLL_RX_ENABLED; |
752 | spin_unlock_irqrestore(&npinfo->rx_lock, flags); | ||
753 | } | ||
754 | |||
755 | np->dev->npinfo = NULL; | ||
756 | if (atomic_dec_and_test(&npinfo->refcnt)) { | ||
757 | skb_queue_purge(&npinfo->arp_tx); | ||
758 | skb_queue_purge(&npinfo->txq); | ||
759 | cancel_rearming_delayed_work(&npinfo->tx_work); | ||
760 | flush_scheduled_work(); | ||
761 | |||
762 | kfree(npinfo); | ||
763 | } | ||
785 | } | 764 | } |
765 | |||
786 | dev_put(np->dev); | 766 | dev_put(np->dev); |
787 | } | 767 | } |
788 | 768 | ||
@@ -809,4 +789,3 @@ EXPORT_SYMBOL(netpoll_setup); | |||
809 | EXPORT_SYMBOL(netpoll_cleanup); | 789 | EXPORT_SYMBOL(netpoll_cleanup); |
810 | EXPORT_SYMBOL(netpoll_send_udp); | 790 | EXPORT_SYMBOL(netpoll_send_udp); |
811 | EXPORT_SYMBOL(netpoll_poll); | 791 | EXPORT_SYMBOL(netpoll_poll); |
812 | EXPORT_SYMBOL(netpoll_queue); | ||
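The netpoll.c hunks above make the per-device netpoll_info block shared and reference-counted: netpoll_setup() now reuses an existing ndev->npinfo and bumps its refcount, while netpoll_cleanup() only purges the queues, cancels the delayed work and frees the block once the last client detaches. A minimal userspace sketch of that attach/detach pattern, with a plain int standing in for the kernel's atomic_t and all names invented for illustration:

#include <stdio.h>
#include <stdlib.h>

struct shared_info {
	int refcnt;                     /* the kernel uses atomic_t here */
};

static struct shared_info *attach(struct shared_info **slot)
{
	if (!*slot) {
		*slot = calloc(1, sizeof(**slot));   /* error handling elided */
		(*slot)->refcnt = 1;    /* first client creates the info block */
	} else {
		(*slot)->refcnt++;      /* later clients just share it */
	}
	return *slot;
}

static void detach(struct shared_info **slot)
{
	if (--(*slot)->refcnt == 0) {   /* last client tears everything down */
		free(*slot);
		*slot = NULL;
	}
}

int main(void)
{
	struct shared_info *npinfo = NULL;      /* one slot per net_device */

	attach(&npinfo);
	attach(&npinfo);                        /* refcnt == 2, shared */
	detach(&npinfo);                        /* block stays alive */
	detach(&npinfo);                        /* freed, slot reset to NULL */
	printf("npinfo is %s\n", npinfo ? "set" : "NULL");
	return 0;
}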
diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 733d86d0a4fb..1897a3a385d8 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c | |||
@@ -207,7 +207,7 @@ static struct proc_dir_entry *pg_proc_dir = NULL; | |||
207 | #define SVLAN_TAG_SIZE(x) ((x)->svlan_id == 0xffff ? 0 : 4) | 207 | #define SVLAN_TAG_SIZE(x) ((x)->svlan_id == 0xffff ? 0 : 4) |
208 | 208 | ||
209 | struct flow_state { | 209 | struct flow_state { |
210 | __u32 cur_daddr; | 210 | __be32 cur_daddr; |
211 | int count; | 211 | int count; |
212 | }; | 212 | }; |
213 | 213 | ||
@@ -282,10 +282,10 @@ struct pktgen_dev { | |||
282 | /* If we're doing ranges, random or incremental, then this | 282 | /* If we're doing ranges, random or incremental, then this |
283 | * defines the min/max for those ranges. | 283 | * defines the min/max for those ranges. |
284 | */ | 284 | */ |
285 | __u32 saddr_min; /* inclusive, source IP address */ | 285 | __be32 saddr_min; /* inclusive, source IP address */ |
286 | __u32 saddr_max; /* exclusive, source IP address */ | 286 | __be32 saddr_max; /* exclusive, source IP address */ |
287 | __u32 daddr_min; /* inclusive, dest IP address */ | 287 | __be32 daddr_min; /* inclusive, dest IP address */ |
288 | __u32 daddr_max; /* exclusive, dest IP address */ | 288 | __be32 daddr_max; /* exclusive, dest IP address */ |
289 | 289 | ||
290 | __u16 udp_src_min; /* inclusive, source UDP port */ | 290 | __u16 udp_src_min; /* inclusive, source UDP port */ |
291 | __u16 udp_src_max; /* exclusive, source UDP port */ | 291 | __u16 udp_src_max; /* exclusive, source UDP port */ |
@@ -317,8 +317,8 @@ struct pktgen_dev { | |||
317 | 317 | ||
318 | __u32 cur_dst_mac_offset; | 318 | __u32 cur_dst_mac_offset; |
319 | __u32 cur_src_mac_offset; | 319 | __u32 cur_src_mac_offset; |
320 | __u32 cur_saddr; | 320 | __be32 cur_saddr; |
321 | __u32 cur_daddr; | 321 | __be32 cur_daddr; |
322 | __u16 cur_udp_dst; | 322 | __u16 cur_udp_dst; |
323 | __u16 cur_udp_src; | 323 | __u16 cur_udp_src; |
324 | __u32 cur_pkt_size; | 324 | __u32 cur_pkt_size; |
@@ -350,10 +350,10 @@ struct pktgen_dev { | |||
350 | }; | 350 | }; |
351 | 351 | ||
352 | struct pktgen_hdr { | 352 | struct pktgen_hdr { |
353 | __u32 pgh_magic; | 353 | __be32 pgh_magic; |
354 | __u32 seq_num; | 354 | __be32 seq_num; |
355 | __u32 tv_sec; | 355 | __be32 tv_sec; |
356 | __u32 tv_usec; | 356 | __be32 tv_usec; |
357 | }; | 357 | }; |
358 | 358 | ||
359 | struct pktgen_thread { | 359 | struct pktgen_thread { |
@@ -2160,7 +2160,7 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev) | |||
2160 | for(i = 0; i < pkt_dev->nr_labels; i++) | 2160 | for(i = 0; i < pkt_dev->nr_labels; i++) |
2161 | if (pkt_dev->labels[i] & MPLS_STACK_BOTTOM) | 2161 | if (pkt_dev->labels[i] & MPLS_STACK_BOTTOM) |
2162 | pkt_dev->labels[i] = MPLS_STACK_BOTTOM | | 2162 | pkt_dev->labels[i] = MPLS_STACK_BOTTOM | |
2163 | (pktgen_random() & | 2163 | ((__force __be32)pktgen_random() & |
2164 | htonl(0x000fffff)); | 2164 | htonl(0x000fffff)); |
2165 | } | 2165 | } |
2166 | 2166 | ||
@@ -2220,29 +2220,25 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev) | |||
2220 | if (pkt_dev->cflows && pkt_dev->flows[flow].count != 0) { | 2220 | if (pkt_dev->cflows && pkt_dev->flows[flow].count != 0) { |
2221 | pkt_dev->cur_daddr = pkt_dev->flows[flow].cur_daddr; | 2221 | pkt_dev->cur_daddr = pkt_dev->flows[flow].cur_daddr; |
2222 | } else { | 2222 | } else { |
2223 | 2223 | imn = ntohl(pkt_dev->daddr_min); | |
2224 | if ((imn = ntohl(pkt_dev->daddr_min)) < (imx = | 2224 | imx = ntohl(pkt_dev->daddr_max); |
2225 | ntohl(pkt_dev-> | 2225 | if (imn < imx) { |
2226 | daddr_max))) | ||
2227 | { | ||
2228 | __u32 t; | 2226 | __u32 t; |
2227 | __be32 s; | ||
2229 | if (pkt_dev->flags & F_IPDST_RND) { | 2228 | if (pkt_dev->flags & F_IPDST_RND) { |
2230 | 2229 | ||
2231 | t = ((pktgen_random() % (imx - imn)) + | 2230 | t = pktgen_random() % (imx - imn) + imn; |
2232 | imn); | 2231 | s = htonl(t); |
2233 | t = htonl(t); | ||
2234 | 2232 | ||
2235 | while (LOOPBACK(t) || MULTICAST(t) | 2233 | while (LOOPBACK(s) || MULTICAST(s) |
2236 | || BADCLASS(t) || ZERONET(t) | 2234 | || BADCLASS(s) || ZERONET(s) |
2237 | || LOCAL_MCAST(t)) { | 2235 | || LOCAL_MCAST(s)) { |
2238 | t = ((pktgen_random() % | 2236 | t = (pktgen_random() % |
2239 | (imx - imn)) + imn); | 2237 | (imx - imn)) + imn; |
2240 | t = htonl(t); | 2238 | s = htonl(t); |
2241 | } | 2239 | } |
2242 | pkt_dev->cur_daddr = t; | 2240 | pkt_dev->cur_daddr = s; |
2243 | } | 2241 | } else { |
2244 | |||
2245 | else { | ||
2246 | t = ntohl(pkt_dev->cur_daddr); | 2242 | t = ntohl(pkt_dev->cur_daddr); |
2247 | t++; | 2243 | t++; |
2248 | if (t > imx) { | 2244 | if (t > imx) { |
@@ -2270,7 +2266,7 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev) | |||
2270 | 2266 | ||
2271 | for (i = 0; i < 4; i++) { | 2267 | for (i = 0; i < 4; i++) { |
2272 | pkt_dev->cur_in6_daddr.s6_addr32[i] = | 2268 | pkt_dev->cur_in6_daddr.s6_addr32[i] = |
2273 | ((pktgen_random() | | 2269 | (((__force __be32)pktgen_random() | |
2274 | pkt_dev->min_in6_daddr.s6_addr32[i]) & | 2270 | pkt_dev->min_in6_daddr.s6_addr32[i]) & |
2275 | pkt_dev->max_in6_daddr.s6_addr32[i]); | 2271 | pkt_dev->max_in6_daddr.s6_addr32[i]); |
2276 | } | 2272 | } |
@@ -2377,7 +2373,7 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev, | |||
2377 | udph = (struct udphdr *)skb_put(skb, sizeof(struct udphdr)); | 2373 | udph = (struct udphdr *)skb_put(skb, sizeof(struct udphdr)); |
2378 | 2374 | ||
2379 | memcpy(eth, pkt_dev->hh, 12); | 2375 | memcpy(eth, pkt_dev->hh, 12); |
2380 | *(u16 *) & eth[12] = protocol; | 2376 | *(__be16 *) & eth[12] = protocol; |
2381 | 2377 | ||
2382 | /* Eth + IPh + UDPh + mpls */ | 2378 | /* Eth + IPh + UDPh + mpls */ |
2383 | datalen = pkt_dev->cur_pkt_size - 14 - 20 - 8 - | 2379 | datalen = pkt_dev->cur_pkt_size - 14 - 20 - 8 - |
@@ -2497,7 +2493,7 @@ static unsigned int scan_ip6(const char *s, char ip[16]) | |||
2497 | char suffix[16]; | 2493 | char suffix[16]; |
2498 | unsigned int prefixlen = 0; | 2494 | unsigned int prefixlen = 0; |
2499 | unsigned int suffixlen = 0; | 2495 | unsigned int suffixlen = 0; |
2500 | __u32 tmp; | 2496 | __be32 tmp; |
2501 | 2497 | ||
2502 | for (i = 0; i < 16; i++) | 2498 | for (i = 0; i < 16; i++) |
2503 | ip[i] = 0; | 2499 | ip[i] = 0; |
@@ -2713,7 +2709,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev, | |||
2713 | udph = (struct udphdr *)skb_put(skb, sizeof(struct udphdr)); | 2709 | udph = (struct udphdr *)skb_put(skb, sizeof(struct udphdr)); |
2714 | 2710 | ||
2715 | memcpy(eth, pkt_dev->hh, 12); | 2711 | memcpy(eth, pkt_dev->hh, 12); |
2716 | *(u16 *) & eth[12] = protocol; | 2712 | *(__be16 *) & eth[12] = protocol; |
2717 | 2713 | ||
2718 | /* Eth + IPh + UDPh + mpls */ | 2714 | /* Eth + IPh + UDPh + mpls */ |
2719 | datalen = pkt_dev->cur_pkt_size - 14 - | 2715 | datalen = pkt_dev->cur_pkt_size - 14 - |
@@ -2732,11 +2728,11 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev, | |||
2732 | udph->len = htons(datalen + sizeof(struct udphdr)); | 2728 | udph->len = htons(datalen + sizeof(struct udphdr)); |
2733 | udph->check = 0; /* No checksum */ | 2729 | udph->check = 0; /* No checksum */ |
2734 | 2730 | ||
2735 | *(u32 *) iph = __constant_htonl(0x60000000); /* Version + flow */ | 2731 | *(__be32 *) iph = __constant_htonl(0x60000000); /* Version + flow */ |
2736 | 2732 | ||
2737 | if (pkt_dev->traffic_class) { | 2733 | if (pkt_dev->traffic_class) { |
2738 | /* Version + traffic class + flow (0) */ | 2734 | /* Version + traffic class + flow (0) */ |
2739 | *(u32 *)iph |= htonl(0x60000000 | (pkt_dev->traffic_class << 20)); | 2735 | *(__be32 *)iph |= htonl(0x60000000 | (pkt_dev->traffic_class << 20)); |
2740 | } | 2736 | } |
2741 | 2737 | ||
2742 | iph->hop_limit = 32; | 2738 | iph->hop_limit = 32; |
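Most of the pktgen.c churn above is byte-order annotation: addresses that go on the wire become __be32, so sparse can verify that arithmetic (incrementing, range checks against imn/imx) is done on the ntohl()'d host-order copy and only the htonl()'d result is stored in cur_daddr. A small userspace illustration of that split, using standard socket headers:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

int main(void)
{
	uint32_t cur_be = inet_addr("10.0.0.255");   /* network (wire) order */
	uint32_t t = ntohl(cur_be);                  /* host order for arithmetic */

	t++;                                         /* 10.0.0.255 -> 10.0.1.0 */
	cur_be = htonl(t);                           /* back to wire order */

	struct in_addr a = { .s_addr = cur_be };
	printf("%s\n", inet_ntoa(a));                /* prints 10.0.1.0 */
	return 0;
}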
diff --git a/net/core/request_sock.c b/net/core/request_sock.c index 79ebd75fbe4d..5f0818d815e6 100644 --- a/net/core/request_sock.c +++ b/net/core/request_sock.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/random.h> | 15 | #include <linux/random.h> |
16 | #include <linux/slab.h> | 16 | #include <linux/slab.h> |
17 | #include <linux/string.h> | 17 | #include <linux/string.h> |
18 | #include <linux/vmalloc.h> | ||
18 | 19 | ||
19 | #include <net/request_sock.h> | 20 | #include <net/request_sock.h> |
20 | 21 | ||
@@ -29,22 +30,31 @@ | |||
29 | * it is absolutely not enough even at 100conn/sec. 256 cures most | 30 | * it is absolutely not enough even at 100conn/sec. 256 cures most |
30 | * of problems. This value is adjusted to 128 for very small machines | 31 | * of problems. This value is adjusted to 128 for very small machines |
31 | * (<=32Mb of memory) and to 1024 on normal or better ones (>=256Mb). | 32 | * (<=32Mb of memory) and to 1024 on normal or better ones (>=256Mb). |
32 | * Further increasing requires to change hash table size. | 33 | * Note: Don't forget that somaxconn may limit the backlog too. |
33 | */ | 34 | */ |
34 | int sysctl_max_syn_backlog = 256; | 35 | int sysctl_max_syn_backlog = 256; |
35 | 36 | ||
36 | int reqsk_queue_alloc(struct request_sock_queue *queue, | 37 | int reqsk_queue_alloc(struct request_sock_queue *queue, |
37 | const int nr_table_entries) | 38 | unsigned int nr_table_entries) |
38 | { | 39 | { |
39 | const int lopt_size = sizeof(struct listen_sock) + | 40 | size_t lopt_size = sizeof(struct listen_sock); |
40 | nr_table_entries * sizeof(struct request_sock *); | 41 | struct listen_sock *lopt; |
41 | struct listen_sock *lopt = kzalloc(lopt_size, GFP_KERNEL); | 42 | |
42 | 43 | nr_table_entries = min_t(u32, nr_table_entries, sysctl_max_syn_backlog); | |
44 | nr_table_entries = max_t(u32, nr_table_entries, 8); | ||
45 | nr_table_entries = roundup_pow_of_two(nr_table_entries + 1); | ||
46 | lopt_size += nr_table_entries * sizeof(struct request_sock *); | ||
47 | if (lopt_size > PAGE_SIZE) | ||
48 | lopt = __vmalloc(lopt_size, | ||
49 | GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, | ||
50 | PAGE_KERNEL); | ||
51 | else | ||
52 | lopt = kzalloc(lopt_size, GFP_KERNEL); | ||
43 | if (lopt == NULL) | 53 | if (lopt == NULL) |
44 | return -ENOMEM; | 54 | return -ENOMEM; |
45 | 55 | ||
46 | for (lopt->max_qlen_log = 6; | 56 | for (lopt->max_qlen_log = 3; |
47 | (1 << lopt->max_qlen_log) < sysctl_max_syn_backlog; | 57 | (1 << lopt->max_qlen_log) < nr_table_entries; |
48 | lopt->max_qlen_log++); | 58 | lopt->max_qlen_log++); |
49 | 59 | ||
50 | get_random_bytes(&lopt->hash_rnd, sizeof(lopt->hash_rnd)); | 60 | get_random_bytes(&lopt->hash_rnd, sizeof(lopt->hash_rnd)); |
@@ -65,9 +75,11 @@ void reqsk_queue_destroy(struct request_sock_queue *queue) | |||
65 | { | 75 | { |
66 | /* make all the listen_opt local to us */ | 76 | /* make all the listen_opt local to us */ |
67 | struct listen_sock *lopt = reqsk_queue_yank_listen_sk(queue); | 77 | struct listen_sock *lopt = reqsk_queue_yank_listen_sk(queue); |
78 | size_t lopt_size = sizeof(struct listen_sock) + | ||
79 | lopt->nr_table_entries * sizeof(struct request_sock *); | ||
68 | 80 | ||
69 | if (lopt->qlen != 0) { | 81 | if (lopt->qlen != 0) { |
70 | int i; | 82 | unsigned int i; |
71 | 83 | ||
72 | for (i = 0; i < lopt->nr_table_entries; i++) { | 84 | for (i = 0; i < lopt->nr_table_entries; i++) { |
73 | struct request_sock *req; | 85 | struct request_sock *req; |
@@ -81,7 +93,10 @@ void reqsk_queue_destroy(struct request_sock_queue *queue) | |||
81 | } | 93 | } |
82 | 94 | ||
83 | BUG_TRAP(lopt->qlen == 0); | 95 | BUG_TRAP(lopt->qlen == 0); |
84 | kfree(lopt); | 96 | if (lopt_size > PAGE_SIZE) |
97 | vfree(lopt); | ||
98 | else | ||
99 | kfree(lopt); | ||
85 | } | 100 | } |
86 | 101 | ||
87 | EXPORT_SYMBOL(reqsk_queue_destroy); | 102 | EXPORT_SYMBOL(reqsk_queue_destroy); |
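reqsk_queue_alloc() above stops sizing the SYN table purely from sysctl_max_syn_backlog: the requested backlog is clamped, given a floor of 8, rounded up to a power of two, and the table is vmalloc()'d once it no longer fits in a page. A userspace sketch of that sizing arithmetic, with assumed values for sysctl_max_syn_backlog, PAGE_SIZE and sizeof(struct listen_sock):

#include <stdio.h>

#define MAX_SYN_BACKLOG 256   /* stand-in for sysctl_max_syn_backlog */
#define PAGE_SIZE       4096

static unsigned int roundup_pow_of_two(unsigned int n)
{
	unsigned int r = 1;
	while (r < n)
		r <<= 1;
	return r;
}

int main(void)
{
	unsigned int backlogs[] = { 0, 5, 128, 100000 };

	for (int i = 0; i < 4; i++) {
		unsigned int n = backlogs[i];

		if (n > MAX_SYN_BACKLOG) n = MAX_SYN_BACKLOG;  /* min_t */
		if (n < 8)               n = 8;                /* max_t */
		n = roundup_pow_of_two(n + 1);

		size_t size = 64                 /* sizeof(struct listen_sock), assumed */
			      + n * sizeof(void *);  /* one pointer-sized slot per entry */
		printf("backlog %-6u -> %4u slots, %5zu bytes (%s)\n",
		       backlogs[i], n, size,
		       size > PAGE_SIZE ? "vmalloc" : "kmalloc");
	}
	return 0;
}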
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 02f3c7947898..e76539a5eb5e 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c | |||
@@ -108,7 +108,6 @@ static const int rtm_min[RTM_NR_FAMILIES] = | |||
108 | [RTM_FAM(RTM_NEWTCLASS)] = NLMSG_LENGTH(sizeof(struct tcmsg)), | 108 | [RTM_FAM(RTM_NEWTCLASS)] = NLMSG_LENGTH(sizeof(struct tcmsg)), |
109 | [RTM_FAM(RTM_NEWTFILTER)] = NLMSG_LENGTH(sizeof(struct tcmsg)), | 109 | [RTM_FAM(RTM_NEWTFILTER)] = NLMSG_LENGTH(sizeof(struct tcmsg)), |
110 | [RTM_FAM(RTM_NEWACTION)] = NLMSG_LENGTH(sizeof(struct tcamsg)), | 110 | [RTM_FAM(RTM_NEWACTION)] = NLMSG_LENGTH(sizeof(struct tcamsg)), |
111 | [RTM_FAM(RTM_NEWPREFIX)] = NLMSG_LENGTH(sizeof(struct rtgenmsg)), | ||
112 | [RTM_FAM(RTM_GETMULTICAST)] = NLMSG_LENGTH(sizeof(struct rtgenmsg)), | 111 | [RTM_FAM(RTM_GETMULTICAST)] = NLMSG_LENGTH(sizeof(struct rtgenmsg)), |
113 | [RTM_FAM(RTM_GETANYCAST)] = NLMSG_LENGTH(sizeof(struct rtgenmsg)), | 112 | [RTM_FAM(RTM_GETANYCAST)] = NLMSG_LENGTH(sizeof(struct rtgenmsg)), |
114 | }; | 113 | }; |
@@ -213,6 +212,26 @@ nla_put_failure: | |||
213 | return nla_nest_cancel(skb, mx); | 212 | return nla_nest_cancel(skb, mx); |
214 | } | 213 | } |
215 | 214 | ||
215 | int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id, | ||
216 | u32 ts, u32 tsage, long expires, u32 error) | ||
217 | { | ||
218 | struct rta_cacheinfo ci = { | ||
219 | .rta_lastuse = jiffies_to_clock_t(jiffies - dst->lastuse), | ||
220 | .rta_used = dst->__use, | ||
221 | .rta_clntref = atomic_read(&(dst->__refcnt)), | ||
222 | .rta_error = error, | ||
223 | .rta_id = id, | ||
224 | .rta_ts = ts, | ||
225 | .rta_tsage = tsage, | ||
226 | }; | ||
227 | |||
228 | if (expires) | ||
229 | ci.rta_expires = jiffies_to_clock_t(expires); | ||
230 | |||
231 | return nla_put(skb, RTA_CACHEINFO, sizeof(ci), &ci); | ||
232 | } | ||
233 | |||
234 | EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo); | ||
216 | 235 | ||
217 | static void set_operstate(struct net_device *dev, unsigned char transition) | 236 | static void set_operstate(struct net_device *dev, unsigned char transition) |
218 | { | 237 | { |
@@ -273,6 +292,25 @@ static void copy_rtnl_link_stats(struct rtnl_link_stats *a, | |||
273 | a->tx_compressed = b->tx_compressed; | 292 | a->tx_compressed = b->tx_compressed; |
274 | }; | 293 | }; |
275 | 294 | ||
295 | static inline size_t if_nlmsg_size(int iwbuflen) | ||
296 | { | ||
297 | return NLMSG_ALIGN(sizeof(struct ifinfomsg)) | ||
298 | + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */ | ||
299 | + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */ | ||
300 | + nla_total_size(sizeof(struct rtnl_link_ifmap)) | ||
301 | + nla_total_size(sizeof(struct rtnl_link_stats)) | ||
302 | + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */ | ||
303 | + nla_total_size(MAX_ADDR_LEN) /* IFLA_BROADCAST */ | ||
304 | + nla_total_size(4) /* IFLA_TXQLEN */ | ||
305 | + nla_total_size(4) /* IFLA_WEIGHT */ | ||
306 | + nla_total_size(4) /* IFLA_MTU */ | ||
307 | + nla_total_size(4) /* IFLA_LINK */ | ||
308 | + nla_total_size(4) /* IFLA_MASTER */ | ||
309 | + nla_total_size(1) /* IFLA_OPERSTATE */ | ||
310 | + nla_total_size(1) /* IFLA_LINKMODE */ | ||
311 | + nla_total_size(iwbuflen); | ||
312 | } | ||
313 | |||
276 | static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, | 314 | static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, |
277 | void *iwbuf, int iwbuflen, int type, u32 pid, | 315 | void *iwbuf, int iwbuflen, int type, u32 pid, |
278 | u32 seq, u32 change, unsigned int flags) | 316 | u32 seq, u32 change, unsigned int flags) |
@@ -558,7 +596,7 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) | |||
558 | struct sk_buff *nskb; | 596 | struct sk_buff *nskb; |
559 | char *iw_buf = NULL, *iw = NULL; | 597 | char *iw_buf = NULL, *iw = NULL; |
560 | int iw_buf_len = 0; | 598 | int iw_buf_len = 0; |
561 | int err, payload; | 599 | int err; |
562 | 600 | ||
563 | err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy); | 601 | err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy); |
564 | if (err < 0) | 602 | if (err < 0) |
@@ -587,9 +625,7 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) | |||
587 | } | 625 | } |
588 | #endif /* CONFIG_NET_WIRELESS_RTNETLINK */ | 626 | #endif /* CONFIG_NET_WIRELESS_RTNETLINK */ |
589 | 627 | ||
590 | payload = NLMSG_ALIGN(sizeof(struct ifinfomsg) + | 628 | nskb = nlmsg_new(if_nlmsg_size(iw_buf_len), GFP_KERNEL); |
591 | nla_total_size(iw_buf_len)); | ||
592 | nskb = nlmsg_new(nlmsg_total_size(payload), GFP_KERNEL); | ||
593 | if (nskb == NULL) { | 629 | if (nskb == NULL) { |
594 | err = -ENOBUFS; | 630 | err = -ENOBUFS; |
595 | goto errout; | 631 | goto errout; |
@@ -597,10 +633,8 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) | |||
597 | 633 | ||
598 | err = rtnl_fill_ifinfo(nskb, dev, iw, iw_buf_len, RTM_NEWLINK, | 634 | err = rtnl_fill_ifinfo(nskb, dev, iw, iw_buf_len, RTM_NEWLINK, |
599 | NETLINK_CB(skb).pid, nlh->nlmsg_seq, 0, 0); | 635 | NETLINK_CB(skb).pid, nlh->nlmsg_seq, 0, 0); |
600 | if (err <= 0) { | 636 | /* failure implies BUG in if_nlmsg_size or wireless_rtnetlink_get */ |
601 | kfree_skb(nskb); | 637 | BUG_ON(err < 0); |
602 | goto errout; | ||
603 | } | ||
604 | 638 | ||
605 | err = rtnl_unicast(nskb, NETLINK_CB(skb).pid); | 639 | err = rtnl_unicast(nskb, NETLINK_CB(skb).pid); |
606 | errout: | 640 | errout: |
@@ -639,15 +673,13 @@ void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change) | |||
639 | struct sk_buff *skb; | 673 | struct sk_buff *skb; |
640 | int err = -ENOBUFS; | 674 | int err = -ENOBUFS; |
641 | 675 | ||
642 | skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); | 676 | skb = nlmsg_new(if_nlmsg_size(0), GFP_KERNEL); |
643 | if (skb == NULL) | 677 | if (skb == NULL) |
644 | goto errout; | 678 | goto errout; |
645 | 679 | ||
646 | err = rtnl_fill_ifinfo(skb, dev, NULL, 0, type, 0, 0, change, 0); | 680 | err = rtnl_fill_ifinfo(skb, dev, NULL, 0, type, 0, 0, change, 0); |
647 | if (err < 0) { | 681 | /* failure implies BUG in if_nlmsg_size() */ |
648 | kfree_skb(skb); | 682 | BUG_ON(err < 0); |
649 | goto errout; | ||
650 | } | ||
651 | 683 | ||
652 | err = rtnl_notify(skb, 0, RTNLGRP_LINK, NULL, GFP_KERNEL); | 684 | err = rtnl_notify(skb, 0, RTNLGRP_LINK, NULL, GFP_KERNEL); |
653 | errout: | 685 | errout: |
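rtnl_getlink() and rtmsg_ifinfo() above switch from NLMSG_GOODSIZE guesses to an exact if_nlmsg_size() budget, which is why a failed fill is now treated as a bug (BUG_ON) rather than a recoverable error. Each attribute costs its payload plus a 4-byte header, rounded up to 4-byte alignment; a userspace sketch of that arithmetic, with IFNAMSIZ and MAX_ADDR_LEN filled in as their usual values:

#include <stdio.h>

#define NLA_ALIGNTO     4
#define NLA_ALIGN(len)  (((len) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1))
#define NLA_HDRLEN      4   /* sizeof(struct nlattr) */

static int nla_total_size(int payload)
{
	return NLA_ALIGN(NLA_HDRLEN + payload);
}

int main(void)
{
	/* a few of the attributes if_nlmsg_size() accounts for */
	int size = nla_total_size(16)   /* IFLA_IFNAME  (IFNAMSIZ) */
		 + nla_total_size(32)   /* IFLA_ADDRESS (MAX_ADDR_LEN) */
		 + nla_total_size(4)    /* IFLA_MTU */
		 + nla_total_size(1);   /* IFLA_OPERSTATE */

	printf("these attributes need %d bytes of message space\n", size);
	return 0;
}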
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index b8b106358040..a90bc439488e 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
@@ -473,8 +473,8 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask) | |||
473 | #endif | 473 | #endif |
474 | C(protocol); | 474 | C(protocol); |
475 | n->destructor = NULL; | 475 | n->destructor = NULL; |
476 | C(mark); | ||
476 | #ifdef CONFIG_NETFILTER | 477 | #ifdef CONFIG_NETFILTER |
477 | C(nfmark); | ||
478 | C(nfct); | 478 | C(nfct); |
479 | nf_conntrack_get(skb->nfct); | 479 | nf_conntrack_get(skb->nfct); |
480 | C(nfctinfo); | 480 | C(nfctinfo); |
@@ -534,8 +534,8 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old) | |||
534 | new->pkt_type = old->pkt_type; | 534 | new->pkt_type = old->pkt_type; |
535 | new->tstamp = old->tstamp; | 535 | new->tstamp = old->tstamp; |
536 | new->destructor = NULL; | 536 | new->destructor = NULL; |
537 | new->mark = old->mark; | ||
537 | #ifdef CONFIG_NETFILTER | 538 | #ifdef CONFIG_NETFILTER |
538 | new->nfmark = old->nfmark; | ||
539 | new->nfct = old->nfct; | 539 | new->nfct = old->nfct; |
540 | nf_conntrack_get(old->nfct); | 540 | nf_conntrack_get(old->nfct); |
541 | new->nfctinfo = old->nfctinfo; | 541 | new->nfctinfo = old->nfctinfo; |
@@ -1240,8 +1240,8 @@ EXPORT_SYMBOL(skb_store_bits); | |||
1240 | 1240 | ||
1241 | /* Checksum skb data. */ | 1241 | /* Checksum skb data. */ |
1242 | 1242 | ||
1243 | unsigned int skb_checksum(const struct sk_buff *skb, int offset, | 1243 | __wsum skb_checksum(const struct sk_buff *skb, int offset, |
1244 | int len, unsigned int csum) | 1244 | int len, __wsum csum) |
1245 | { | 1245 | { |
1246 | int start = skb_headlen(skb); | 1246 | int start = skb_headlen(skb); |
1247 | int i, copy = start - offset; | 1247 | int i, copy = start - offset; |
@@ -1265,7 +1265,7 @@ unsigned int skb_checksum(const struct sk_buff *skb, int offset, | |||
1265 | 1265 | ||
1266 | end = start + skb_shinfo(skb)->frags[i].size; | 1266 | end = start + skb_shinfo(skb)->frags[i].size; |
1267 | if ((copy = end - offset) > 0) { | 1267 | if ((copy = end - offset) > 0) { |
1268 | unsigned int csum2; | 1268 | __wsum csum2; |
1269 | u8 *vaddr; | 1269 | u8 *vaddr; |
1270 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | 1270 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
1271 | 1271 | ||
@@ -1294,7 +1294,7 @@ unsigned int skb_checksum(const struct sk_buff *skb, int offset, | |||
1294 | 1294 | ||
1295 | end = start + list->len; | 1295 | end = start + list->len; |
1296 | if ((copy = end - offset) > 0) { | 1296 | if ((copy = end - offset) > 0) { |
1297 | unsigned int csum2; | 1297 | __wsum csum2; |
1298 | if (copy > len) | 1298 | if (copy > len) |
1299 | copy = len; | 1299 | copy = len; |
1300 | csum2 = skb_checksum(list, offset - start, | 1300 | csum2 = skb_checksum(list, offset - start, |
@@ -1315,8 +1315,8 @@ unsigned int skb_checksum(const struct sk_buff *skb, int offset, | |||
1315 | 1315 | ||
1316 | /* Both of above in one bottle. */ | 1316 | /* Both of above in one bottle. */ |
1317 | 1317 | ||
1318 | unsigned int skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, | 1318 | __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, |
1319 | u8 *to, int len, unsigned int csum) | 1319 | u8 *to, int len, __wsum csum) |
1320 | { | 1320 | { |
1321 | int start = skb_headlen(skb); | 1321 | int start = skb_headlen(skb); |
1322 | int i, copy = start - offset; | 1322 | int i, copy = start - offset; |
@@ -1342,7 +1342,7 @@ unsigned int skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, | |||
1342 | 1342 | ||
1343 | end = start + skb_shinfo(skb)->frags[i].size; | 1343 | end = start + skb_shinfo(skb)->frags[i].size; |
1344 | if ((copy = end - offset) > 0) { | 1344 | if ((copy = end - offset) > 0) { |
1345 | unsigned int csum2; | 1345 | __wsum csum2; |
1346 | u8 *vaddr; | 1346 | u8 *vaddr; |
1347 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | 1347 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
1348 | 1348 | ||
@@ -1368,7 +1368,7 @@ unsigned int skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, | |||
1368 | struct sk_buff *list = skb_shinfo(skb)->frag_list; | 1368 | struct sk_buff *list = skb_shinfo(skb)->frag_list; |
1369 | 1369 | ||
1370 | for (; list; list = list->next) { | 1370 | for (; list; list = list->next) { |
1371 | unsigned int csum2; | 1371 | __wsum csum2; |
1372 | int end; | 1372 | int end; |
1373 | 1373 | ||
1374 | BUG_TRAP(start <= offset + len); | 1374 | BUG_TRAP(start <= offset + len); |
@@ -1396,7 +1396,7 @@ unsigned int skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, | |||
1396 | 1396 | ||
1397 | void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to) | 1397 | void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to) |
1398 | { | 1398 | { |
1399 | unsigned int csum; | 1399 | __wsum csum; |
1400 | long csstart; | 1400 | long csstart; |
1401 | 1401 | ||
1402 | if (skb->ip_summed == CHECKSUM_PARTIAL) | 1402 | if (skb->ip_summed == CHECKSUM_PARTIAL) |
@@ -1414,9 +1414,9 @@ void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to) | |||
1414 | skb->len - csstart, 0); | 1414 | skb->len - csstart, 0); |
1415 | 1415 | ||
1416 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | 1416 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
1417 | long csstuff = csstart + skb->csum; | 1417 | long csstuff = csstart + skb->csum_offset; |
1418 | 1418 | ||
1419 | *((unsigned short *)(to + csstuff)) = csum_fold(csum); | 1419 | *((__sum16 *)(to + csstuff)) = csum_fold(csum); |
1420 | } | 1420 | } |
1421 | } | 1421 | } |
1422 | 1422 | ||
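The skbuff.c hunks above are mostly sparse checksum typing: the running 32-bit sum becomes __wsum and the folded 16-bit value written at csstart + skb->csum_offset becomes __sum16. The fold itself is ordinary one's-complement arithmetic; a userspace sketch of what csum_fold() does with a partial sum:

#include <stdio.h>
#include <stdint.h>

static uint16_t csum_fold(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);   /* fold the high half into the low */
	sum += sum >> 16;                     /* fold any new carry */
	return (uint16_t)~sum;                /* one's-complement result */
}

int main(void)
{
	uint32_t partial = 0x12345 + 0xabcd;  /* pretend skb_checksum() output */
	printf("folded checksum: 0x%04x\n", csum_fold(partial));
	return 0;
}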
diff --git a/net/core/sock.c b/net/core/sock.c index ee6cd2541d35..ab8fafadb4ba 100644 --- a/net/core/sock.c +++ b/net/core/sock.c | |||
@@ -270,7 +270,7 @@ out: | |||
270 | } | 270 | } |
271 | EXPORT_SYMBOL(sock_queue_rcv_skb); | 271 | EXPORT_SYMBOL(sock_queue_rcv_skb); |
272 | 272 | ||
273 | int sk_receive_skb(struct sock *sk, struct sk_buff *skb) | 273 | int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested) |
274 | { | 274 | { |
275 | int rc = NET_RX_SUCCESS; | 275 | int rc = NET_RX_SUCCESS; |
276 | 276 | ||
@@ -279,7 +279,10 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb) | |||
279 | 279 | ||
280 | skb->dev = NULL; | 280 | skb->dev = NULL; |
281 | 281 | ||
282 | bh_lock_sock(sk); | 282 | if (nested) |
283 | bh_lock_sock_nested(sk); | ||
284 | else | ||
285 | bh_lock_sock(sk); | ||
283 | if (!sock_owned_by_user(sk)) { | 286 | if (!sock_owned_by_user(sk)) { |
284 | /* | 287 | /* |
285 | * trylock + unlock semantics: | 288 | * trylock + unlock semantics: |
@@ -1527,7 +1530,7 @@ void sock_init_data(struct socket *sock, struct sock *sk) | |||
1527 | atomic_set(&sk->sk_refcnt, 1); | 1530 | atomic_set(&sk->sk_refcnt, 1); |
1528 | } | 1531 | } |
1529 | 1532 | ||
1530 | void fastcall lock_sock(struct sock *sk) | 1533 | void fastcall lock_sock_nested(struct sock *sk, int subclass) |
1531 | { | 1534 | { |
1532 | might_sleep(); | 1535 | might_sleep(); |
1533 | spin_lock_bh(&sk->sk_lock.slock); | 1536 | spin_lock_bh(&sk->sk_lock.slock); |
@@ -1538,11 +1541,11 @@ void fastcall lock_sock(struct sock *sk) | |||
1538 | /* | 1541 | /* |
1539 | * The sk_lock has mutex_lock() semantics here: | 1542 | * The sk_lock has mutex_lock() semantics here: |
1540 | */ | 1543 | */ |
1541 | mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_); | 1544 | mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_); |
1542 | local_bh_enable(); | 1545 | local_bh_enable(); |
1543 | } | 1546 | } |
1544 | 1547 | ||
1545 | EXPORT_SYMBOL(lock_sock); | 1548 | EXPORT_SYMBOL(lock_sock_nested); |
1546 | 1549 | ||
1547 | void fastcall release_sock(struct sock *sk) | 1550 | void fastcall release_sock(struct sock *sk) |
1548 | { | 1551 | { |
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index 02534131d88e..1e75b1585460 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c | |||
@@ -21,10 +21,6 @@ extern __u32 sysctl_rmem_max; | |||
21 | 21 | ||
22 | extern int sysctl_core_destroy_delay; | 22 | extern int sysctl_core_destroy_delay; |
23 | 23 | ||
24 | #ifdef CONFIG_NET_DIVERT | ||
25 | extern char sysctl_divert_version[]; | ||
26 | #endif /* CONFIG_NET_DIVERT */ | ||
27 | |||
28 | #ifdef CONFIG_XFRM | 24 | #ifdef CONFIG_XFRM |
29 | extern u32 sysctl_xfrm_aevent_etime; | 25 | extern u32 sysctl_xfrm_aevent_etime; |
30 | extern u32 sysctl_xfrm_aevent_rseqth; | 26 | extern u32 sysctl_xfrm_aevent_rseqth; |
@@ -105,16 +101,6 @@ ctl_table core_table[] = { | |||
105 | .mode = 0644, | 101 | .mode = 0644, |
106 | .proc_handler = &proc_dointvec | 102 | .proc_handler = &proc_dointvec |
107 | }, | 103 | }, |
108 | #ifdef CONFIG_NET_DIVERT | ||
109 | { | ||
110 | .ctl_name = NET_CORE_DIVERT_VERSION, | ||
111 | .procname = "divert_version", | ||
112 | .data = (void *)sysctl_divert_version, | ||
113 | .maxlen = 32, | ||
114 | .mode = 0444, | ||
115 | .proc_handler = &proc_dostring | ||
116 | }, | ||
117 | #endif /* CONFIG_NET_DIVERT */ | ||
118 | #ifdef CONFIG_XFRM | 104 | #ifdef CONFIG_XFRM |
119 | { | 105 | { |
120 | .ctl_name = NET_CORE_AEVENT_ETIME, | 106 | .ctl_name = NET_CORE_AEVENT_ETIME, |
diff --git a/net/core/utils.c b/net/core/utils.c index d93fe64f6693..61556065f07e 100644 --- a/net/core/utils.c +++ b/net/core/utils.c | |||
@@ -88,7 +88,7 @@ EXPORT_SYMBOL(in_aton); | |||
88 | #define IN6PTON_NULL 0x20000000 /* first/tail */ | 88 | #define IN6PTON_NULL 0x20000000 /* first/tail */ |
89 | #define IN6PTON_UNKNOWN 0x40000000 | 89 | #define IN6PTON_UNKNOWN 0x40000000 |
90 | 90 | ||
91 | static inline int digit2bin(char c, char delim) | 91 | static inline int digit2bin(char c, int delim) |
92 | { | 92 | { |
93 | if (c == delim || c == '\0') | 93 | if (c == delim || c == '\0') |
94 | return IN6PTON_DELIM; | 94 | return IN6PTON_DELIM; |
@@ -99,7 +99,7 @@ static inline int digit2bin(char c, char delim) | |||
99 | return IN6PTON_UNKNOWN; | 99 | return IN6PTON_UNKNOWN; |
100 | } | 100 | } |
101 | 101 | ||
102 | static inline int xdigit2bin(char c, char delim) | 102 | static inline int xdigit2bin(char c, int delim) |
103 | { | 103 | { |
104 | if (c == delim || c == '\0') | 104 | if (c == delim || c == '\0') |
105 | return IN6PTON_DELIM; | 105 | return IN6PTON_DELIM; |
@@ -113,12 +113,14 @@ static inline int xdigit2bin(char c, char delim) | |||
113 | return (IN6PTON_XDIGIT | (c - 'a' + 10)); | 113 | return (IN6PTON_XDIGIT | (c - 'a' + 10)); |
114 | if (c >= 'A' && c <= 'F') | 114 | if (c >= 'A' && c <= 'F') |
115 | return (IN6PTON_XDIGIT | (c - 'A' + 10)); | 115 | return (IN6PTON_XDIGIT | (c - 'A' + 10)); |
116 | if (delim == -1) | ||
117 | return IN6PTON_DELIM; | ||
116 | return IN6PTON_UNKNOWN; | 118 | return IN6PTON_UNKNOWN; |
117 | } | 119 | } |
118 | 120 | ||
119 | int in4_pton(const char *src, int srclen, | 121 | int in4_pton(const char *src, int srclen, |
120 | u8 *dst, | 122 | u8 *dst, |
121 | char delim, const char **end) | 123 | int delim, const char **end) |
122 | { | 124 | { |
123 | const char *s; | 125 | const char *s; |
124 | u8 *d; | 126 | u8 *d; |
@@ -173,7 +175,7 @@ EXPORT_SYMBOL(in4_pton); | |||
173 | 175 | ||
174 | int in6_pton(const char *src, int srclen, | 176 | int in6_pton(const char *src, int srclen, |
175 | u8 *dst, | 177 | u8 *dst, |
176 | char delim, const char **end) | 178 | int delim, const char **end) |
177 | { | 179 | { |
178 | const char *s, *tok = NULL; | 180 | const char *s, *tok = NULL; |
179 | u8 *d, *dc = NULL; | 181 | u8 *d, *dc = NULL; |
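in4_pton()/in6_pton() above widen the delimiter parameter from char to int and let xdigit2bin() treat -1 as "any terminator is acceptable". Presumably the widening is what makes that -1 sentinel portable, since plain char may be unsigned on some architectures; a tiny userspace illustration of the pitfall (the helper name is made up):

#include <stdio.h>

static int is_no_delim_sentinel(int delim)
{
	return delim == -1;
}

int main(void)
{
	char c = -1;    /* becomes 255 where char is unsigned (e.g. ARM, PowerPC) */
	int  i = -1;    /* always -1 */

	printf("char carries the sentinel: %d\n", is_no_delim_sentinel(c));
	printf("int  carries the sentinel: %d\n", is_no_delim_sentinel(i));
	return 0;
}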
diff --git a/net/dccp/Kconfig b/net/dccp/Kconfig index ef8919cca74b..b8a68dd41000 100644 --- a/net/dccp/Kconfig +++ b/net/dccp/Kconfig | |||
@@ -38,6 +38,9 @@ config IP_DCCP_DEBUG | |||
38 | ---help--- | 38 | ---help--- |
39 | Only use this if you're hacking DCCP. | 39 | Only use this if you're hacking DCCP. |
40 | 40 | ||
41 | When compiling DCCP as a module, this debugging output can be toggled | ||
42 | by setting the parameter dccp_debug of the `dccp' module to 0 or 1. | ||
43 | |||
41 | Just say N. | 44 | Just say N. |
42 | 45 | ||
43 | config NET_DCCPPROBE | 46 | config NET_DCCPPROBE |
@@ -49,7 +52,7 @@ config NET_DCCPPROBE | |||
49 | DCCP congestion avoidance modules. If you don't understand | 52 | DCCP congestion avoidance modules. If you don't understand |
50 | what was just said, you don't need it: say N. | 53 | what was just said, you don't need it: say N. |
51 | 54 | ||
52 | Documentation on how to use the packet generator can be found | 55 | Documentation on how to use DCCP connection probing can be found |
53 | at http://linux-net.osdl.org/index.php/DccpProbe | 56 | at http://linux-net.osdl.org/index.php/DccpProbe |
54 | 57 | ||
55 | To compile this code as a module, choose M here: the | 58 | To compile this code as a module, choose M here: the |
diff --git a/net/dccp/Makefile b/net/dccp/Makefile index 17ed99c46617..f4f8793aafff 100644 --- a/net/dccp/Makefile +++ b/net/dccp/Makefile | |||
@@ -1,13 +1,13 @@ | |||
1 | obj-$(CONFIG_IPV6) += dccp_ipv6.o | ||
2 | |||
3 | dccp_ipv6-y := ipv6.o | ||
4 | |||
5 | obj-$(CONFIG_IP_DCCP) += dccp.o dccp_ipv4.o | 1 | obj-$(CONFIG_IP_DCCP) += dccp.o dccp_ipv4.o |
6 | 2 | ||
7 | dccp-y := ccid.o feat.o input.o minisocks.o options.o output.o proto.o timer.o | 3 | dccp-y := ccid.o feat.o input.o minisocks.o options.o output.o proto.o timer.o |
8 | 4 | ||
9 | dccp_ipv4-y := ipv4.o | 5 | dccp_ipv4-y := ipv4.o |
10 | 6 | ||
7 | # build dccp_ipv6 as module whenever either IPv6 or DCCP is a module | ||
8 | obj-$(subst y,$(CONFIG_IP_DCCP),$(CONFIG_IPV6)) += dccp_ipv6.o | ||
9 | dccp_ipv6-y := ipv6.o | ||
10 | |||
11 | dccp-$(CONFIG_IP_DCCP_ACKVEC) += ackvec.o | 11 | dccp-$(CONFIG_IP_DCCP_ACKVEC) += ackvec.o |
12 | 12 | ||
13 | obj-$(CONFIG_INET_DCCP_DIAG) += dccp_diag.o | 13 | obj-$(CONFIG_INET_DCCP_DIAG) += dccp_diag.o |
diff --git a/net/dccp/ackvec.c b/net/dccp/ackvec.c index f8208874ac7d..bdf1bb7a82c0 100644 --- a/net/dccp/ackvec.c +++ b/net/dccp/ackvec.c | |||
@@ -67,15 +67,16 @@ static void dccp_ackvec_insert_avr(struct dccp_ackvec *av, | |||
67 | int dccp_insert_option_ackvec(struct sock *sk, struct sk_buff *skb) | 67 | int dccp_insert_option_ackvec(struct sock *sk, struct sk_buff *skb) |
68 | { | 68 | { |
69 | struct dccp_sock *dp = dccp_sk(sk); | 69 | struct dccp_sock *dp = dccp_sk(sk); |
70 | #ifdef CONFIG_IP_DCCP_DEBUG | ||
71 | const char *debug_prefix = dp->dccps_role == DCCP_ROLE_CLIENT ? | ||
72 | "CLIENT tx: " : "server tx: "; | ||
73 | #endif | ||
74 | struct dccp_ackvec *av = dp->dccps_hc_rx_ackvec; | 70 | struct dccp_ackvec *av = dp->dccps_hc_rx_ackvec; |
75 | int len = av->dccpav_vec_len + 2; | 71 | /* Figure out how many options we need to represent the ackvec */ |
72 | const u16 nr_opts = (av->dccpav_vec_len + | ||
73 | DCCP_MAX_ACKVEC_OPT_LEN - 1) / | ||
74 | DCCP_MAX_ACKVEC_OPT_LEN; | ||
75 | u16 len = av->dccpav_vec_len + 2 * nr_opts, i; | ||
76 | struct timeval now; | 76 | struct timeval now; |
77 | u32 elapsed_time; | 77 | u32 elapsed_time; |
78 | unsigned char *to, *from; | 78 | const unsigned char *tail, *from; |
79 | unsigned char *to; | ||
79 | struct dccp_ackvec_record *avr; | 80 | struct dccp_ackvec_record *avr; |
80 | 81 | ||
81 | if (DCCP_SKB_CB(skb)->dccpd_opt_len + len > DCCP_MAX_OPT_LEN) | 82 | if (DCCP_SKB_CB(skb)->dccpd_opt_len + len > DCCP_MAX_OPT_LEN) |
@@ -94,24 +95,37 @@ int dccp_insert_option_ackvec(struct sock *sk, struct sk_buff *skb) | |||
94 | 95 | ||
95 | DCCP_SKB_CB(skb)->dccpd_opt_len += len; | 96 | DCCP_SKB_CB(skb)->dccpd_opt_len += len; |
96 | 97 | ||
97 | to = skb_push(skb, len); | 98 | to = skb_push(skb, len); |
98 | *to++ = DCCPO_ACK_VECTOR_0; | ||
99 | *to++ = len; | ||
100 | |||
101 | len = av->dccpav_vec_len; | 99 | len = av->dccpav_vec_len; |
102 | from = av->dccpav_buf + av->dccpav_buf_head; | 100 | from = av->dccpav_buf + av->dccpav_buf_head; |
101 | tail = av->dccpav_buf + DCCP_MAX_ACKVEC_LEN; | ||
102 | |||
103 | for (i = 0; i < nr_opts; ++i) { | ||
104 | int copylen = len; | ||
103 | 105 | ||
104 | /* Check if buf_head wraps */ | 106 | if (len > DCCP_MAX_ACKVEC_OPT_LEN) |
105 | if ((int)av->dccpav_buf_head + len > DCCP_MAX_ACKVEC_LEN) { | 107 | copylen = DCCP_MAX_ACKVEC_OPT_LEN; |
106 | const u32 tailsize = DCCP_MAX_ACKVEC_LEN - av->dccpav_buf_head; | 108 | |
109 | *to++ = DCCPO_ACK_VECTOR_0; | ||
110 | *to++ = copylen + 2; | ||
111 | |||
112 | /* Check if buf_head wraps */ | ||
113 | if (from + copylen > tail) { | ||
114 | const u16 tailsize = tail - from; | ||
115 | |||
116 | memcpy(to, from, tailsize); | ||
117 | to += tailsize; | ||
118 | len -= tailsize; | ||
119 | copylen -= tailsize; | ||
120 | from = av->dccpav_buf; | ||
121 | } | ||
107 | 122 | ||
108 | memcpy(to, from, tailsize); | 123 | memcpy(to, from, copylen); |
109 | to += tailsize; | 124 | from += copylen; |
110 | len -= tailsize; | 125 | to += copylen; |
111 | from = av->dccpav_buf; | 126 | len -= copylen; |
112 | } | 127 | } |
113 | 128 | ||
114 | memcpy(to, from, len); | ||
115 | /* | 129 | /* |
116 | * From RFC 4340, A.2: | 130 | * From RFC 4340, A.2: |
117 | * | 131 | * |
@@ -129,9 +143,9 @@ int dccp_insert_option_ackvec(struct sock *sk, struct sk_buff *skb) | |||
129 | 143 | ||
130 | dccp_ackvec_insert_avr(av, avr); | 144 | dccp_ackvec_insert_avr(av, avr); |
131 | 145 | ||
132 | dccp_pr_debug("%sACK Vector 0, len=%d, ack_seqno=%llu, " | 146 | dccp_pr_debug("%s ACK Vector 0, len=%d, ack_seqno=%llu, " |
133 | "ack_ackno=%llu\n", | 147 | "ack_ackno=%llu\n", |
134 | debug_prefix, avr->dccpavr_sent_len, | 148 | dccp_role(sk), avr->dccpavr_sent_len, |
135 | (unsigned long long)avr->dccpavr_ack_seqno, | 149 | (unsigned long long)avr->dccpavr_ack_seqno, |
136 | (unsigned long long)avr->dccpavr_ack_ackno); | 150 | (unsigned long long)avr->dccpavr_ack_ackno); |
137 | return 0; | 151 | return 0; |
@@ -145,7 +159,6 @@ struct dccp_ackvec *dccp_ackvec_alloc(const gfp_t priority) | |||
145 | av->dccpav_buf_head = DCCP_MAX_ACKVEC_LEN - 1; | 159 | av->dccpav_buf_head = DCCP_MAX_ACKVEC_LEN - 1; |
146 | av->dccpav_buf_ackno = DCCP_MAX_SEQNO + 1; | 160 | av->dccpav_buf_ackno = DCCP_MAX_SEQNO + 1; |
147 | av->dccpav_buf_nonce = av->dccpav_buf_nonce = 0; | 161 | av->dccpav_buf_nonce = av->dccpav_buf_nonce = 0; |
148 | av->dccpav_ack_ptr = 0; | ||
149 | av->dccpav_time.tv_sec = 0; | 162 | av->dccpav_time.tv_sec = 0; |
150 | av->dccpav_time.tv_usec = 0; | 163 | av->dccpav_time.tv_usec = 0; |
151 | av->dccpav_vec_len = 0; | 164 | av->dccpav_vec_len = 0; |
@@ -174,13 +187,13 @@ void dccp_ackvec_free(struct dccp_ackvec *av) | |||
174 | } | 187 | } |
175 | 188 | ||
176 | static inline u8 dccp_ackvec_state(const struct dccp_ackvec *av, | 189 | static inline u8 dccp_ackvec_state(const struct dccp_ackvec *av, |
177 | const u8 index) | 190 | const u32 index) |
178 | { | 191 | { |
179 | return av->dccpav_buf[index] & DCCP_ACKVEC_STATE_MASK; | 192 | return av->dccpav_buf[index] & DCCP_ACKVEC_STATE_MASK; |
180 | } | 193 | } |
181 | 194 | ||
182 | static inline u8 dccp_ackvec_len(const struct dccp_ackvec *av, | 195 | static inline u8 dccp_ackvec_len(const struct dccp_ackvec *av, |
183 | const u8 index) | 196 | const u32 index) |
184 | { | 197 | { |
185 | return av->dccpav_buf[index] & DCCP_ACKVEC_LEN_MASK; | 198 | return av->dccpav_buf[index] & DCCP_ACKVEC_LEN_MASK; |
186 | } | 199 | } |
@@ -280,7 +293,7 @@ int dccp_ackvec_add(struct dccp_ackvec *av, const struct sock *sk, | |||
280 | * could reduce the complexity of this scan.) | 293 | * could reduce the complexity of this scan.) |
281 | */ | 294 | */ |
282 | u64 delta = dccp_delta_seqno(ackno, av->dccpav_buf_ackno); | 295 | u64 delta = dccp_delta_seqno(ackno, av->dccpav_buf_ackno); |
283 | u8 index = av->dccpav_buf_head; | 296 | u32 index = av->dccpav_buf_head; |
284 | 297 | ||
285 | while (1) { | 298 | while (1) { |
286 | const u8 len = dccp_ackvec_len(av, index); | 299 | const u8 len = dccp_ackvec_len(av, index); |
@@ -322,21 +335,18 @@ out_duplicate: | |||
322 | #ifdef CONFIG_IP_DCCP_DEBUG | 335 | #ifdef CONFIG_IP_DCCP_DEBUG |
323 | void dccp_ackvector_print(const u64 ackno, const unsigned char *vector, int len) | 336 | void dccp_ackvector_print(const u64 ackno, const unsigned char *vector, int len) |
324 | { | 337 | { |
325 | if (!dccp_debug) | 338 | dccp_pr_debug_cat("ACK vector len=%d, ackno=%llu |", len, |
326 | return; | 339 | (unsigned long long)ackno); |
327 | |||
328 | printk("ACK vector len=%d, ackno=%llu |", len, | ||
329 | (unsigned long long)ackno); | ||
330 | 340 | ||
331 | while (len--) { | 341 | while (len--) { |
332 | const u8 state = (*vector & DCCP_ACKVEC_STATE_MASK) >> 6; | 342 | const u8 state = (*vector & DCCP_ACKVEC_STATE_MASK) >> 6; |
333 | const u8 rl = *vector & DCCP_ACKVEC_LEN_MASK; | 343 | const u8 rl = *vector & DCCP_ACKVEC_LEN_MASK; |
334 | 344 | ||
335 | printk("%d,%d|", state, rl); | 345 | dccp_pr_debug_cat("%d,%d|", state, rl); |
336 | ++vector; | 346 | ++vector; |
337 | } | 347 | } |
338 | 348 | ||
339 | printk("\n"); | 349 | dccp_pr_debug_cat("\n"); |
340 | } | 350 | } |
341 | 351 | ||
342 | void dccp_ackvec_print(const struct dccp_ackvec *av) | 352 | void dccp_ackvec_print(const struct dccp_ackvec *av) |
@@ -380,24 +390,20 @@ void dccp_ackvec_check_rcv_ackno(struct dccp_ackvec *av, struct sock *sk, | |||
380 | */ | 390 | */ |
381 | list_for_each_entry_reverse(avr, &av->dccpav_records, dccpavr_node) { | 391 | list_for_each_entry_reverse(avr, &av->dccpav_records, dccpavr_node) { |
382 | if (ackno == avr->dccpavr_ack_seqno) { | 392 | if (ackno == avr->dccpavr_ack_seqno) { |
383 | #ifdef CONFIG_IP_DCCP_DEBUG | 393 | dccp_pr_debug("%s ACK packet 0, len=%d, ack_seqno=%llu, " |
384 | struct dccp_sock *dp = dccp_sk(sk); | ||
385 | const char *debug_prefix = dp->dccps_role == DCCP_ROLE_CLIENT ? | ||
386 | "CLIENT rx ack: " : "server rx ack: "; | ||
387 | #endif | ||
388 | dccp_pr_debug("%sACK packet 0, len=%d, ack_seqno=%llu, " | ||
389 | "ack_ackno=%llu, ACKED!\n", | 394 | "ack_ackno=%llu, ACKED!\n", |
390 | debug_prefix, 1, | 395 | dccp_role(sk), 1, |
391 | (unsigned long long)avr->dccpavr_ack_seqno, | 396 | (unsigned long long)avr->dccpavr_ack_seqno, |
392 | (unsigned long long)avr->dccpavr_ack_ackno); | 397 | (unsigned long long)avr->dccpavr_ack_ackno); |
393 | dccp_ackvec_throw_record(av, avr); | 398 | dccp_ackvec_throw_record(av, avr); |
394 | break; | 399 | break; |
395 | } | 400 | } else if (avr->dccpavr_ack_seqno > ackno) |
401 | break; /* old news */ | ||
396 | } | 402 | } |
397 | } | 403 | } |
398 | 404 | ||
399 | static void dccp_ackvec_check_rcv_ackvector(struct dccp_ackvec *av, | 405 | static void dccp_ackvec_check_rcv_ackvector(struct dccp_ackvec *av, |
400 | struct sock *sk, u64 ackno, | 406 | struct sock *sk, u64 *ackno, |
401 | const unsigned char len, | 407 | const unsigned char len, |
402 | const unsigned char *vector) | 408 | const unsigned char *vector) |
403 | { | 409 | { |
@@ -420,7 +426,7 @@ static void dccp_ackvec_check_rcv_ackvector(struct dccp_ackvec *av, | |||
420 | const u8 rl = *vector & DCCP_ACKVEC_LEN_MASK; | 426 | const u8 rl = *vector & DCCP_ACKVEC_LEN_MASK; |
421 | u64 ackno_end_rl; | 427 | u64 ackno_end_rl; |
422 | 428 | ||
423 | dccp_set_seqno(&ackno_end_rl, ackno - rl); | 429 | dccp_set_seqno(&ackno_end_rl, *ackno - rl); |
424 | 430 | ||
425 | /* | 431 | /* |
426 | * If our AVR sequence number is greater than the ack, go | 432 | * If our AVR sequence number is greater than the ack, go |
@@ -428,25 +434,19 @@ static void dccp_ackvec_check_rcv_ackvector(struct dccp_ackvec *av, | |||
428 | */ | 434 | */ |
429 | list_for_each_entry_from(avr, &av->dccpav_records, | 435 | list_for_each_entry_from(avr, &av->dccpav_records, |
430 | dccpavr_node) { | 436 | dccpavr_node) { |
431 | if (!after48(avr->dccpavr_ack_seqno, ackno)) | 437 | if (!after48(avr->dccpavr_ack_seqno, *ackno)) |
432 | goto found; | 438 | goto found; |
433 | } | 439 | } |
434 | /* End of the dccpav_records list, not found, exit */ | 440 | /* End of the dccpav_records list, not found, exit */ |
435 | break; | 441 | break; |
436 | found: | 442 | found: |
437 | if (between48(avr->dccpavr_ack_seqno, ackno_end_rl, ackno)) { | 443 | if (between48(avr->dccpavr_ack_seqno, ackno_end_rl, *ackno)) { |
438 | const u8 state = *vector & DCCP_ACKVEC_STATE_MASK; | 444 | const u8 state = *vector & DCCP_ACKVEC_STATE_MASK; |
439 | if (state != DCCP_ACKVEC_STATE_NOT_RECEIVED) { | 445 | if (state != DCCP_ACKVEC_STATE_NOT_RECEIVED) { |
440 | #ifdef CONFIG_IP_DCCP_DEBUG | 446 | dccp_pr_debug("%s ACK vector 0, len=%d, " |
441 | struct dccp_sock *dp = dccp_sk(sk); | ||
442 | const char *debug_prefix = | ||
443 | dp->dccps_role == DCCP_ROLE_CLIENT ? | ||
444 | "CLIENT rx ack: " : "server rx ack: "; | ||
445 | #endif | ||
446 | dccp_pr_debug("%sACK vector 0, len=%d, " | ||
447 | "ack_seqno=%llu, ack_ackno=%llu, " | 447 | "ack_seqno=%llu, ack_ackno=%llu, " |
448 | "ACKED!\n", | 448 | "ACKED!\n", |
449 | debug_prefix, len, | 449 | dccp_role(sk), len, |
450 | (unsigned long long) | 450 | (unsigned long long) |
451 | avr->dccpavr_ack_seqno, | 451 | avr->dccpavr_ack_seqno, |
452 | (unsigned long long) | 452 | (unsigned long long) |
@@ -460,27 +460,23 @@ found: | |||
460 | */ | 460 | */ |
461 | } | 461 | } |
462 | 462 | ||
463 | dccp_set_seqno(&ackno, ackno_end_rl - 1); | 463 | dccp_set_seqno(ackno, ackno_end_rl - 1); |
464 | ++vector; | 464 | ++vector; |
465 | } | 465 | } |
466 | } | 466 | } |
467 | 467 | ||
468 | int dccp_ackvec_parse(struct sock *sk, const struct sk_buff *skb, | 468 | int dccp_ackvec_parse(struct sock *sk, const struct sk_buff *skb, |
469 | const u8 opt, const u8 *value, const u8 len) | 469 | u64 *ackno, const u8 opt, const u8 *value, const u8 len) |
470 | { | 470 | { |
471 | if (len > DCCP_MAX_ACKVEC_LEN) | 471 | if (len > DCCP_MAX_ACKVEC_OPT_LEN) |
472 | return -1; | 472 | return -1; |
473 | 473 | ||
474 | /* dccp_ackvector_print(DCCP_SKB_CB(skb)->dccpd_ack_seq, value, len); */ | 474 | /* dccp_ackvector_print(DCCP_SKB_CB(skb)->dccpd_ack_seq, value, len); */ |
475 | dccp_ackvec_check_rcv_ackvector(dccp_sk(sk)->dccps_hc_rx_ackvec, sk, | 475 | dccp_ackvec_check_rcv_ackvector(dccp_sk(sk)->dccps_hc_rx_ackvec, sk, |
476 | DCCP_SKB_CB(skb)->dccpd_ack_seq, | 476 | ackno, len, value); |
477 | len, value); | ||
478 | return 0; | 477 | return 0; |
479 | } | 478 | } |
480 | 479 | ||
481 | static char dccp_ackvec_slab_msg[] __initdata = | ||
482 | KERN_CRIT "DCCP: Unable to create ack vectors slab caches\n"; | ||
483 | |||
484 | int __init dccp_ackvec_init(void) | 480 | int __init dccp_ackvec_init(void) |
485 | { | 481 | { |
486 | dccp_ackvec_slab = kmem_cache_create("dccp_ackvec", | 482 | dccp_ackvec_slab = kmem_cache_create("dccp_ackvec", |
@@ -502,7 +498,7 @@ out_destroy_slab: | |||
502 | kmem_cache_destroy(dccp_ackvec_slab); | 498 | kmem_cache_destroy(dccp_ackvec_slab); |
503 | dccp_ackvec_slab = NULL; | 499 | dccp_ackvec_slab = NULL; |
504 | out_err: | 500 | out_err: |
505 | printk(dccp_ackvec_slab_msg); | 501 | DCCP_CRIT("Unable to create Ack Vector slab cache"); |
506 | return -ENOBUFS; | 502 | return -ENOBUFS; |
507 | } | 503 | } |
508 | 504 | ||
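The dccp_insert_option_ackvec() rewrite above allows an ack vector longer than DCCP_MAX_ACKVEC_OPT_LEN (253) bytes to be sent as several Ack Vector options, each with a 2-byte type/length header, copied out of the circular dccpav_buf which may wrap mid-chunk. A standalone userspace sketch of that chunk-and-wrap copy (buffer size, starting offset and the helper are illustrative):

#include <stdio.h>
#include <string.h>

#define MAX_OPT_PAYLOAD 253          /* DCCP_MAX_ACKVEC_OPT_LEN */
#define BUF_LEN         (2 * MAX_OPT_PAYLOAD)

/* Copy `len` bytes starting at offset `head` of circular buffer `buf`
 * into `out` as a sequence of (type, length, payload...) options.
 * Returns the number of bytes written to `out`. */
static size_t emit_options(const unsigned char *buf, size_t head, size_t len,
			   unsigned char *out)
{
	const unsigned char *from = buf + head;
	const unsigned char *tail = buf + BUF_LEN;
	unsigned char *to = out;

	while (len) {
		size_t copylen = len > MAX_OPT_PAYLOAD ? MAX_OPT_PAYLOAD : len;
		size_t avail = (size_t)(tail - from);   /* bytes before the wrap */

		*to++ = 38;                                   /* DCCPO_ACK_VECTOR_0 (option type 38) */
		*to++ = (unsigned char)(copylen + 2);         /* length includes the 2-byte header */

		if (copylen > avail) {          /* buffer wraps: copy in two parts */
			memcpy(to, from, avail);
			to += avail;
			copylen -= avail;
			len -= avail;
			from = buf;
		}
		memcpy(to, from, copylen);
		from += copylen;
		to += copylen;
		len -= copylen;
	}
	return (size_t)(to - out);
}

int main(void)
{
	unsigned char buf[BUF_LEN], out[BUF_LEN + 16];

	memset(buf, 0xaa, sizeof(buf));
	/* 300 bytes starting near the end of the buffer -> two options */
	size_t n = emit_options(buf, BUF_LEN - 40, 300, out);
	printf("emitted %zu bytes of options\n", n);
	return 0;
}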
diff --git a/net/dccp/ackvec.h b/net/dccp/ackvec.h index cf8f20ce23a9..96504a3b16e4 100644 --- a/net/dccp/ackvec.h +++ b/net/dccp/ackvec.h | |||
@@ -17,7 +17,9 @@ | |||
17 | #include <linux/types.h> | 17 | #include <linux/types.h> |
18 | 18 | ||
19 | /* Read about the ECN nonce to see why it is 253 */ | 19 | /* Read about the ECN nonce to see why it is 253 */ |
20 | #define DCCP_MAX_ACKVEC_LEN 253 | 20 | #define DCCP_MAX_ACKVEC_OPT_LEN 253 |
21 | /* We can spread an ack vector across multiple options */ | ||
22 | #define DCCP_MAX_ACKVEC_LEN (DCCP_MAX_ACKVEC_OPT_LEN * 2) | ||
21 | 23 | ||
22 | #define DCCP_ACKVEC_STATE_RECEIVED 0 | 24 | #define DCCP_ACKVEC_STATE_RECEIVED 0 |
23 | #define DCCP_ACKVEC_STATE_ECN_MARKED (1 << 6) | 25 | #define DCCP_ACKVEC_STATE_ECN_MARKED (1 << 6) |
@@ -41,7 +43,6 @@ | |||
41 | * Ack Vectors it has recently sent. For each packet sent carrying an | 43 | * Ack Vectors it has recently sent. For each packet sent carrying an |
42 | * Ack Vector, it remembers four variables: | 44 | * Ack Vector, it remembers four variables: |
43 | * | 45 | * |
44 | * @dccpav_ack_ptr - the value of buf_head at the time of acknowledgement. | ||
45 | * @dccpav_records - list of dccp_ackvec_record | 46 | * @dccpav_records - list of dccp_ackvec_record |
46 | * @dccpav_ack_nonce - the one-bit sum of the ECN Nonces for all State 0. | 47 | * @dccpav_ack_nonce - the one-bit sum of the ECN Nonces for all State 0. |
47 | * | 48 | * |
@@ -52,9 +53,8 @@ struct dccp_ackvec { | |||
52 | u64 dccpav_buf_ackno; | 53 | u64 dccpav_buf_ackno; |
53 | struct list_head dccpav_records; | 54 | struct list_head dccpav_records; |
54 | struct timeval dccpav_time; | 55 | struct timeval dccpav_time; |
55 | u8 dccpav_buf_head; | 56 | u16 dccpav_buf_head; |
56 | u8 dccpav_ack_ptr; | 57 | u16 dccpav_vec_len; |
57 | u8 dccpav_vec_len; | ||
58 | u8 dccpav_buf_nonce; | 58 | u8 dccpav_buf_nonce; |
59 | u8 dccpav_ack_nonce; | 59 | u8 dccpav_ack_nonce; |
60 | u8 dccpav_buf[DCCP_MAX_ACKVEC_LEN]; | 60 | u8 dccpav_buf[DCCP_MAX_ACKVEC_LEN]; |
@@ -77,9 +77,9 @@ struct dccp_ackvec_record { | |||
77 | struct list_head dccpavr_node; | 77 | struct list_head dccpavr_node; |
78 | u64 dccpavr_ack_seqno; | 78 | u64 dccpavr_ack_seqno; |
79 | u64 dccpavr_ack_ackno; | 79 | u64 dccpavr_ack_ackno; |
80 | u8 dccpavr_ack_ptr; | 80 | u16 dccpavr_ack_ptr; |
81 | u16 dccpavr_sent_len; | ||
81 | u8 dccpavr_ack_nonce; | 82 | u8 dccpavr_ack_nonce; |
82 | u8 dccpavr_sent_len; | ||
83 | }; | 83 | }; |
84 | 84 | ||
85 | struct sock; | 85 | struct sock; |
@@ -98,7 +98,8 @@ extern int dccp_ackvec_add(struct dccp_ackvec *av, const struct sock *sk, | |||
98 | extern void dccp_ackvec_check_rcv_ackno(struct dccp_ackvec *av, | 98 | extern void dccp_ackvec_check_rcv_ackno(struct dccp_ackvec *av, |
99 | struct sock *sk, const u64 ackno); | 99 | struct sock *sk, const u64 ackno); |
100 | extern int dccp_ackvec_parse(struct sock *sk, const struct sk_buff *skb, | 100 | extern int dccp_ackvec_parse(struct sock *sk, const struct sk_buff *skb, |
101 | const u8 opt, const u8 *value, const u8 len); | 101 | u64 *ackno, const u8 opt, |
102 | const u8 *value, const u8 len); | ||
102 | 103 | ||
103 | extern int dccp_insert_option_ackvec(struct sock *sk, struct sk_buff *skb); | 104 | extern int dccp_insert_option_ackvec(struct sock *sk, struct sk_buff *skb); |
104 | 105 | ||
@@ -137,7 +138,8 @@ static inline void dccp_ackvec_check_rcv_ackno(struct dccp_ackvec *av, | |||
137 | } | 138 | } |
138 | 139 | ||
139 | static inline int dccp_ackvec_parse(struct sock *sk, const struct sk_buff *skb, | 140 | static inline int dccp_ackvec_parse(struct sock *sk, const struct sk_buff *skb, |
140 | const u8 opt, const u8 *value, const u8 len) | 141 | const u64 *ackno, const u8 opt, |
142 | const u8 *value, const u8 len) | ||
141 | { | 143 | { |
142 | return -1; | 144 | return -1; |
143 | } | 145 | } |
diff --git a/net/dccp/ccid.h b/net/dccp/ccid.h index f7eb6c613414..c7c29514dce8 100644 --- a/net/dccp/ccid.h +++ b/net/dccp/ccid.h | |||
@@ -52,9 +52,9 @@ struct ccid_operations { | |||
52 | unsigned char len, u16 idx, | 52 | unsigned char len, u16 idx, |
53 | unsigned char* value); | 53 | unsigned char* value); |
54 | int (*ccid_hc_tx_send_packet)(struct sock *sk, | 54 | int (*ccid_hc_tx_send_packet)(struct sock *sk, |
55 | struct sk_buff *skb, int len); | 55 | struct sk_buff *skb); |
56 | void (*ccid_hc_tx_packet_sent)(struct sock *sk, int more, | 56 | void (*ccid_hc_tx_packet_sent)(struct sock *sk, |
57 | int len); | 57 | int more, unsigned int len); |
58 | void (*ccid_hc_rx_get_info)(struct sock *sk, | 58 | void (*ccid_hc_rx_get_info)(struct sock *sk, |
59 | struct tcp_info *info); | 59 | struct tcp_info *info); |
60 | void (*ccid_hc_tx_get_info)(struct sock *sk, | 60 | void (*ccid_hc_tx_get_info)(struct sock *sk, |
@@ -94,16 +94,16 @@ extern void ccid_hc_rx_delete(struct ccid *ccid, struct sock *sk); | |||
94 | extern void ccid_hc_tx_delete(struct ccid *ccid, struct sock *sk); | 94 | extern void ccid_hc_tx_delete(struct ccid *ccid, struct sock *sk); |
95 | 95 | ||
96 | static inline int ccid_hc_tx_send_packet(struct ccid *ccid, struct sock *sk, | 96 | static inline int ccid_hc_tx_send_packet(struct ccid *ccid, struct sock *sk, |
97 | struct sk_buff *skb, int len) | 97 | struct sk_buff *skb) |
98 | { | 98 | { |
99 | int rc = 0; | 99 | int rc = 0; |
100 | if (ccid->ccid_ops->ccid_hc_tx_send_packet != NULL) | 100 | if (ccid->ccid_ops->ccid_hc_tx_send_packet != NULL) |
101 | rc = ccid->ccid_ops->ccid_hc_tx_send_packet(sk, skb, len); | 101 | rc = ccid->ccid_ops->ccid_hc_tx_send_packet(sk, skb); |
102 | return rc; | 102 | return rc; |
103 | } | 103 | } |
104 | 104 | ||
105 | static inline void ccid_hc_tx_packet_sent(struct ccid *ccid, struct sock *sk, | 105 | static inline void ccid_hc_tx_packet_sent(struct ccid *ccid, struct sock *sk, |
106 | int more, int len) | 106 | int more, unsigned int len) |
107 | { | 107 | { |
108 | if (ccid->ccid_ops->ccid_hc_tx_packet_sent != NULL) | 108 | if (ccid->ccid_ops->ccid_hc_tx_packet_sent != NULL) |
109 | ccid->ccid_ops->ccid_hc_tx_packet_sent(sk, more, len); | 109 | ccid->ccid_ops->ccid_hc_tx_packet_sent(sk, more, len); |
diff --git a/net/dccp/ccids/Kconfig b/net/dccp/ccids/Kconfig index 8533dabfb9f8..dac89166eb18 100644 --- a/net/dccp/ccids/Kconfig +++ b/net/dccp/ccids/Kconfig | |||
@@ -28,13 +28,20 @@ config IP_DCCP_CCID2 | |||
28 | This text was extracted from RFC 4340 (sec. 10.1), | 28 | This text was extracted from RFC 4340 (sec. 10.1), |
29 | http://www.ietf.org/rfc/rfc4340.txt | 29 | http://www.ietf.org/rfc/rfc4340.txt |
30 | 30 | ||
31 | To compile this CCID as a module, choose M here: the module will be | ||
32 | called dccp_ccid2. | ||
33 | |||
31 | If in doubt, say M. | 34 | If in doubt, say M. |
32 | 35 | ||
33 | config IP_DCCP_CCID2_DEBUG | 36 | config IP_DCCP_CCID2_DEBUG |
34 | bool "CCID2 debug" | 37 | bool "CCID2 debugging messages" |
35 | depends on IP_DCCP_CCID2 | 38 | depends on IP_DCCP_CCID2 |
36 | ---help--- | 39 | ---help--- |
37 | Enable CCID2 debug messages. | 40 | Enable CCID2-specific debugging messages. |
41 | |||
42 | When compiling CCID2 as a module, this debugging output can | ||
43 | additionally be toggled by setting the ccid2_debug module | ||
44 | parameter to 0 or 1. | ||
38 | 45 | ||
39 | If in doubt, say N. | 46 | If in doubt, say N. |
40 | 47 | ||
@@ -62,10 +69,24 @@ config IP_DCCP_CCID3 | |||
62 | This text was extracted from RFC 4340 (sec. 10.2), | 69 | This text was extracted from RFC 4340 (sec. 10.2), |
63 | http://www.ietf.org/rfc/rfc4340.txt | 70 | http://www.ietf.org/rfc/rfc4340.txt |
64 | 71 | ||
72 | To compile this CCID as a module, choose M here: the module will be | ||
73 | called dccp_ccid3. | ||
74 | |||
65 | If in doubt, say M. | 75 | If in doubt, say M. |
66 | 76 | ||
67 | config IP_DCCP_TFRC_LIB | 77 | config IP_DCCP_TFRC_LIB |
68 | depends on IP_DCCP_CCID3 | 78 | depends on IP_DCCP_CCID3 |
69 | def_tristate IP_DCCP_CCID3 | 79 | def_tristate IP_DCCP_CCID3 |
70 | 80 | ||
81 | config IP_DCCP_CCID3_DEBUG | ||
82 | bool "CCID3 debugging messages" | ||
83 | depends on IP_DCCP_CCID3 | ||
84 | ---help--- | ||
85 | Enable CCID3-specific debugging messages. | ||
86 | |||
87 | When compiling CCID3 as a module, this debugging output can | ||
88 | additionally be toggled by setting the ccid3_debug module | ||
89 | parameter to 0 or 1. | ||
90 | |||
91 | If in doubt, say N. | ||
71 | endmenu | 92 | endmenu |
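The expanded help texts describe a two-level switch: CONFIG_IP_DCCP_CCID2_DEBUG / CONFIG_IP_DCCP_CCID3_DEBUG compile the messages in, and the ccid2_debug / ccid3_debug module parameters enable them at load time, for example modprobe dccp_ccid3 ccid3_debug=1 (module and parameter names as given above). The wiring reduces to the pattern below; the conditional printk is the open-coded form that the new DCCP_PR_DEBUG() helper replaces in the ccid sources further down:

#ifdef CONFIG_IP_DCCP_CCID3_DEBUG
static int ccid3_debug;
module_param(ccid3_debug, int, 0444);	/* world-readable, set when the module is loaded */
MODULE_PARM_DESC(ccid3_debug, "Enable debug messages");

#define ccid3_pr_debug(format, a...) \
	do { if (ccid3_debug) \
		printk(KERN_DEBUG "%s: " format, __FUNCTION__, ##a); \
	} while (0)
#else
#define ccid3_pr_debug(format, a...)	/* compiled out entirely */
#endif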
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c index 162032baeac0..2555be8f4790 100644 --- a/net/dccp/ccids/ccid2.c +++ b/net/dccp/ccids/ccid2.c | |||
@@ -33,18 +33,11 @@ | |||
33 | #include "../dccp.h" | 33 | #include "../dccp.h" |
34 | #include "ccid2.h" | 34 | #include "ccid2.h" |
35 | 35 | ||
36 | static int ccid2_debug; | ||
37 | 36 | ||
38 | #ifdef CONFIG_IP_DCCP_CCID2_DEBUG | 37 | #ifdef CONFIG_IP_DCCP_CCID2_DEBUG |
39 | #define ccid2_pr_debug(format, a...) \ | 38 | static int ccid2_debug; |
40 | do { if (ccid2_debug) \ | 39 | #define ccid2_pr_debug(format, a...) DCCP_PR_DEBUG(ccid2_debug, format, ##a) |
41 | printk(KERN_DEBUG "%s: " format, __FUNCTION__, ##a); \ | ||
42 | } while (0) | ||
43 | #else | ||
44 | #define ccid2_pr_debug(format, a...) | ||
45 | #endif | ||
46 | 40 | ||
47 | #ifdef CONFIG_IP_DCCP_CCID2_DEBUG | ||
48 | static void ccid2_hc_tx_check_sanity(const struct ccid2_hc_tx_sock *hctx) | 41 | static void ccid2_hc_tx_check_sanity(const struct ccid2_hc_tx_sock *hctx) |
49 | { | 42 | { |
50 | int len = 0; | 43 | int len = 0; |
@@ -86,7 +79,8 @@ static void ccid2_hc_tx_check_sanity(const struct ccid2_hc_tx_sock *hctx) | |||
86 | BUG_ON(len != hctx->ccid2hctx_seqbufc * CCID2_SEQBUF_LEN); | 79 | BUG_ON(len != hctx->ccid2hctx_seqbufc * CCID2_SEQBUF_LEN); |
87 | } | 80 | } |
88 | #else | 81 | #else |
89 | #define ccid2_hc_tx_check_sanity(hctx) do {} while (0) | 82 | #define ccid2_pr_debug(format, a...) |
83 | #define ccid2_hc_tx_check_sanity(hctx) | ||
90 | #endif | 84 | #endif |
91 | 85 | ||
92 | static int ccid2_hc_tx_alloc_seq(struct ccid2_hc_tx_sock *hctx, int num, | 86 | static int ccid2_hc_tx_alloc_seq(struct ccid2_hc_tx_sock *hctx, int num, |
@@ -131,8 +125,7 @@ static int ccid2_hc_tx_alloc_seq(struct ccid2_hc_tx_sock *hctx, int num, | |||
131 | return 0; | 125 | return 0; |
132 | } | 126 | } |
133 | 127 | ||
134 | static int ccid2_hc_tx_send_packet(struct sock *sk, | 128 | static int ccid2_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb) |
135 | struct sk_buff *skb, int len) | ||
136 | { | 129 | { |
137 | struct ccid2_hc_tx_sock *hctx; | 130 | struct ccid2_hc_tx_sock *hctx; |
138 | 131 | ||
@@ -274,7 +267,7 @@ static void ccid2_start_rto_timer(struct sock *sk) | |||
274 | jiffies + hctx->ccid2hctx_rto); | 267 | jiffies + hctx->ccid2hctx_rto); |
275 | } | 268 | } |
276 | 269 | ||
277 | static void ccid2_hc_tx_packet_sent(struct sock *sk, int more, int len) | 270 | static void ccid2_hc_tx_packet_sent(struct sock *sk, int more, unsigned int len) |
278 | { | 271 | { |
279 | struct dccp_sock *dp = dccp_sk(sk); | 272 | struct dccp_sock *dp = dccp_sk(sk); |
280 | struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk); | 273 | struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk); |
@@ -426,7 +419,7 @@ static int ccid2_ackvector(struct sock *sk, struct sk_buff *skb, int offset, | |||
426 | return -1; | 419 | return -1; |
427 | 420 | ||
428 | out_invalid_option: | 421 | out_invalid_option: |
429 | BUG_ON(1); /* should never happen... options were previously parsed ! */ | 422 | DCCP_BUG("Invalid option - this should not happen (previous parsing)!"); |
430 | return -1; | 423 | return -1; |
431 | } | 424 | } |
432 | 425 | ||
@@ -619,7 +612,17 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
619 | } | 612 | } |
620 | 613 | ||
621 | ackno = DCCP_SKB_CB(skb)->dccpd_ack_seq; | 614 | ackno = DCCP_SKB_CB(skb)->dccpd_ack_seq; |
622 | seqp = hctx->ccid2hctx_seqh->ccid2s_prev; | 615 | if (after48(ackno, hctx->ccid2hctx_high_ack)) |
616 | hctx->ccid2hctx_high_ack = ackno; | ||
617 | |||
618 | seqp = hctx->ccid2hctx_seqt; | ||
619 | while (before48(seqp->ccid2s_seq, ackno)) { | ||
620 | seqp = seqp->ccid2s_next; | ||
621 | if (seqp == hctx->ccid2hctx_seqh) { | ||
622 | seqp = hctx->ccid2hctx_seqh->ccid2s_prev; | ||
623 | break; | ||
624 | } | ||
625 | } | ||
623 | 626 | ||
624 | /* If in slow-start, cwnd can increase at most Ack Ratio / 2 packets for | 627 | /* If in slow-start, cwnd can increase at most Ack Ratio / 2 packets for |
625 | * this single ack. I round up. | 628 | * this single ack. I round up. |
@@ -697,7 +700,14 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
697 | /* The state about what is acked should be correct now | 700 | /* The state about what is acked should be correct now |
698 | * Check for NUMDUPACK | 701 | * Check for NUMDUPACK |
699 | */ | 702 | */ |
700 | seqp = hctx->ccid2hctx_seqh->ccid2s_prev; | 703 | seqp = hctx->ccid2hctx_seqt; |
704 | while (before48(seqp->ccid2s_seq, hctx->ccid2hctx_high_ack)) { | ||
705 | seqp = seqp->ccid2s_next; | ||
706 | if (seqp == hctx->ccid2hctx_seqh) { | ||
707 | seqp = hctx->ccid2hctx_seqh->ccid2s_prev; | ||
708 | break; | ||
709 | } | ||
710 | } | ||
701 | done = 0; | 711 | done = 0; |
702 | while (1) { | 712 | while (1) { |
703 | if (seqp->ccid2s_acked) { | 713 | if (seqp->ccid2s_acked) { |
@@ -771,6 +781,7 @@ static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk) | |||
771 | hctx->ccid2hctx_lastrtt = 0; | 781 | hctx->ccid2hctx_lastrtt = 0; |
772 | hctx->ccid2hctx_rpdupack = -1; | 782 | hctx->ccid2hctx_rpdupack = -1; |
773 | hctx->ccid2hctx_last_cong = jiffies; | 783 | hctx->ccid2hctx_last_cong = jiffies; |
784 | hctx->ccid2hctx_high_ack = 0; | ||
774 | 785 | ||
775 | hctx->ccid2hctx_rtotimer.function = &ccid2_hc_tx_rto_expire; | 786 | hctx->ccid2hctx_rtotimer.function = &ccid2_hc_tx_rto_expire; |
776 | hctx->ccid2hctx_rtotimer.data = (unsigned long)sk; | 787 | hctx->ccid2hctx_rtotimer.data = (unsigned long)sk; |
@@ -823,8 +834,10 @@ static struct ccid_operations ccid2 = { | |||
823 | .ccid_hc_rx_packet_recv = ccid2_hc_rx_packet_recv, | 834 | .ccid_hc_rx_packet_recv = ccid2_hc_rx_packet_recv, |
824 | }; | 835 | }; |
825 | 836 | ||
837 | #ifdef CONFIG_IP_DCCP_CCID2_DEBUG | ||
826 | module_param(ccid2_debug, int, 0444); | 838 | module_param(ccid2_debug, int, 0444); |
827 | MODULE_PARM_DESC(ccid2_debug, "Enable debug messages"); | 839 | MODULE_PARM_DESC(ccid2_debug, "Enable debug messages"); |
840 | #endif | ||
828 | 841 | ||
829 | static __init int ccid2_module_init(void) | 842 | static __init int ccid2_module_init(void) |
830 | { | 843 | { |
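The reworked ACK handling above records the highest acknowledgement seen (ccid2hctx_high_ack) and walks the sequence list forward from the tail using before48()/after48(), helpers defined elsewhere in the DCCP code for its 48-bit sequence numbers. A self-contained sketch of what such circular 48-bit comparisons look like (illustrative only, not the kernel's exact macros):

#include <stdint.h>

#define SEQ48_MASK	0xFFFFFFFFFFFFULL	/* DCCP sequence numbers are 48 bit */

static inline uint64_t sub48(uint64_t a, uint64_t b)
{
	return (a - b) & SEQ48_MASK;		/* circular distance from b to a */
}

/* a is "after" b if the circular distance b -> a is non-zero and below half the space */
static inline int after48(uint64_t a, uint64_t b)
{
	uint64_t d = sub48(a, b);

	return d != 0 && d < (1ULL << 47);
}

static inline int before48(uint64_t a, uint64_t b)
{
	return after48(b, a);
}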
diff --git a/net/dccp/ccids/ccid2.h b/net/dccp/ccids/ccid2.h index 5b2ef4acb300..ebd79499c85a 100644 --- a/net/dccp/ccids/ccid2.h +++ b/net/dccp/ccids/ccid2.h | |||
@@ -35,7 +35,7 @@ struct ccid2_seq { | |||
35 | struct ccid2_seq *ccid2s_next; | 35 | struct ccid2_seq *ccid2s_next; |
36 | }; | 36 | }; |
37 | 37 | ||
38 | #define CCID2_SEQBUF_LEN 256 | 38 | #define CCID2_SEQBUF_LEN 1024 |
39 | #define CCID2_SEQBUF_MAX 128 | 39 | #define CCID2_SEQBUF_MAX 128 |
40 | 40 | ||
41 | /** struct ccid2_hc_tx_sock - CCID2 TX half connection | 41 | /** struct ccid2_hc_tx_sock - CCID2 TX half connection |
@@ -72,6 +72,7 @@ struct ccid2_hc_tx_sock { | |||
72 | int ccid2hctx_rpdupack; | 72 | int ccid2hctx_rpdupack; |
73 | int ccid2hctx_sendwait; | 73 | int ccid2hctx_sendwait; |
74 | unsigned long ccid2hctx_last_cong; | 74 | unsigned long ccid2hctx_last_cong; |
75 | u64 ccid2hctx_high_ack; | ||
75 | }; | 76 | }; |
76 | 77 | ||
77 | struct ccid2_hc_rx_sock { | 78 | struct ccid2_hc_rx_sock { |
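Raising CCID2_SEQBUF_LEN from 256 to 1024 while leaving CCID2_SEQBUF_MAX at 128 quadruples the sequence history: if CCID2_SEQBUF_MAX caps the number of allocated chunks, as its name and the seqbufc sanity check suggest, the TX history grows from 32768 to 131072 trackable entries. The arithmetic, for reference:

/* Capacity implied by the constants above (cap on chunk count is an assumption). */
#define CCID2_SEQBUF_LEN	1024	/* entries per allocated chunk */
#define CCID2_SEQBUF_MAX	 128	/* assumed maximum chunk count */
#define CCID2_SEQBUF_CAPACITY	(CCID2_SEQBUF_LEN * CCID2_SEQBUF_MAX)	/* 131072 */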
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c index cec23ad286de..70ebe705eb75 100644 --- a/net/dccp/ccids/ccid3.c +++ b/net/dccp/ccids/ccid3.c | |||
@@ -60,13 +60,11 @@ static u32 usecs_div(const u32 a, const u32 b) | |||
60 | return (b >= 2 * div) ? tmp / (b / div) : tmp; | 60 | return (b >= 2 * div) ? tmp / (b / div) : tmp; |
61 | } | 61 | } |
62 | 62 | ||
63 | static int ccid3_debug; | ||
64 | 63 | ||
65 | #ifdef CCID3_DEBUG | 64 | |
66 | #define ccid3_pr_debug(format, a...) \ | 65 | #ifdef CONFIG_IP_DCCP_CCID3_DEBUG |
67 | do { if (ccid3_debug) \ | 66 | static int ccid3_debug; |
68 | printk(KERN_DEBUG "%s: " format, __FUNCTION__, ##a); \ | 67 | #define ccid3_pr_debug(format, a...) DCCP_PR_DEBUG(ccid3_debug, format, ##a) |
69 | } while (0) | ||
70 | #else | 68 | #else |
71 | #define ccid3_pr_debug(format, a...) | 69 | #define ccid3_pr_debug(format, a...) |
72 | #endif | 70 | #endif |
@@ -75,15 +73,7 @@ static struct dccp_tx_hist *ccid3_tx_hist; | |||
75 | static struct dccp_rx_hist *ccid3_rx_hist; | 73 | static struct dccp_rx_hist *ccid3_rx_hist; |
76 | static struct dccp_li_hist *ccid3_li_hist; | 74 | static struct dccp_li_hist *ccid3_li_hist; |
77 | 75 | ||
78 | /* TFRC sender states */ | 76 | #ifdef CONFIG_IP_DCCP_CCID3_DEBUG |
79 | enum ccid3_hc_tx_states { | ||
80 | TFRC_SSTATE_NO_SENT = 1, | ||
81 | TFRC_SSTATE_NO_FBACK, | ||
82 | TFRC_SSTATE_FBACK, | ||
83 | TFRC_SSTATE_TERM, | ||
84 | }; | ||
85 | |||
86 | #ifdef CCID3_DEBUG | ||
87 | static const char *ccid3_tx_state_name(enum ccid3_hc_tx_states state) | 77 | static const char *ccid3_tx_state_name(enum ccid3_hc_tx_states state) |
88 | { | 78 | { |
89 | static char *ccid3_state_names[] = { | 79 | static char *ccid3_state_names[] = { |
@@ -110,25 +100,24 @@ static void ccid3_hc_tx_set_state(struct sock *sk, | |||
110 | hctx->ccid3hctx_state = state; | 100 | hctx->ccid3hctx_state = state; |
111 | } | 101 | } |
112 | 102 | ||
113 | /* Calculate new t_ipi (inter packet interval) by t_ipi = s / X_inst */ | 103 | /* |
114 | static inline void ccid3_calc_new_t_ipi(struct ccid3_hc_tx_sock *hctx) | 104 | * Recalculate scheduled nominal send time t_nom, inter-packet interval |
105 | * t_ipi, and delta value. Should be called after each change to X. | ||
106 | */ | ||
107 | static inline void ccid3_update_send_time(struct ccid3_hc_tx_sock *hctx) | ||
115 | { | 108 | { |
116 | /* | 109 | timeval_sub_usecs(&hctx->ccid3hctx_t_nom, hctx->ccid3hctx_t_ipi); |
117 | * If no feedback spec says t_ipi is 1 second (set elsewhere and then | ||
118 | * doubles after every no feedback timer (separate function) | ||
119 | */ | ||
120 | if (hctx->ccid3hctx_state != TFRC_SSTATE_NO_FBACK) | ||
121 | hctx->ccid3hctx_t_ipi = usecs_div(hctx->ccid3hctx_s, | ||
122 | hctx->ccid3hctx_x); | ||
123 | } | ||
124 | 110 | ||
125 | /* Calculate new delta by delta = min(t_ipi / 2, t_gran / 2) */ | 111 | /* Calculate new t_ipi (inter packet interval) by t_ipi = s / X_inst */ |
126 | static inline void ccid3_calc_new_delta(struct ccid3_hc_tx_sock *hctx) | 112 | hctx->ccid3hctx_t_ipi = usecs_div(hctx->ccid3hctx_s, hctx->ccid3hctx_x); |
127 | { | 113 | |
114 | /* Update nominal send time with regard to the new t_ipi */ | ||
115 | timeval_add_usecs(&hctx->ccid3hctx_t_nom, hctx->ccid3hctx_t_ipi); | ||
116 | |||
117 | /* Calculate new delta by delta = min(t_ipi / 2, t_gran / 2) */ | ||
128 | hctx->ccid3hctx_delta = min_t(u32, hctx->ccid3hctx_t_ipi / 2, | 118 | hctx->ccid3hctx_delta = min_t(u32, hctx->ccid3hctx_t_ipi / 2, |
129 | TFRC_OPSYS_HALF_TIME_GRAN); | 119 | TFRC_OPSYS_HALF_TIME_GRAN); |
130 | } | 120 | } |
131 | |||
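The new ccid3_update_send_time() above folds the old t_ipi and delta helpers into one step: roll t_nom back by the previous interval, recompute t_ipi = s / X, advance t_nom by the new interval, and set delta = min(t_ipi/2, t_gran/2). A self-contained sketch in plain microsecond arithmetic (the struct and helper below are invented for illustration; X is assumed non-zero):

#include <stdint.h>

struct tfrc_tx_sketch {
	uint64_t t_nom_us;	/* nominal send time of the next packet */
	uint32_t t_ipi_us;	/* inter-packet interval, t_ipi = s / X */
	uint32_t delta_us;	/* scheduling slack                     */
	uint32_t s;		/* segment size estimate, bytes         */
	uint32_t x;		/* allowed sending rate, bytes/second   */
};

static void tfrc_update_send_time(struct tfrc_tx_sketch *tx, uint32_t half_gran_us)
{
	tx->t_nom_us -= tx->t_ipi_us;				/* back to the interval start  */
	tx->t_ipi_us  = (uint64_t)tx->s * 1000000 / tx->x;	/* t_ipi = s / X               */
	tx->t_nom_us += tx->t_ipi_us;				/* advance by the new interval */
	tx->delta_us  = tx->t_ipi_us / 2 < half_gran_us ?	/* min(t_ipi/2, t_gran/2)      */
			tx->t_ipi_us / 2 : half_gran_us;
}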
132 | /* | 121 | /* |
133 | * Update X by | 122 | * Update X by |
134 | * If (p > 0) | 123 | * If (p > 0) |
@@ -139,76 +128,85 @@ static inline void ccid3_calc_new_delta(struct ccid3_hc_tx_sock *hctx) | |||
139 | * X = max(min(2 * X, 2 * X_recv), s / R); | 128 | * X = max(min(2 * X, 2 * X_recv), s / R); |
140 | * tld = now; | 129 | * tld = now; |
141 | */ | 130 | */ |
142 | static void ccid3_hc_tx_update_x(struct sock *sk) | 131 | static void ccid3_hc_tx_update_x(struct sock *sk, struct timeval *now) |
132 | |||
143 | { | 133 | { |
144 | struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk); | 134 | struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk); |
135 | const __u32 old_x = hctx->ccid3hctx_x; | ||
145 | 136 | ||
146 | /* To avoid large error in calcX */ | 137 | /* To avoid large error in calcX */ |
147 | if (hctx->ccid3hctx_p >= TFRC_SMALLEST_P) { | 138 | if (hctx->ccid3hctx_p >= TFRC_SMALLEST_P) { |
148 | hctx->ccid3hctx_x_calc = tfrc_calc_x(hctx->ccid3hctx_s, | 139 | hctx->ccid3hctx_x_calc = tfrc_calc_x(hctx->ccid3hctx_s, |
149 | hctx->ccid3hctx_rtt, | 140 | hctx->ccid3hctx_rtt, |
150 | hctx->ccid3hctx_p); | 141 | hctx->ccid3hctx_p); |
151 | hctx->ccid3hctx_x = max_t(u32, min_t(u32, hctx->ccid3hctx_x_calc, | 142 | hctx->ccid3hctx_x = max_t(u32, min(hctx->ccid3hctx_x_calc, |
152 | 2 * hctx->ccid3hctx_x_recv), | 143 | hctx->ccid3hctx_x_recv * 2), |
153 | (hctx->ccid3hctx_s / | 144 | hctx->ccid3hctx_s / TFRC_T_MBI); |
154 | TFRC_MAX_BACK_OFF_TIME)); | 145 | |
155 | } else { | 146 | } else if (timeval_delta(now, &hctx->ccid3hctx_t_ld) >= |
156 | struct timeval now; | 147 | hctx->ccid3hctx_rtt) { |
148 | hctx->ccid3hctx_x = max(min(hctx->ccid3hctx_x_recv, | ||
149 | hctx->ccid3hctx_x ) * 2, | ||
150 | usecs_div(hctx->ccid3hctx_s, | ||
151 | hctx->ccid3hctx_rtt) ); | ||
152 | hctx->ccid3hctx_t_ld = *now; | ||
153 | } else | ||
154 | ccid3_pr_debug("Not changing X\n"); | ||
157 | 155 | ||
158 | dccp_timestamp(sk, &now); | 156 | if (hctx->ccid3hctx_x != old_x) |
159 | if (timeval_delta(&now, &hctx->ccid3hctx_t_ld) >= | 157 | ccid3_update_send_time(hctx); |
160 | hctx->ccid3hctx_rtt) { | 158 | } |
161 | hctx->ccid3hctx_x = max_t(u32, min_t(u32, hctx->ccid3hctx_x_recv, | 159 | |
162 | hctx->ccid3hctx_x) * 2, | 160 | /* |
163 | usecs_div(hctx->ccid3hctx_s, | 161 | * Track the mean packet size `s' (cf. RFC 4342, 5.3 and RFC 3448, 4.1) |
164 | hctx->ccid3hctx_rtt)); | 162 | * @len: DCCP packet payload size in bytes |
165 | hctx->ccid3hctx_t_ld = now; | 163 | */ |
166 | } | 164 | static inline void ccid3_hc_tx_update_s(struct ccid3_hc_tx_sock *hctx, int len) |
167 | } | 165 | { |
166 | if (unlikely(len == 0)) | ||
167 | ccid3_pr_debug("Packet payload length is 0 - not updating\n"); | ||
168 | else | ||
169 | hctx->ccid3hctx_s = hctx->ccid3hctx_s == 0 ? len : | ||
170 | (9 * hctx->ccid3hctx_s + len) / 10; | ||
171 | /* | ||
172 | * Note: We could do a potential optimisation here - when `s' changes, | ||
173 | * recalculate sending rate and consequently t_ipi, t_delta, and | ||
174 | * t_now. This is however non-standard, and the benefits are not | ||
175 | * clear, so it is currently left out. | ||
176 | */ | ||
168 | } | 177 | } |
169 | 178 | ||
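ccid3_hc_tx_update_s() above keeps the packet-size estimate s as a 0.9-weighted moving average and ignores zero-length payloads; the same rule is added for the receiver further down in this patch. Reduced to a self-contained sketch:

#include <stdint.h>

/* s == 0 means "not yet initialised"; zero-length payloads leave s untouched. */
static uint32_t tfrc_update_s(uint32_t s, uint32_t len)
{
	if (len == 0)
		return s;
	return s == 0 ? len : (9 * s + len) / 10;
}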
170 | static void ccid3_hc_tx_no_feedback_timer(unsigned long data) | 179 | static void ccid3_hc_tx_no_feedback_timer(unsigned long data) |
171 | { | 180 | { |
172 | struct sock *sk = (struct sock *)data; | 181 | struct sock *sk = (struct sock *)data; |
173 | unsigned long next_tmout = 0; | ||
174 | struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk); | 182 | struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk); |
183 | unsigned long t_nfb = USEC_PER_SEC / 5; | ||
175 | 184 | ||
176 | bh_lock_sock(sk); | 185 | bh_lock_sock(sk); |
177 | if (sock_owned_by_user(sk)) { | 186 | if (sock_owned_by_user(sk)) { |
178 | /* Try again later. */ | 187 | /* Try again later. */ |
179 | /* XXX: set some sensible MIB */ | 188 | /* XXX: set some sensible MIB */ |
180 | sk_reset_timer(sk, &hctx->ccid3hctx_no_feedback_timer, | 189 | goto restart_timer; |
181 | jiffies + HZ / 5); | ||
182 | goto out; | ||
183 | } | 190 | } |
184 | 191 | ||
185 | ccid3_pr_debug("%s, sk=%p, state=%s\n", dccp_role(sk), sk, | 192 | ccid3_pr_debug("%s, sk=%p, state=%s\n", dccp_role(sk), sk, |
186 | ccid3_tx_state_name(hctx->ccid3hctx_state)); | 193 | ccid3_tx_state_name(hctx->ccid3hctx_state)); |
187 | 194 | ||
188 | switch (hctx->ccid3hctx_state) { | 195 | switch (hctx->ccid3hctx_state) { |
189 | case TFRC_SSTATE_TERM: | ||
190 | goto out; | ||
191 | case TFRC_SSTATE_NO_FBACK: | 196 | case TFRC_SSTATE_NO_FBACK: |
192 | /* Halve send rate */ | 197 | /* RFC 3448, 4.4: Halve send rate directly */ |
193 | hctx->ccid3hctx_x /= 2; | 198 | hctx->ccid3hctx_x = min_t(u32, hctx->ccid3hctx_x / 2, |
194 | if (hctx->ccid3hctx_x < (hctx->ccid3hctx_s / | 199 | hctx->ccid3hctx_s / TFRC_T_MBI); |
195 | TFRC_MAX_BACK_OFF_TIME)) | ||
196 | hctx->ccid3hctx_x = (hctx->ccid3hctx_s / | ||
197 | TFRC_MAX_BACK_OFF_TIME); | ||
198 | 200 | ||
199 | ccid3_pr_debug("%s, sk=%p, state=%s, updated tx rate to %d " | 201 | ccid3_pr_debug("%s, sk=%p, state=%s, updated tx rate to %d " |
200 | "bytes/s\n", | 202 | "bytes/s\n", |
201 | dccp_role(sk), sk, | 203 | dccp_role(sk), sk, |
202 | ccid3_tx_state_name(hctx->ccid3hctx_state), | 204 | ccid3_tx_state_name(hctx->ccid3hctx_state), |
203 | hctx->ccid3hctx_x); | 205 | hctx->ccid3hctx_x); |
204 | next_tmout = max_t(u32, 2 * usecs_div(hctx->ccid3hctx_s, | 206 | /* The value of R is still undefined and so we can not recompute |
205 | hctx->ccid3hctx_x), | 207 | * the timout value. Keep initial value as per [RFC 4342, 5]. */ |
206 | TFRC_INITIAL_TIMEOUT); | 208 | t_nfb = TFRC_INITIAL_TIMEOUT; |
207 | /* | 209 | ccid3_update_send_time(hctx); |
208 | * FIXME - not sure above calculation is correct. See section | ||
209 | * 5 of CCID3 11 should adjust tx_t_ipi and double that to | ||
210 | * achieve it really | ||
211 | */ | ||
212 | break; | 210 | break; |
213 | case TFRC_SSTATE_FBACK: | 211 | case TFRC_SSTATE_FBACK: |
214 | /* | 212 | /* |
@@ -218,6 +216,8 @@ static void ccid3_hc_tx_no_feedback_timer(unsigned long data) | |||
218 | if (!hctx->ccid3hctx_idle || | 216 | if (!hctx->ccid3hctx_idle || |
219 | (hctx->ccid3hctx_x_recv >= | 217 | (hctx->ccid3hctx_x_recv >= |
220 | 4 * usecs_div(hctx->ccid3hctx_s, hctx->ccid3hctx_rtt))) { | 218 | 4 * usecs_div(hctx->ccid3hctx_s, hctx->ccid3hctx_rtt))) { |
219 | struct timeval now; | ||
220 | |||
221 | ccid3_pr_debug("%s, sk=%p, state=%s, not idle\n", | 221 | ccid3_pr_debug("%s, sk=%p, state=%s, not idle\n", |
222 | dccp_role(sk), sk, | 222 | dccp_role(sk), sk, |
223 | ccid3_tx_state_name(hctx->ccid3hctx_state)); | 223 | ccid3_tx_state_name(hctx->ccid3hctx_state)); |
@@ -235,55 +235,60 @@ static void ccid3_hc_tx_no_feedback_timer(unsigned long data) | |||
235 | if (hctx->ccid3hctx_p < TFRC_SMALLEST_P || | 235 | if (hctx->ccid3hctx_p < TFRC_SMALLEST_P || |
236 | hctx->ccid3hctx_x_calc > 2 * hctx->ccid3hctx_x_recv) | 236 | hctx->ccid3hctx_x_calc > 2 * hctx->ccid3hctx_x_recv) |
237 | hctx->ccid3hctx_x_recv = max_t(u32, hctx->ccid3hctx_x_recv / 2, | 237 | hctx->ccid3hctx_x_recv = max_t(u32, hctx->ccid3hctx_x_recv / 2, |
238 | hctx->ccid3hctx_s / (2 * TFRC_MAX_BACK_OFF_TIME)); | 238 | hctx->ccid3hctx_s / (2 * TFRC_T_MBI)); |
239 | else | 239 | else |
240 | hctx->ccid3hctx_x_recv = hctx->ccid3hctx_x_calc / 4; | 240 | hctx->ccid3hctx_x_recv = hctx->ccid3hctx_x_calc / 4; |
241 | 241 | ||
242 | /* Update sending rate */ | 242 | /* Update sending rate */ |
243 | ccid3_hc_tx_update_x(sk); | 243 | dccp_timestamp(sk, &now); |
244 | ccid3_hc_tx_update_x(sk, &now); | ||
244 | } | 245 | } |
245 | /* | 246 | /* |
246 | * Schedule no feedback timer to expire in | 247 | * Schedule no feedback timer to expire in |
247 | * max(4 * R, 2 * s / X) | 248 | * max(4 * R, 2 * s/X) = max(4 * R, 2 * t_ipi) |
248 | */ | 249 | */ |
249 | next_tmout = max_t(u32, hctx->ccid3hctx_t_rto, | 250 | t_nfb = max(4 * hctx->ccid3hctx_rtt, 2 * hctx->ccid3hctx_t_ipi); |
250 | 2 * usecs_div(hctx->ccid3hctx_s, | ||
251 | hctx->ccid3hctx_x)); | ||
252 | break; | 251 | break; |
253 | default: | 252 | case TFRC_SSTATE_NO_SENT: |
254 | printk(KERN_CRIT "%s: %s, sk=%p, Illegal state (%d)!\n", | 253 | DCCP_BUG("Illegal %s state NO_SENT, sk=%p", dccp_role(sk), sk); |
255 | __FUNCTION__, dccp_role(sk), sk, hctx->ccid3hctx_state); | 254 | /* fall through */ |
256 | dump_stack(); | 255 | case TFRC_SSTATE_TERM: |
257 | goto out; | 256 | goto out; |
258 | } | 257 | } |
259 | 258 | ||
260 | sk_reset_timer(sk, &hctx->ccid3hctx_no_feedback_timer, | ||
261 | jiffies + max_t(u32, 1, usecs_to_jiffies(next_tmout))); | ||
262 | hctx->ccid3hctx_idle = 1; | 259 | hctx->ccid3hctx_idle = 1; |
260 | |||
261 | restart_timer: | ||
262 | sk_reset_timer(sk, &hctx->ccid3hctx_no_feedback_timer, | ||
263 | jiffies + usecs_to_jiffies(t_nfb)); | ||
263 | out: | 264 | out: |
264 | bh_unlock_sock(sk); | 265 | bh_unlock_sock(sk); |
265 | sock_put(sk); | 266 | sock_put(sk); |
266 | } | 267 | } |
267 | 268 | ||
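Both this timer handler and the feedback path later in the file arm the next no-feedback expiry after t_nfb = max(4 * R, 2 * s/X), which with t_ipi = s/X is simply max(4 * R, 2 * t_ipi), converted via usecs_to_jiffies() when the timer is reset. As a one-line sketch:

/* t_nfb in microseconds; rtt_us = R, t_ipi_us = s / X */
static inline unsigned long tfrc_t_nfb(unsigned long rtt_us, unsigned long t_ipi_us)
{
	return 4 * rtt_us > 2 * t_ipi_us ? 4 * rtt_us : 2 * t_ipi_us;
}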
268 | static int ccid3_hc_tx_send_packet(struct sock *sk, | 269 | /* |
269 | struct sk_buff *skb, int len) | 270 | * returns |
271 | * > 0: delay (in msecs) that should pass before actually sending | ||
272 | * = 0: can send immediately | ||
273 | * < 0: error condition; do not send packet | ||
274 | */ | ||
275 | static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb) | ||
270 | { | 276 | { |
271 | struct dccp_sock *dp = dccp_sk(sk); | 277 | struct dccp_sock *dp = dccp_sk(sk); |
272 | struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk); | 278 | struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk); |
273 | struct dccp_tx_hist_entry *new_packet; | 279 | struct dccp_tx_hist_entry *new_packet; |
274 | struct timeval now; | 280 | struct timeval now; |
275 | long delay; | 281 | long delay; |
276 | int rc = -ENOTCONN; | ||
277 | 282 | ||
278 | BUG_ON(hctx == NULL || hctx->ccid3hctx_state == TFRC_SSTATE_TERM); | 283 | BUG_ON(hctx == NULL); |
279 | 284 | ||
280 | /* Check if pure ACK or Terminating*/ | ||
281 | /* | 285 | /* |
282 | * XXX: We only call this function for DATA and DATAACK, on, these | 286 | * This function is called only for Data and DataAck packets. Sending |
283 | * packets can have zero length, but why the comment about "pure ACK"? | 287 | * zero-sized Data(Ack)s is theoretically possible, but for congestion |
288 | * control this case is pathological - ignore it. | ||
284 | */ | 289 | */ |
285 | if (unlikely(len == 0)) | 290 | if (unlikely(skb->len == 0)) |
286 | goto out; | 291 | return -EBADMSG; |
287 | 292 | ||
288 | /* See if last packet allocated was not sent */ | 293 | /* See if last packet allocated was not sent */ |
289 | new_packet = dccp_tx_hist_head(&hctx->ccid3hctx_hist); | 294 | new_packet = dccp_tx_hist_head(&hctx->ccid3hctx_hist); |
@@ -291,12 +296,10 @@ static int ccid3_hc_tx_send_packet(struct sock *sk, | |||
291 | new_packet = dccp_tx_hist_entry_new(ccid3_tx_hist, | 296 | new_packet = dccp_tx_hist_entry_new(ccid3_tx_hist, |
292 | SLAB_ATOMIC); | 297 | SLAB_ATOMIC); |
293 | 298 | ||
294 | rc = -ENOBUFS; | ||
295 | if (unlikely(new_packet == NULL)) { | 299 | if (unlikely(new_packet == NULL)) { |
296 | LIMIT_NETDEBUG(KERN_WARNING "%s: %s, sk=%p, not enough " | 300 | DCCP_WARN("%s, sk=%p, not enough mem to add to history," |
297 | "mem to add to history, send refused\n", | 301 | "send refused\n", dccp_role(sk), sk); |
298 | __FUNCTION__, dccp_role(sk), sk); | 302 | return -ENOBUFS; |
299 | goto out; | ||
300 | } | 303 | } |
301 | 304 | ||
302 | dccp_tx_hist_add_entry(&hctx->ccid3hctx_hist, new_packet); | 305 | dccp_tx_hist_add_entry(&hctx->ccid3hctx_hist, new_packet); |
@@ -311,123 +314,94 @@ static int ccid3_hc_tx_send_packet(struct sock *sk, | |||
311 | hctx->ccid3hctx_last_win_count = 0; | 314 | hctx->ccid3hctx_last_win_count = 0; |
312 | hctx->ccid3hctx_t_last_win_count = now; | 315 | hctx->ccid3hctx_t_last_win_count = now; |
313 | ccid3_hc_tx_set_state(sk, TFRC_SSTATE_NO_FBACK); | 316 | ccid3_hc_tx_set_state(sk, TFRC_SSTATE_NO_FBACK); |
314 | hctx->ccid3hctx_t_ipi = TFRC_INITIAL_IPI; | ||
315 | 317 | ||
316 | /* Set nominal send time for initial packet */ | 318 | /* Set initial sending rate to 1 packet per second */ |
319 | ccid3_hc_tx_update_s(hctx, skb->len); | ||
320 | hctx->ccid3hctx_x = hctx->ccid3hctx_s; | ||
321 | |||
322 | /* First timeout, according to [RFC 3448, 4.2], is 1 second */ | ||
323 | hctx->ccid3hctx_t_ipi = USEC_PER_SEC; | ||
324 | /* Initial delta: minimum of 0.5 sec and t_gran/2 */ | ||
325 | hctx->ccid3hctx_delta = TFRC_OPSYS_HALF_TIME_GRAN; | ||
326 | |||
327 | /* Set t_0 for initial packet */ | ||
317 | hctx->ccid3hctx_t_nom = now; | 328 | hctx->ccid3hctx_t_nom = now; |
318 | timeval_add_usecs(&hctx->ccid3hctx_t_nom, | ||
319 | hctx->ccid3hctx_t_ipi); | ||
320 | ccid3_calc_new_delta(hctx); | ||
321 | rc = 0; | ||
322 | break; | 329 | break; |
323 | case TFRC_SSTATE_NO_FBACK: | 330 | case TFRC_SSTATE_NO_FBACK: |
324 | case TFRC_SSTATE_FBACK: | 331 | case TFRC_SSTATE_FBACK: |
325 | delay = (timeval_delta(&now, &hctx->ccid3hctx_t_nom) - | 332 | delay = timeval_delta(&hctx->ccid3hctx_t_nom, &now); |
326 | hctx->ccid3hctx_delta); | 333 | /* |
327 | delay /= -1000; | 334 | * Scheduling of packet transmissions [RFC 3448, 4.6] |
328 | /* divide by -1000 is to convert to ms and get sign right */ | 335 | * |
329 | rc = delay > 0 ? delay : 0; | 336 | * if (t_now > t_nom - delta) |
330 | break; | 337 | * // send the packet now |
331 | default: | 338 | * else |
332 | printk(KERN_CRIT "%s: %s, sk=%p, Illegal state (%d)!\n", | 339 | * // send the packet in (t_nom - t_now) milliseconds. |
333 | __FUNCTION__, dccp_role(sk), sk, hctx->ccid3hctx_state); | 340 | */ |
334 | dump_stack(); | 341 | if (delay >= hctx->ccid3hctx_delta) |
335 | rc = -EINVAL; | 342 | return delay / 1000L; |
336 | break; | 343 | break; |
344 | case TFRC_SSTATE_TERM: | ||
345 | DCCP_BUG("Illegal %s state TERM, sk=%p", dccp_role(sk), sk); | ||
346 | return -EINVAL; | ||
337 | } | 347 | } |
338 | 348 | ||
339 | /* Can we send? if so add options and add to packet history */ | 349 | /* prepare to send now (add options etc.) */ |
340 | if (rc == 0) { | 350 | dp->dccps_hc_tx_insert_options = 1; |
341 | dp->dccps_hc_tx_insert_options = 1; | 351 | new_packet->dccphtx_ccval = DCCP_SKB_CB(skb)->dccpd_ccval = |
342 | new_packet->dccphtx_ccval = | 352 | hctx->ccid3hctx_last_win_count; |
343 | DCCP_SKB_CB(skb)->dccpd_ccval = | 353 | timeval_add_usecs(&hctx->ccid3hctx_t_nom, hctx->ccid3hctx_t_ipi); |
344 | hctx->ccid3hctx_last_win_count; | 354 | |
345 | timeval_add_usecs(&hctx->ccid3hctx_t_nom, | 355 | return 0; |
346 | hctx->ccid3hctx_t_ipi); | ||
347 | } | ||
348 | out: | ||
349 | return rc; | ||
350 | } | 356 | } |
351 | 357 | ||
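The return-value contract documented above (> 0 delay in msecs, 0 send immediately, < 0 error) implements the RFC 3448, 4.6 test quoted in the function body: transmit if t_now > t_nom - delta, otherwise wait t_nom - t_now. A self-contained sketch of just that decision:

#include <stdint.h>

/* Returns 0 to send immediately, otherwise the wait in milliseconds. */
static long tfrc_send_delay_ms(int64_t t_nom_us, int64_t t_now_us, int64_t delta_us)
{
	int64_t delay_us = t_nom_us - t_now_us;

	if (delay_us < delta_us)	/* i.e. t_now > t_nom - delta */
		return 0;
	return delay_us / 1000;
}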
352 | static void ccid3_hc_tx_packet_sent(struct sock *sk, int more, int len) | 358 | static void ccid3_hc_tx_packet_sent(struct sock *sk, int more, unsigned int len) |
353 | { | 359 | { |
354 | const struct dccp_sock *dp = dccp_sk(sk); | 360 | const struct dccp_sock *dp = dccp_sk(sk); |
355 | struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk); | 361 | struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk); |
356 | struct timeval now; | 362 | struct timeval now; |
363 | unsigned long quarter_rtt; | ||
364 | struct dccp_tx_hist_entry *packet; | ||
357 | 365 | ||
358 | BUG_ON(hctx == NULL || hctx->ccid3hctx_state == TFRC_SSTATE_TERM); | 366 | BUG_ON(hctx == NULL); |
359 | 367 | ||
360 | dccp_timestamp(sk, &now); | 368 | dccp_timestamp(sk, &now); |
361 | 369 | ||
362 | /* check if we have sent a data packet */ | 370 | ccid3_hc_tx_update_s(hctx, len); |
363 | if (len > 0) { | ||
364 | unsigned long quarter_rtt; | ||
365 | struct dccp_tx_hist_entry *packet; | ||
366 | 371 | ||
367 | packet = dccp_tx_hist_head(&hctx->ccid3hctx_hist); | 372 | packet = dccp_tx_hist_head(&hctx->ccid3hctx_hist); |
368 | if (unlikely(packet == NULL)) { | 373 | if (unlikely(packet == NULL)) { |
369 | LIMIT_NETDEBUG(KERN_WARNING "%s: packet doesn't " | 374 | DCCP_WARN("packet doesn't exist in history!\n"); |
370 | "exists in history!\n", __FUNCTION__); | ||
371 | return; | ||
372 | } | ||
373 | if (unlikely(packet->dccphtx_sent)) { | ||
374 | LIMIT_NETDEBUG(KERN_WARNING "%s: no unsent packet in " | ||
375 | "history!\n", __FUNCTION__); | ||
376 | return; | ||
377 | } | ||
378 | packet->dccphtx_tstamp = now; | ||
379 | packet->dccphtx_seqno = dp->dccps_gss; | ||
380 | /* | ||
381 | * Check if win_count have changed | ||
382 | * Algorithm in "8.1. Window Counter Value" in RFC 4342. | ||
383 | */ | ||
384 | quarter_rtt = timeval_delta(&now, &hctx->ccid3hctx_t_last_win_count); | ||
385 | if (likely(hctx->ccid3hctx_rtt > 8)) | ||
386 | quarter_rtt /= hctx->ccid3hctx_rtt / 4; | ||
387 | |||
388 | if (quarter_rtt > 0) { | ||
389 | hctx->ccid3hctx_t_last_win_count = now; | ||
390 | hctx->ccid3hctx_last_win_count = (hctx->ccid3hctx_last_win_count + | ||
391 | min_t(unsigned long, quarter_rtt, 5)) % 16; | ||
392 | ccid3_pr_debug("%s, sk=%p, window changed from " | ||
393 | "%u to %u!\n", | ||
394 | dccp_role(sk), sk, | ||
395 | packet->dccphtx_ccval, | ||
396 | hctx->ccid3hctx_last_win_count); | ||
397 | } | ||
398 | |||
399 | hctx->ccid3hctx_idle = 0; | ||
400 | packet->dccphtx_rtt = hctx->ccid3hctx_rtt; | ||
401 | packet->dccphtx_sent = 1; | ||
402 | } else | ||
403 | ccid3_pr_debug("%s, sk=%p, seqno=%llu NOT inserted!\n", | ||
404 | dccp_role(sk), sk, dp->dccps_gss); | ||
405 | |||
406 | switch (hctx->ccid3hctx_state) { | ||
407 | case TFRC_SSTATE_NO_SENT: | ||
408 | /* if first wasn't pure ack */ | ||
409 | if (len != 0) | ||
410 | printk(KERN_CRIT "%s: %s, First packet sent is noted " | ||
411 | "as a data packet\n", | ||
412 | __FUNCTION__, dccp_role(sk)); | ||
413 | return; | 375 | return; |
414 | case TFRC_SSTATE_NO_FBACK: | ||
415 | case TFRC_SSTATE_FBACK: | ||
416 | if (len > 0) { | ||
417 | timeval_sub_usecs(&hctx->ccid3hctx_t_nom, | ||
418 | hctx->ccid3hctx_t_ipi); | ||
419 | ccid3_calc_new_t_ipi(hctx); | ||
420 | ccid3_calc_new_delta(hctx); | ||
421 | timeval_add_usecs(&hctx->ccid3hctx_t_nom, | ||
422 | hctx->ccid3hctx_t_ipi); | ||
423 | } | ||
424 | break; | ||
425 | default: | ||
426 | printk(KERN_CRIT "%s: %s, sk=%p, Illegal state (%d)!\n", | ||
427 | __FUNCTION__, dccp_role(sk), sk, hctx->ccid3hctx_state); | ||
428 | dump_stack(); | ||
429 | break; | ||
430 | } | 376 | } |
377 | if (unlikely(packet->dccphtx_sent)) { | ||
378 | DCCP_WARN("no unsent packet in history!\n"); | ||
379 | return; | ||
380 | } | ||
381 | packet->dccphtx_tstamp = now; | ||
382 | packet->dccphtx_seqno = dp->dccps_gss; | ||
383 | /* | ||
384 | * Check if win_count have changed | ||
385 | * Algorithm in "8.1. Window Counter Value" in RFC 4342. | ||
386 | */ | ||
387 | quarter_rtt = timeval_delta(&now, &hctx->ccid3hctx_t_last_win_count); | ||
388 | if (likely(hctx->ccid3hctx_rtt > 8)) | ||
389 | quarter_rtt /= hctx->ccid3hctx_rtt / 4; | ||
390 | |||
391 | if (quarter_rtt > 0) { | ||
392 | hctx->ccid3hctx_t_last_win_count = now; | ||
393 | hctx->ccid3hctx_last_win_count = (hctx->ccid3hctx_last_win_count + | ||
394 | min_t(unsigned long, quarter_rtt, 5)) % 16; | ||
395 | ccid3_pr_debug("%s, sk=%p, window changed from " | ||
396 | "%u to %u!\n", | ||
397 | dccp_role(sk), sk, | ||
398 | packet->dccphtx_ccval, | ||
399 | hctx->ccid3hctx_last_win_count); | ||
400 | } | ||
401 | |||
402 | hctx->ccid3hctx_idle = 0; | ||
403 | packet->dccphtx_rtt = hctx->ccid3hctx_rtt; | ||
404 | packet->dccphtx_sent = 1; | ||
431 | } | 405 | } |
432 | 406 | ||
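The win_count update kept above implements RFC 4342, 8.1: the 4-bit CCVal advances by the number of quarter-RTTs since it last changed, capped at 5 and taken modulo 16. A self-contained sketch of that rule (the kernel guards the division with its rtt > 8 test; the version below simply refuses to advance on implausibly small RTTs):

#include <stdint.h>

static uint8_t ccval_advance(uint8_t ccval, uint32_t elapsed_us, uint32_t rtt_us)
{
	uint32_t quarters;

	if (rtt_us < 4)			/* avoid dividing by zero on bogus RTTs */
		return ccval;
	quarters = elapsed_us / (rtt_us / 4);
	if (quarters == 0)
		return ccval;		/* less than a quarter RTT has elapsed */
	if (quarters > 5)
		quarters = 5;
	return (ccval + quarters) % 16;
}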
433 | static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) | 407 | static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) |
@@ -437,13 +411,13 @@ static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
437 | struct ccid3_options_received *opt_recv; | 411 | struct ccid3_options_received *opt_recv; |
438 | struct dccp_tx_hist_entry *packet; | 412 | struct dccp_tx_hist_entry *packet; |
439 | struct timeval now; | 413 | struct timeval now; |
440 | unsigned long next_tmout; | 414 | unsigned long t_nfb; |
441 | u32 t_elapsed; | 415 | u32 t_elapsed; |
442 | u32 pinv; | 416 | u32 pinv; |
443 | u32 x_recv; | 417 | u32 x_recv; |
444 | u32 r_sample; | 418 | u32 r_sample; |
445 | 419 | ||
446 | BUG_ON(hctx == NULL || hctx->ccid3hctx_state == TFRC_SSTATE_TERM); | 420 | BUG_ON(hctx == NULL); |
447 | 421 | ||
448 | /* we are only interested in ACKs */ | 422 | /* we are only interested in ACKs */ |
449 | if (!(DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_ACK || | 423 | if (!(DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_ACK || |
@@ -457,9 +431,6 @@ static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
457 | pinv = opt_recv->ccid3or_loss_event_rate; | 431 | pinv = opt_recv->ccid3or_loss_event_rate; |
458 | 432 | ||
459 | switch (hctx->ccid3hctx_state) { | 433 | switch (hctx->ccid3hctx_state) { |
460 | case TFRC_SSTATE_NO_SENT: | ||
461 | /* FIXME: what to do here? */ | ||
462 | return; | ||
463 | case TFRC_SSTATE_NO_FBACK: | 434 | case TFRC_SSTATE_NO_FBACK: |
464 | case TFRC_SSTATE_FBACK: | 435 | case TFRC_SSTATE_FBACK: |
465 | /* Calculate new round trip sample by | 436 | /* Calculate new round trip sample by |
@@ -468,11 +439,10 @@ static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
468 | packet = dccp_tx_hist_find_entry(&hctx->ccid3hctx_hist, | 439 | packet = dccp_tx_hist_find_entry(&hctx->ccid3hctx_hist, |
469 | DCCP_SKB_CB(skb)->dccpd_ack_seq); | 440 | DCCP_SKB_CB(skb)->dccpd_ack_seq); |
470 | if (unlikely(packet == NULL)) { | 441 | if (unlikely(packet == NULL)) { |
471 | LIMIT_NETDEBUG(KERN_WARNING "%s: %s, sk=%p, seqno " | 442 | DCCP_WARN("%s, sk=%p, seqno %llu(%s) does't exist " |
472 | "%llu(%s) does't exist in history!\n", | 443 | "in history!\n", dccp_role(sk), sk, |
473 | __FUNCTION__, dccp_role(sk), sk, | ||
474 | (unsigned long long)DCCP_SKB_CB(skb)->dccpd_ack_seq, | 444 | (unsigned long long)DCCP_SKB_CB(skb)->dccpd_ack_seq, |
475 | dccp_packet_name(DCCP_SKB_CB(skb)->dccpd_type)); | 445 | dccp_packet_name(DCCP_SKB_CB(skb)->dccpd_type)); |
476 | return; | 446 | return; |
477 | } | 447 | } |
478 | 448 | ||
@@ -480,9 +450,8 @@ static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
480 | dccp_timestamp(sk, &now); | 450 | dccp_timestamp(sk, &now); |
481 | r_sample = timeval_delta(&now, &packet->dccphtx_tstamp); | 451 | r_sample = timeval_delta(&now, &packet->dccphtx_tstamp); |
482 | if (unlikely(r_sample <= t_elapsed)) | 452 | if (unlikely(r_sample <= t_elapsed)) |
483 | LIMIT_NETDEBUG(KERN_WARNING "%s: r_sample=%uus, " | 453 | DCCP_WARN("r_sample=%uus,t_elapsed=%uus\n", |
484 | "t_elapsed=%uus\n", | 454 | r_sample, t_elapsed); |
485 | __FUNCTION__, r_sample, t_elapsed); | ||
486 | else | 455 | else |
487 | r_sample -= t_elapsed; | 456 | r_sample -= t_elapsed; |
488 | 457 | ||
@@ -495,20 +464,26 @@ static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
495 | * q is a constant, RFC 3448 recommends 0.9 | 464 | * q is a constant, RFC 3448 recommends 0.9 |
496 | */ | 465 | */ |
497 | if (hctx->ccid3hctx_state == TFRC_SSTATE_NO_FBACK) { | 466 | if (hctx->ccid3hctx_state == TFRC_SSTATE_NO_FBACK) { |
467 | /* Use Larger Initial Windows [RFC 4342, sec. 5] | ||
468 | * We deviate in that we use `s' instead of `MSS'. */ | ||
469 | u16 w_init = max( 4 * hctx->ccid3hctx_s, | ||
470 | max(2 * hctx->ccid3hctx_s, 4380)); | ||
471 | hctx->ccid3hctx_rtt = r_sample; | ||
472 | hctx->ccid3hctx_x = usecs_div(w_init, r_sample); | ||
473 | hctx->ccid3hctx_t_ld = now; | ||
474 | |||
475 | ccid3_update_send_time(hctx); | ||
498 | ccid3_hc_tx_set_state(sk, TFRC_SSTATE_FBACK); | 476 | ccid3_hc_tx_set_state(sk, TFRC_SSTATE_FBACK); |
499 | hctx->ccid3hctx_rtt = r_sample; | 477 | } else { |
500 | } else | ||
501 | hctx->ccid3hctx_rtt = (hctx->ccid3hctx_rtt * 9) / 10 + | 478 | hctx->ccid3hctx_rtt = (hctx->ccid3hctx_rtt * 9) / 10 + |
502 | r_sample / 10; | 479 | r_sample / 10; |
480 | ccid3_hc_tx_update_x(sk, &now); | ||
481 | } | ||
503 | 482 | ||
504 | ccid3_pr_debug("%s, sk=%p, New RTT estimate=%uus, " | 483 | ccid3_pr_debug("%s, sk=%p, New RTT estimate=%uus, " |
505 | "r_sample=%us\n", dccp_role(sk), sk, | 484 | "r_sample=%us\n", dccp_role(sk), sk, |
506 | hctx->ccid3hctx_rtt, r_sample); | 485 | hctx->ccid3hctx_rtt, r_sample); |
507 | 486 | ||
508 | /* Update timeout interval */ | ||
509 | hctx->ccid3hctx_t_rto = max_t(u32, 4 * hctx->ccid3hctx_rtt, | ||
510 | USEC_PER_SEC); | ||
511 | |||
512 | /* Update receive rate */ | 487 | /* Update receive rate */ |
513 | hctx->ccid3hctx_x_recv = x_recv;/* X_recv in bytes per sec */ | 488 | hctx->ccid3hctx_x_recv = x_recv;/* X_recv in bytes per sec */ |
514 | 489 | ||
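On the first feedback the sender now seeds both the RTT estimate and the rate using the larger initial window of RFC 4342, 5, with s standing in for MSS: w_init = max(4s, max(2s, 4380)) and X = w_init / R. A self-contained sketch with a worked value for illustration only (s = 1460 bytes and R = 100 ms give w_init = 5840 and X = 58400 bytes/s):

#include <stdint.h>

/* X in bytes per second; s in bytes; rtt_us = R in microseconds (non-zero). */
static uint32_t tfrc_initial_rate(uint32_t s, uint32_t rtt_us)
{
	uint32_t w_init = 2 * s > 4380 ? 2 * s : 4380;	/* max(2*s, 4380) */

	if (4 * s > w_init)
		w_init = 4 * s;				/* max(4*s, ...)  */
	return (uint64_t)w_init * 1000000 / rtt_us;	/* X = w_init / R */
}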
@@ -528,49 +503,41 @@ static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
528 | /* unschedule no feedback timer */ | 503 | /* unschedule no feedback timer */ |
529 | sk_stop_timer(sk, &hctx->ccid3hctx_no_feedback_timer); | 504 | sk_stop_timer(sk, &hctx->ccid3hctx_no_feedback_timer); |
530 | 505 | ||
531 | /* Update sending rate */ | ||
532 | ccid3_hc_tx_update_x(sk); | ||
533 | |||
534 | /* Update next send time */ | ||
535 | timeval_sub_usecs(&hctx->ccid3hctx_t_nom, | ||
536 | hctx->ccid3hctx_t_ipi); | ||
537 | ccid3_calc_new_t_ipi(hctx); | ||
538 | timeval_add_usecs(&hctx->ccid3hctx_t_nom, | ||
539 | hctx->ccid3hctx_t_ipi); | ||
540 | ccid3_calc_new_delta(hctx); | ||
541 | |||
542 | /* remove all packets older than the one acked from history */ | 506 | /* remove all packets older than the one acked from history */ |
543 | dccp_tx_hist_purge_older(ccid3_tx_hist, | 507 | dccp_tx_hist_purge_older(ccid3_tx_hist, |
544 | &hctx->ccid3hctx_hist, packet); | 508 | &hctx->ccid3hctx_hist, packet); |
545 | /* | 509 | /* |
546 | * As we have calculated new ipi, delta, t_nom it is possible that | 510 | * As we have calculated new ipi, delta, t_nom it is possible that |
547 | * we now can send a packet, so wake up dccp_wait_for_ccids. | 511 | * we now can send a packet, so wake up dccp_wait_for_ccid |
548 | */ | 512 | */ |
549 | sk->sk_write_space(sk); | 513 | sk->sk_write_space(sk); |
550 | 514 | ||
515 | /* Update timeout interval. We use the alternative variant of | ||
516 | * [RFC 3448, 3.1] which sets the upper bound of t_rto to one | ||
517 | * second, as it is suggested for TCP (see RFC 2988, 2.4). */ | ||
518 | hctx->ccid3hctx_t_rto = max_t(u32, 4 * hctx->ccid3hctx_rtt, | ||
519 | USEC_PER_SEC ); | ||
551 | /* | 520 | /* |
552 | * Schedule no feedback timer to expire in | 521 | * Schedule no feedback timer to expire in |
553 | * max(4 * R, 2 * s / X) | 522 | * max(4 * R, 2 * s/X) = max(4 * R, 2 * t_ipi) |
554 | */ | 523 | */ |
555 | next_tmout = max(hctx->ccid3hctx_t_rto, | 524 | t_nfb = max(4 * hctx->ccid3hctx_rtt, 2 * hctx->ccid3hctx_t_ipi); |
556 | 2 * usecs_div(hctx->ccid3hctx_s, | ||
557 | hctx->ccid3hctx_x)); | ||
558 | 525 | ||
559 | ccid3_pr_debug("%s, sk=%p, Scheduled no feedback timer to " | 526 | ccid3_pr_debug("%s, sk=%p, Scheduled no feedback timer to " |
560 | "expire in %lu jiffies (%luus)\n", | 527 | "expire in %lu jiffies (%luus)\n", |
561 | dccp_role(sk), sk, | 528 | dccp_role(sk), sk, |
562 | usecs_to_jiffies(next_tmout), next_tmout); | 529 | usecs_to_jiffies(t_nfb), t_nfb); |
563 | 530 | ||
564 | sk_reset_timer(sk, &hctx->ccid3hctx_no_feedback_timer, | 531 | sk_reset_timer(sk, &hctx->ccid3hctx_no_feedback_timer, |
565 | jiffies + max_t(u32, 1, usecs_to_jiffies(next_tmout))); | 532 | jiffies + usecs_to_jiffies(t_nfb)); |
566 | 533 | ||
567 | /* set idle flag */ | 534 | /* set idle flag */ |
568 | hctx->ccid3hctx_idle = 1; | 535 | hctx->ccid3hctx_idle = 1; |
569 | break; | 536 | break; |
570 | default: | 537 | case TFRC_SSTATE_NO_SENT: |
571 | printk(KERN_CRIT "%s: %s, sk=%p, Illegal state (%d)!\n", | 538 | DCCP_WARN("Illegal ACK received - no packet has been sent\n"); |
572 | __FUNCTION__, dccp_role(sk), sk, hctx->ccid3hctx_state); | 539 | /* fall through */ |
573 | dump_stack(); | 540 | case TFRC_SSTATE_TERM: /* ignore feedback when closing */ |
574 | break; | 541 | break; |
575 | } | 542 | } |
576 | } | 543 | } |
@@ -610,9 +577,9 @@ static int ccid3_hc_tx_parse_options(struct sock *sk, unsigned char option, | |||
610 | switch (option) { | 577 | switch (option) { |
611 | case TFRC_OPT_LOSS_EVENT_RATE: | 578 | case TFRC_OPT_LOSS_EVENT_RATE: |
612 | if (unlikely(len != 4)) { | 579 | if (unlikely(len != 4)) { |
613 | LIMIT_NETDEBUG(KERN_WARNING "%s: %s, sk=%p, invalid " | 580 | DCCP_WARN("%s, sk=%p, invalid len %d " |
614 | "len for TFRC_OPT_LOSS_EVENT_RATE\n", | 581 | "for TFRC_OPT_LOSS_EVENT_RATE\n", |
615 | __FUNCTION__, dccp_role(sk), sk); | 582 | dccp_role(sk), sk, len); |
616 | rc = -EINVAL; | 583 | rc = -EINVAL; |
617 | } else { | 584 | } else { |
618 | opt_recv->ccid3or_loss_event_rate = ntohl(*(__be32 *)value); | 585 | opt_recv->ccid3or_loss_event_rate = ntohl(*(__be32 *)value); |
@@ -631,9 +598,9 @@ static int ccid3_hc_tx_parse_options(struct sock *sk, unsigned char option, | |||
631 | break; | 598 | break; |
632 | case TFRC_OPT_RECEIVE_RATE: | 599 | case TFRC_OPT_RECEIVE_RATE: |
633 | if (unlikely(len != 4)) { | 600 | if (unlikely(len != 4)) { |
634 | LIMIT_NETDEBUG(KERN_WARNING "%s: %s, sk=%p, invalid " | 601 | DCCP_WARN("%s, sk=%p, invalid len %d " |
635 | "len for TFRC_OPT_RECEIVE_RATE\n", | 602 | "for TFRC_OPT_RECEIVE_RATE\n", |
636 | __FUNCTION__, dccp_role(sk), sk); | 603 | dccp_role(sk), sk, len); |
637 | rc = -EINVAL; | 604 | rc = -EINVAL; |
638 | } else { | 605 | } else { |
639 | opt_recv->ccid3or_receive_rate = ntohl(*(__be32 *)value); | 606 | opt_recv->ccid3or_receive_rate = ntohl(*(__be32 *)value); |
@@ -649,18 +616,9 @@ static int ccid3_hc_tx_parse_options(struct sock *sk, unsigned char option, | |||
649 | 616 | ||
650 | static int ccid3_hc_tx_init(struct ccid *ccid, struct sock *sk) | 617 | static int ccid3_hc_tx_init(struct ccid *ccid, struct sock *sk) |
651 | { | 618 | { |
652 | struct dccp_sock *dp = dccp_sk(sk); | ||
653 | struct ccid3_hc_tx_sock *hctx = ccid_priv(ccid); | 619 | struct ccid3_hc_tx_sock *hctx = ccid_priv(ccid); |
654 | 620 | ||
655 | if (dp->dccps_packet_size >= TFRC_MIN_PACKET_SIZE && | 621 | hctx->ccid3hctx_s = 0; |
656 | dp->dccps_packet_size <= TFRC_MAX_PACKET_SIZE) | ||
657 | hctx->ccid3hctx_s = dp->dccps_packet_size; | ||
658 | else | ||
659 | hctx->ccid3hctx_s = TFRC_STD_PACKET_SIZE; | ||
660 | |||
661 | /* Set transmission rate to 1 packet per second */ | ||
662 | hctx->ccid3hctx_x = hctx->ccid3hctx_s; | ||
663 | hctx->ccid3hctx_t_rto = USEC_PER_SEC; | ||
664 | hctx->ccid3hctx_state = TFRC_SSTATE_NO_SENT; | 622 | hctx->ccid3hctx_state = TFRC_SSTATE_NO_SENT; |
665 | INIT_LIST_HEAD(&hctx->ccid3hctx_hist); | 623 | INIT_LIST_HEAD(&hctx->ccid3hctx_hist); |
666 | 624 | ||
@@ -688,14 +646,7 @@ static void ccid3_hc_tx_exit(struct sock *sk) | |||
688 | * RX Half Connection methods | 646 | * RX Half Connection methods |
689 | */ | 647 | */ |
690 | 648 | ||
691 | /* TFRC receiver states */ | 649 | #ifdef CONFIG_IP_DCCP_CCID3_DEBUG |
692 | enum ccid3_hc_rx_states { | ||
693 | TFRC_RSTATE_NO_DATA = 1, | ||
694 | TFRC_RSTATE_DATA, | ||
695 | TFRC_RSTATE_TERM = 127, | ||
696 | }; | ||
697 | |||
698 | #ifdef CCID3_DEBUG | ||
699 | static const char *ccid3_rx_state_name(enum ccid3_hc_rx_states state) | 650 | static const char *ccid3_rx_state_name(enum ccid3_hc_rx_states state) |
700 | { | 651 | { |
701 | static char *ccid3_rx_state_names[] = { | 652 | static char *ccid3_rx_state_names[] = { |
@@ -721,6 +672,15 @@ static void ccid3_hc_rx_set_state(struct sock *sk, | |||
721 | hcrx->ccid3hcrx_state = state; | 672 | hcrx->ccid3hcrx_state = state; |
722 | } | 673 | } |
723 | 674 | ||
675 | static inline void ccid3_hc_rx_update_s(struct ccid3_hc_rx_sock *hcrx, int len) | ||
676 | { | ||
677 | if (unlikely(len == 0)) /* don't update on empty packets (e.g. ACKs) */ | ||
678 | ccid3_pr_debug("Packet payload length is 0 - not updating\n"); | ||
679 | else | ||
680 | hcrx->ccid3hcrx_s = hcrx->ccid3hcrx_s == 0 ? len : | ||
681 | (9 * hcrx->ccid3hcrx_s + len) / 10; | ||
682 | } | ||
683 | |||
724 | static void ccid3_hc_rx_send_feedback(struct sock *sk) | 684 | static void ccid3_hc_rx_send_feedback(struct sock *sk) |
725 | { | 685 | { |
726 | struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk); | 686 | struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk); |
@@ -743,18 +703,15 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk) | |||
743 | delta); | 703 | delta); |
744 | } | 704 | } |
745 | break; | 705 | break; |
746 | default: | 706 | case TFRC_RSTATE_TERM: |
747 | printk(KERN_CRIT "%s: %s, sk=%p, Illegal state (%d)!\n", | 707 | DCCP_BUG("Illegal %s state TERM, sk=%p", dccp_role(sk), sk); |
748 | __FUNCTION__, dccp_role(sk), sk, hcrx->ccid3hcrx_state); | ||
749 | dump_stack(); | ||
750 | return; | 708 | return; |
751 | } | 709 | } |
752 | 710 | ||
753 | packet = dccp_rx_hist_find_data_packet(&hcrx->ccid3hcrx_hist); | 711 | packet = dccp_rx_hist_find_data_packet(&hcrx->ccid3hcrx_hist); |
754 | if (unlikely(packet == NULL)) { | 712 | if (unlikely(packet == NULL)) { |
755 | LIMIT_NETDEBUG(KERN_WARNING "%s: %s, sk=%p, no data packet " | 713 | DCCP_WARN("%s, sk=%p, no data packet in history!\n", |
756 | "in history!\n", | 714 | dccp_role(sk), sk); |
757 | __FUNCTION__, dccp_role(sk), sk); | ||
758 | return; | 715 | return; |
759 | } | 716 | } |
760 | 717 | ||
@@ -842,29 +799,29 @@ static u32 ccid3_hc_rx_calc_first_li(struct sock *sk) | |||
842 | } | 799 | } |
843 | 800 | ||
844 | if (unlikely(step == 0)) { | 801 | if (unlikely(step == 0)) { |
845 | LIMIT_NETDEBUG(KERN_WARNING "%s: %s, sk=%p, packet history " | 802 | DCCP_WARN("%s, sk=%p, packet history has no data packets!\n", |
846 | "contains no data packets!\n", | 803 | dccp_role(sk), sk); |
847 | __FUNCTION__, dccp_role(sk), sk); | ||
848 | return ~0; | 804 | return ~0; |
849 | } | 805 | } |
850 | 806 | ||
851 | if (unlikely(interval == 0)) { | 807 | if (unlikely(interval == 0)) { |
852 | LIMIT_NETDEBUG(KERN_WARNING "%s: %s, sk=%p, Could not find a " | 808 | DCCP_WARN("%s, sk=%p, Could not find a win_count interval > 0." |
853 | "win_count interval > 0. Defaulting to 1\n", | 809 | "Defaulting to 1\n", dccp_role(sk), sk); |
854 | __FUNCTION__, dccp_role(sk), sk); | ||
855 | interval = 1; | 810 | interval = 1; |
856 | } | 811 | } |
857 | found: | 812 | found: |
858 | if (!tail) { | 813 | if (!tail) { |
859 | LIMIT_NETDEBUG(KERN_WARNING "%s: tail is null\n", | 814 | DCCP_CRIT("tail is null\n"); |
860 | __FUNCTION__); | ||
861 | return ~0; | 815 | return ~0; |
862 | } | 816 | } |
863 | rtt = timeval_delta(&tstamp, &tail->dccphrx_tstamp) * 4 / interval; | 817 | rtt = timeval_delta(&tstamp, &tail->dccphrx_tstamp) * 4 / interval; |
864 | ccid3_pr_debug("%s, sk=%p, approximated RTT to %uus\n", | 818 | ccid3_pr_debug("%s, sk=%p, approximated RTT to %uus\n", |
865 | dccp_role(sk), sk, rtt); | 819 | dccp_role(sk), sk, rtt); |
866 | if (rtt == 0) | 820 | |
867 | rtt = 1; | 821 | if (rtt == 0) { |
822 | DCCP_WARN("RTT==0, setting to 1\n"); | ||
823 | rtt = 1; | ||
824 | } | ||
868 | 825 | ||
869 | dccp_timestamp(sk, &tstamp); | 826 | dccp_timestamp(sk, &tstamp); |
870 | delta = timeval_delta(&tstamp, &hcrx->ccid3hcrx_tstamp_last_feedback); | 827 | delta = timeval_delta(&tstamp, &hcrx->ccid3hcrx_tstamp_last_feedback); |
@@ -878,9 +835,7 @@ found: | |||
878 | tmp2 = (u32)tmp1; | 835 | tmp2 = (u32)tmp1; |
879 | 836 | ||
880 | if (!tmp2) { | 837 | if (!tmp2) { |
881 | LIMIT_NETDEBUG(KERN_WARNING "tmp2 = 0 " | 838 | DCCP_CRIT("tmp2 = 0, x_recv = %u, rtt =%u\n", x_recv, rtt); |
882 | "%s: x_recv = %u, rtt =%u\n", | ||
883 | __FUNCTION__, x_recv, rtt); | ||
884 | return ~0; | 839 | return ~0; |
885 | } | 840 | } |
886 | 841 | ||
@@ -926,8 +881,7 @@ static void ccid3_hc_rx_update_li(struct sock *sk, u64 seq_loss, u8 win_loss) | |||
926 | entry = dccp_li_hist_entry_new(ccid3_li_hist, SLAB_ATOMIC); | 881 | entry = dccp_li_hist_entry_new(ccid3_li_hist, SLAB_ATOMIC); |
927 | 882 | ||
928 | if (entry == NULL) { | 883 | if (entry == NULL) { |
929 | printk(KERN_CRIT "%s: out of memory\n",__FUNCTION__); | 884 | DCCP_BUG("out of memory - can not allocate entry"); |
930 | dump_stack(); | ||
931 | return; | 885 | return; |
932 | } | 886 | } |
933 | 887 | ||
@@ -1002,13 +956,10 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
1002 | const struct dccp_options_received *opt_recv; | 956 | const struct dccp_options_received *opt_recv; |
1003 | struct dccp_rx_hist_entry *packet; | 957 | struct dccp_rx_hist_entry *packet; |
1004 | struct timeval now; | 958 | struct timeval now; |
1005 | u8 win_count; | ||
1006 | u32 p_prev, rtt_prev, r_sample, t_elapsed; | 959 | u32 p_prev, rtt_prev, r_sample, t_elapsed; |
1007 | int loss; | 960 | int loss, payload_size; |
1008 | 961 | ||
1009 | BUG_ON(hcrx == NULL || | 962 | BUG_ON(hcrx == NULL); |
1010 | !(hcrx->ccid3hcrx_state == TFRC_RSTATE_NO_DATA || | ||
1011 | hcrx->ccid3hcrx_state == TFRC_RSTATE_DATA)); | ||
1012 | 963 | ||
1013 | opt_recv = &dccp_sk(sk)->dccps_options_received; | 964 | opt_recv = &dccp_sk(sk)->dccps_options_received; |
1014 | 965 | ||
@@ -1026,9 +977,8 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
1026 | t_elapsed = opt_recv->dccpor_elapsed_time * 10; | 977 | t_elapsed = opt_recv->dccpor_elapsed_time * 10; |
1027 | 978 | ||
1028 | if (unlikely(r_sample <= t_elapsed)) | 979 | if (unlikely(r_sample <= t_elapsed)) |
1029 | LIMIT_NETDEBUG(KERN_WARNING "%s: r_sample=%uus, " | 980 | DCCP_WARN("r_sample=%uus, t_elapsed=%uus\n", |
1030 | "t_elapsed=%uus\n", | 981 | r_sample, t_elapsed); |
1031 | __FUNCTION__, r_sample, t_elapsed); | ||
1032 | else | 982 | else |
1033 | r_sample -= t_elapsed; | 983 | r_sample -= t_elapsed; |
1034 | 984 | ||
@@ -1052,19 +1002,19 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
1052 | packet = dccp_rx_hist_entry_new(ccid3_rx_hist, sk, opt_recv->dccpor_ndp, | 1002 | packet = dccp_rx_hist_entry_new(ccid3_rx_hist, sk, opt_recv->dccpor_ndp, |
1053 | skb, SLAB_ATOMIC); | 1003 | skb, SLAB_ATOMIC); |
1054 | if (unlikely(packet == NULL)) { | 1004 | if (unlikely(packet == NULL)) { |
1055 | LIMIT_NETDEBUG(KERN_WARNING "%s: %s, sk=%p, Not enough mem to " | 1005 | DCCP_WARN("%s, sk=%p, Not enough mem to add rx packet " |
1056 | "add rx packet to history, consider it lost!\n", | 1006 | "to history, consider it lost!\n", dccp_role(sk), sk); |
1057 | __FUNCTION__, dccp_role(sk), sk); | ||
1058 | return; | 1007 | return; |
1059 | } | 1008 | } |
1060 | 1009 | ||
1061 | win_count = packet->dccphrx_ccval; | ||
1062 | |||
1063 | loss = ccid3_hc_rx_detect_loss(sk, packet); | 1010 | loss = ccid3_hc_rx_detect_loss(sk, packet); |
1064 | 1011 | ||
1065 | if (DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_ACK) | 1012 | if (DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_ACK) |
1066 | return; | 1013 | return; |
1067 | 1014 | ||
1015 | payload_size = skb->len - dccp_hdr(skb)->dccph_doff * 4; | ||
1016 | ccid3_hc_rx_update_s(hcrx, payload_size); | ||
1017 | |||
1068 | switch (hcrx->ccid3hcrx_state) { | 1018 | switch (hcrx->ccid3hcrx_state) { |
1069 | case TFRC_RSTATE_NO_DATA: | 1019 | case TFRC_RSTATE_NO_DATA: |
1070 | ccid3_pr_debug("%s, sk=%p(%s), skb=%p, sending initial " | 1020 | ccid3_pr_debug("%s, sk=%p(%s), skb=%p, sending initial " |
@@ -1075,8 +1025,7 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
1075 | ccid3_hc_rx_set_state(sk, TFRC_RSTATE_DATA); | 1025 | ccid3_hc_rx_set_state(sk, TFRC_RSTATE_DATA); |
1076 | return; | 1026 | return; |
1077 | case TFRC_RSTATE_DATA: | 1027 | case TFRC_RSTATE_DATA: |
1078 | hcrx->ccid3hcrx_bytes_recv += skb->len - | 1028 | hcrx->ccid3hcrx_bytes_recv += payload_size; |
1079 | dccp_hdr(skb)->dccph_doff * 4; | ||
1080 | if (loss) | 1029 | if (loss) |
1081 | break; | 1030 | break; |
1082 | 1031 | ||
@@ -1087,10 +1036,8 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
1087 | ccid3_hc_rx_send_feedback(sk); | 1036 | ccid3_hc_rx_send_feedback(sk); |
1088 | } | 1037 | } |
1089 | return; | 1038 | return; |
1090 | default: | 1039 | case TFRC_RSTATE_TERM: |
1091 | printk(KERN_CRIT "%s: %s, sk=%p, Illegal state (%d)!\n", | 1040 | DCCP_BUG("Illegal %s state TERM, sk=%p", dccp_role(sk), sk); |
1092 | __FUNCTION__, dccp_role(sk), sk, hcrx->ccid3hcrx_state); | ||
1093 | dump_stack(); | ||
1094 | return; | 1041 | return; |
1095 | } | 1042 | } |
1096 | 1043 | ||
@@ -1107,10 +1054,8 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
1107 | /* Scaling up by 1000000 as fixed decimal */ | 1054 | /* Scaling up by 1000000 as fixed decimal */ |
1108 | if (i_mean != 0) | 1055 | if (i_mean != 0) |
1109 | hcrx->ccid3hcrx_p = 1000000 / i_mean; | 1056 | hcrx->ccid3hcrx_p = 1000000 / i_mean; |
1110 | } else { | 1057 | } else |
1111 | printk(KERN_CRIT "%s: empty loss hist\n",__FUNCTION__); | 1058 | DCCP_BUG("empty loss history"); |
1112 | dump_stack(); | ||
1113 | } | ||
1114 | 1059 | ||
1115 | if (hcrx->ccid3hcrx_p > p_prev) { | 1060 | if (hcrx->ccid3hcrx_p > p_prev) { |
1116 | ccid3_hc_rx_send_feedback(sk); | 1061 | ccid3_hc_rx_send_feedback(sk); |
@@ -1120,22 +1065,16 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
1120 | 1065 | ||
1121 | static int ccid3_hc_rx_init(struct ccid *ccid, struct sock *sk) | 1066 | static int ccid3_hc_rx_init(struct ccid *ccid, struct sock *sk) |
1122 | { | 1067 | { |
1123 | struct dccp_sock *dp = dccp_sk(sk); | ||
1124 | struct ccid3_hc_rx_sock *hcrx = ccid_priv(ccid); | 1068 | struct ccid3_hc_rx_sock *hcrx = ccid_priv(ccid); |
1125 | 1069 | ||
1126 | ccid3_pr_debug("%s, sk=%p\n", dccp_role(sk), sk); | 1070 | ccid3_pr_debug("%s, sk=%p\n", dccp_role(sk), sk); |
1127 | 1071 | ||
1128 | if (dp->dccps_packet_size >= TFRC_MIN_PACKET_SIZE && | ||
1129 | dp->dccps_packet_size <= TFRC_MAX_PACKET_SIZE) | ||
1130 | hcrx->ccid3hcrx_s = dp->dccps_packet_size; | ||
1131 | else | ||
1132 | hcrx->ccid3hcrx_s = TFRC_STD_PACKET_SIZE; | ||
1133 | |||
1134 | hcrx->ccid3hcrx_state = TFRC_RSTATE_NO_DATA; | 1072 | hcrx->ccid3hcrx_state = TFRC_RSTATE_NO_DATA; |
1135 | INIT_LIST_HEAD(&hcrx->ccid3hcrx_hist); | 1073 | INIT_LIST_HEAD(&hcrx->ccid3hcrx_hist); |
1136 | INIT_LIST_HEAD(&hcrx->ccid3hcrx_li_hist); | 1074 | INIT_LIST_HEAD(&hcrx->ccid3hcrx_li_hist); |
1137 | dccp_timestamp(sk, &hcrx->ccid3hcrx_tstamp_last_ack); | 1075 | dccp_timestamp(sk, &hcrx->ccid3hcrx_tstamp_last_ack); |
1138 | hcrx->ccid3hcrx_tstamp_last_feedback = hcrx->ccid3hcrx_tstamp_last_ack; | 1076 | hcrx->ccid3hcrx_tstamp_last_feedback = hcrx->ccid3hcrx_tstamp_last_ack; |
1077 | hcrx->ccid3hcrx_s = 0; | ||
1139 | hcrx->ccid3hcrx_rtt = 5000; /* XXX 5ms for now... */ | 1078 | hcrx->ccid3hcrx_rtt = 5000; /* XXX 5ms for now... */ |
1140 | return 0; | 1079 | return 0; |
1141 | } | 1080 | } |
@@ -1261,8 +1200,10 @@ static struct ccid_operations ccid3 = { | |||
1261 | .ccid_hc_tx_getsockopt = ccid3_hc_tx_getsockopt, | 1200 | .ccid_hc_tx_getsockopt = ccid3_hc_tx_getsockopt, |
1262 | }; | 1201 | }; |
1263 | 1202 | ||
1203 | #ifdef CONFIG_IP_DCCP_CCID3_DEBUG | ||
1264 | module_param(ccid3_debug, int, 0444); | 1204 | module_param(ccid3_debug, int, 0444); |
1265 | MODULE_PARM_DESC(ccid3_debug, "Enable debug messages"); | 1205 | MODULE_PARM_DESC(ccid3_debug, "Enable debug messages"); |
1206 | #endif | ||
1266 | 1207 | ||
1267 | static __init int ccid3_module_init(void) | 1208 | static __init int ccid3_module_init(void) |
1268 | { | 1209 | { |
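A detail worth keeping in mind for the receiver-side hunks above: the loss event rate p is stored as a fixed-point value scaled by 1000000, so p = 1000000 / i_mean maps a mean loss interval of 200 packets to p = 5000, i.e. 0.5%. A small standalone check of that scaling:

#include <stdio.h>

int main(void)
{
	const unsigned i_mean[] = { 50, 200, 1000 };	/* mean loss interval, packets */

	for (int i = 0; i < 3; i++)	/* p scaled by 1e6, e.g. 20000 = 2% */
		printf("i_mean=%4u -> p=%u\n", i_mean[i], 1000000 / i_mean[i]);
	return 0;
}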
diff --git a/net/dccp/ccids/ccid3.h b/net/dccp/ccids/ccid3.h index 0a2cb7536d26..27cb20ae1da8 100644 --- a/net/dccp/ccids/ccid3.h +++ b/net/dccp/ccids/ccid3.h | |||
@@ -42,20 +42,14 @@ | |||
42 | #include <linux/tfrc.h> | 42 | #include <linux/tfrc.h> |
43 | #include "../ccid.h" | 43 | #include "../ccid.h" |
44 | 44 | ||
45 | #define TFRC_MIN_PACKET_SIZE 16 | 45 | /* Two seconds as per RFC 3448 4.2 */ |
46 | #define TFRC_STD_PACKET_SIZE 256 | ||
47 | #define TFRC_MAX_PACKET_SIZE 65535 | ||
48 | |||
49 | /* Two seconds as per CCID3 spec */ | ||
50 | #define TFRC_INITIAL_TIMEOUT (2 * USEC_PER_SEC) | 46 | #define TFRC_INITIAL_TIMEOUT (2 * USEC_PER_SEC) |
51 | 47 | ||
52 | #define TFRC_INITIAL_IPI (USEC_PER_SEC / 4) | ||
53 | |||
54 | /* In usecs - half the scheduling granularity as per RFC3448 4.6 */ | 48 | /* In usecs - half the scheduling granularity as per RFC3448 4.6 */ |
55 | #define TFRC_OPSYS_HALF_TIME_GRAN (USEC_PER_SEC / (2 * HZ)) | 49 | #define TFRC_OPSYS_HALF_TIME_GRAN (USEC_PER_SEC / (2 * HZ)) |
56 | 50 | ||
57 | /* In seconds */ | 51 | /* Parameter t_mbi from [RFC 3448, 4.3]: backoff interval in seconds */ |
58 | #define TFRC_MAX_BACK_OFF_TIME 64 | 52 | #define TFRC_T_MBI 64 |
59 | 53 | ||
60 | #define TFRC_SMALLEST_P 40 | 54 | #define TFRC_SMALLEST_P 40 |
61 | 55 | ||
@@ -73,26 +67,36 @@ struct ccid3_options_received { | |||
73 | u32 ccid3or_receive_rate; | 67 | u32 ccid3or_receive_rate; |
74 | }; | 68 | }; |
75 | 69 | ||
76 | /** struct ccid3_hc_tx_sock - CCID3 sender half connection sock | 70 | /* TFRC sender states */ |
71 | enum ccid3_hc_tx_states { | ||
72 | TFRC_SSTATE_NO_SENT = 1, | ||
73 | TFRC_SSTATE_NO_FBACK, | ||
74 | TFRC_SSTATE_FBACK, | ||
75 | TFRC_SSTATE_TERM, | ||
76 | }; | ||
77 | |||
78 | /** struct ccid3_hc_tx_sock - CCID3 sender half-connection socket | ||
77 | * | 79 | * |
78 | * @ccid3hctx_state - Sender state | 80 | * @ccid3hctx_x - Current sending rate |
79 | * @ccid3hctx_x - Current sending rate | 81 | * @ccid3hctx_x_recv - Receive rate |
80 | * @ccid3hctx_x_recv - Receive rate | 82 | * @ccid3hctx_x_calc - Calculated send rate (RFC 3448, 3.1) |
81 | * @ccid3hctx_x_calc - Calculated send (?) rate | 83 | * @ccid3hctx_rtt - Estimate of current round trip time in usecs |
82 | * @ccid3hctx_s - Packet size | 84 | * @ccid3hctx_p - Current loss event rate (0-1) scaled by 1000000 |
83 | * @ccid3hctx_rtt - Estimate of current round trip time in usecs | 85 | * @ccid3hctx_s - Packet size |
84 | * @@ccid3hctx_p - Current loss event rate (0-1) scaled by 1000000 | 86 | * @ccid3hctx_t_rto - Retransmission Timeout (RFC 3448, 3.1) |
85 | * @ccid3hctx_last_win_count - Last window counter sent | 87 | * @ccid3hctx_t_ipi - Interpacket (send) interval (RFC 3448, 4.6) |
86 | * @ccid3hctx_t_last_win_count - Timestamp of earliest packet | 88 | * @ccid3hctx_state - Sender state, one of %ccid3_hc_tx_states |
87 | * with last_win_count value sent | 89 | * @ccid3hctx_last_win_count - Last window counter sent |
88 | * @ccid3hctx_no_feedback_timer - Handle to no feedback timer | 90 | * @ccid3hctx_t_last_win_count - Timestamp of earliest packet |
89 | * @ccid3hctx_idle - FIXME | 91 | * with last_win_count value sent |
90 | * @ccid3hctx_t_ld - Time last doubled during slow start | 92 | * @ccid3hctx_no_feedback_timer - Handle to no feedback timer |
91 | * @ccid3hctx_t_nom - Nominal send time of next packet | 93 | * @ccid3hctx_idle - Flag indicating that sender is idling |
92 | * @ccid3hctx_t_ipi - Interpacket (send) interval | 94 | * @ccid3hctx_t_ld - Time last doubled during slow start |
93 | * @ccid3hctx_delta - Send timer delta | 95 | * @ccid3hctx_t_nom - Nominal send time of next packet |
94 | * @ccid3hctx_hist - Packet history | 96 | * @ccid3hctx_delta - Send timer delta |
95 | */ | 97 | * @ccid3hctx_hist - Packet history |
98 | * @ccid3hctx_options_received - Parsed set of retrieved options | ||
99 | */ | ||
96 | struct ccid3_hc_tx_sock { | 100 | struct ccid3_hc_tx_sock { |
97 | struct tfrc_tx_info ccid3hctx_tfrc; | 101 | struct tfrc_tx_info ccid3hctx_tfrc; |
98 | #define ccid3hctx_x ccid3hctx_tfrc.tfrctx_x | 102 | #define ccid3hctx_x ccid3hctx_tfrc.tfrctx_x |
@@ -103,7 +107,7 @@ struct ccid3_hc_tx_sock { | |||
103 | #define ccid3hctx_t_rto ccid3hctx_tfrc.tfrctx_rto | 107 | #define ccid3hctx_t_rto ccid3hctx_tfrc.tfrctx_rto |
104 | #define ccid3hctx_t_ipi ccid3hctx_tfrc.tfrctx_ipi | 108 | #define ccid3hctx_t_ipi ccid3hctx_tfrc.tfrctx_ipi |
105 | u16 ccid3hctx_s; | 109 | u16 ccid3hctx_s; |
106 | u8 ccid3hctx_state; | 110 | enum ccid3_hc_tx_states ccid3hctx_state:8; |
107 | u8 ccid3hctx_last_win_count; | 111 | u8 ccid3hctx_last_win_count; |
108 | u8 ccid3hctx_idle; | 112 | u8 ccid3hctx_idle; |
109 | struct timeval ccid3hctx_t_last_win_count; | 113 | struct timeval ccid3hctx_t_last_win_count; |
@@ -115,23 +119,48 @@ struct ccid3_hc_tx_sock { | |||
115 | struct ccid3_options_received ccid3hctx_options_received; | 119 | struct ccid3_options_received ccid3hctx_options_received; |
116 | }; | 120 | }; |
117 | 121 | ||
122 | /* TFRC receiver states */ | ||
123 | enum ccid3_hc_rx_states { | ||
124 | TFRC_RSTATE_NO_DATA = 1, | ||
125 | TFRC_RSTATE_DATA, | ||
126 | TFRC_RSTATE_TERM = 127, | ||
127 | }; | ||
128 | |||
129 | /** struct ccid3_hc_rx_sock - CCID3 receiver half-connection socket | ||
130 | * | ||
131 | * @ccid3hcrx_x_recv - Receiver estimate of send rate (RFC 3448 4.3) | ||
132 | * @ccid3hcrx_rtt - Receiver estimate of rtt (non-standard) | ||
133 | * @ccid3hcrx_p - current loss event rate (RFC 3448 5.4) | ||
134 | * @ccid3hcrx_seqno_nonloss - Last received non-loss sequence number | ||
135 | * @ccid3hcrx_ccval_nonloss - Last received non-loss Window CCVal | ||
136 | * @ccid3hcrx_ccval_last_counter - Tracks window counter (RFC 4342, 8.1) | ||
137 | * @ccid3hcrx_state - receiver state, one of %ccid3_hc_rx_states | ||
138 | * @ccid3hcrx_bytes_recv - Total sum of DCCP payload bytes | ||
139 | * @ccid3hcrx_tstamp_last_feedback - Time at which last feedback was sent | ||
140 | * @ccid3hcrx_tstamp_last_ack - Time at which last Ack was sent | ||
141 | * @ccid3hcrx_hist - Packet history | ||
142 | * @ccid3hcrx_li_hist - Loss Interval History | ||
143 | * @ccid3hcrx_s - Received packet size in bytes | ||
144 | * @ccid3hcrx_pinv - Inverse of Loss Event Rate (RFC 4342, sec. 8.5) | ||
145 | * @ccid3hcrx_elapsed_time - Time since packet reception | ||
146 | */ | ||
118 | struct ccid3_hc_rx_sock { | 147 | struct ccid3_hc_rx_sock { |
119 | struct tfrc_rx_info ccid3hcrx_tfrc; | 148 | struct tfrc_rx_info ccid3hcrx_tfrc; |
120 | #define ccid3hcrx_x_recv ccid3hcrx_tfrc.tfrcrx_x_recv | 149 | #define ccid3hcrx_x_recv ccid3hcrx_tfrc.tfrcrx_x_recv |
121 | #define ccid3hcrx_rtt ccid3hcrx_tfrc.tfrcrx_rtt | 150 | #define ccid3hcrx_rtt ccid3hcrx_tfrc.tfrcrx_rtt |
122 | #define ccid3hcrx_p ccid3hcrx_tfrc.tfrcrx_p | 151 | #define ccid3hcrx_p ccid3hcrx_tfrc.tfrcrx_p |
123 | u64 ccid3hcrx_seqno_nonloss:48, | 152 | u64 ccid3hcrx_seqno_nonloss:48, |
124 | ccid3hcrx_ccval_nonloss:4, | 153 | ccid3hcrx_ccval_nonloss:4, |
125 | ccid3hcrx_state:8, | 154 | ccid3hcrx_ccval_last_counter:4; |
126 | ccid3hcrx_ccval_last_counter:4; | 155 | enum ccid3_hc_rx_states ccid3hcrx_state:8; |
127 | u32 ccid3hcrx_bytes_recv; | 156 | u32 ccid3hcrx_bytes_recv; |
128 | struct timeval ccid3hcrx_tstamp_last_feedback; | 157 | struct timeval ccid3hcrx_tstamp_last_feedback; |
129 | struct timeval ccid3hcrx_tstamp_last_ack; | 158 | struct timeval ccid3hcrx_tstamp_last_ack; |
130 | struct list_head ccid3hcrx_hist; | 159 | struct list_head ccid3hcrx_hist; |
131 | struct list_head ccid3hcrx_li_hist; | 160 | struct list_head ccid3hcrx_li_hist; |
132 | u16 ccid3hcrx_s; | 161 | u16 ccid3hcrx_s; |
133 | u32 ccid3hcrx_pinv; | 162 | u32 ccid3hcrx_pinv; |
134 | u32 ccid3hcrx_elapsed_time; | 163 | u32 ccid3hcrx_elapsed_time; |
135 | }; | 164 | }; |
136 | 165 | ||
137 | static inline struct ccid3_hc_tx_sock *ccid3_hc_tx_sk(const struct sock *sk) | 166 | static inline struct ccid3_hc_tx_sock *ccid3_hc_tx_sk(const struct sock *sk) |
diff --git a/net/dccp/ccids/lib/loss_interval.c b/net/dccp/ccids/lib/loss_interval.c index 906c81ab9d4f..48b9b93f8acb 100644 --- a/net/dccp/ccids/lib/loss_interval.c +++ b/net/dccp/ccids/lib/loss_interval.c | |||
@@ -13,7 +13,7 @@ | |||
13 | 13 | ||
14 | #include <linux/module.h> | 14 | #include <linux/module.h> |
15 | #include <net/sock.h> | 15 | #include <net/sock.h> |
16 | 16 | #include "../../dccp.h" | |
17 | #include "loss_interval.h" | 17 | #include "loss_interval.h" |
18 | 18 | ||
19 | struct dccp_li_hist *dccp_li_hist_new(const char *name) | 19 | struct dccp_li_hist *dccp_li_hist_new(const char *name) |
@@ -109,7 +109,7 @@ u32 dccp_li_hist_calc_i_mean(struct list_head *list) | |||
109 | i_tot = max(i_tot0, i_tot1); | 109 | i_tot = max(i_tot0, i_tot1); |
110 | 110 | ||
111 | if (!w_tot) { | 111 | if (!w_tot) { |
112 | LIMIT_NETDEBUG(KERN_WARNING "%s: w_tot = 0\n", __FUNCTION__); | 112 | DCCP_WARN("w_tot = 0\n"); |
113 | return 1; | 113 | return 1; |
114 | } | 114 | } |
115 | 115 | ||
@@ -128,7 +128,7 @@ int dccp_li_hist_interval_new(struct dccp_li_hist *hist, | |||
128 | entry = dccp_li_hist_entry_new(hist, SLAB_ATOMIC); | 128 | entry = dccp_li_hist_entry_new(hist, SLAB_ATOMIC); |
129 | if (entry == NULL) { | 129 | if (entry == NULL) { |
130 | dccp_li_hist_purge(hist, list); | 130 | dccp_li_hist_purge(hist, list); |
131 | dump_stack(); | 131 | DCCP_BUG("loss interval list entry is NULL"); |
132 | return 0; | 132 | return 0; |
133 | } | 133 | } |
134 | entry->dccplih_interval = ~0; | 134 | entry->dccplih_interval = ~0; |
diff --git a/net/dccp/ccids/lib/tfrc_equation.c b/net/dccp/ccids/lib/tfrc_equation.c index 44076e0c6591..2601012383fb 100644 --- a/net/dccp/ccids/lib/tfrc_equation.c +++ b/net/dccp/ccids/lib/tfrc_equation.c | |||
@@ -13,9 +13,8 @@ | |||
13 | */ | 13 | */ |
14 | 14 | ||
15 | #include <linux/module.h> | 15 | #include <linux/module.h> |
16 | |||
17 | #include <asm/div64.h> | 16 | #include <asm/div64.h> |
18 | 17 | #include "../../dccp.h" | |
19 | #include "tfrc.h" | 18 | #include "tfrc.h" |
20 | 19 | ||
21 | #define TFRC_CALC_X_ARRSIZE 500 | 20 | #define TFRC_CALC_X_ARRSIZE 500 |
@@ -588,8 +587,10 @@ u32 tfrc_calc_x(u16 s, u32 R, u32 p) | |||
588 | /* p should be 0 unless there is a bug in my code */ | 587 | /* p should be 0 unless there is a bug in my code */ |
589 | index = 0; | 588 | index = 0; |
590 | 589 | ||
591 | if (R == 0) | 590 | if (R == 0) { |
591 | DCCP_WARN("RTT==0, setting to 1\n"); | ||
592 | R = 1; /* RTT can't be zero or else divide by zero */ | 592 | R = 1; /* RTT can't be zero or else divide by zero */ |
593 | } | ||
593 | 594 | ||
594 | BUG_ON(index >= TFRC_CALC_X_ARRSIZE); | 595 | BUG_ON(index >= TFRC_CALC_X_ARRSIZE); |
595 | 596 | ||
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h index 272e8584564e..68886986c8e4 100644 --- a/net/dccp/dccp.h +++ b/net/dccp/dccp.h | |||
@@ -18,15 +18,33 @@ | |||
18 | #include <net/tcp.h> | 18 | #include <net/tcp.h> |
19 | #include "ackvec.h" | 19 | #include "ackvec.h" |
20 | 20 | ||
21 | /* | ||
22 | * DCCP - specific warning and debugging macros. | ||
23 | */ | ||
24 | #define DCCP_WARN(fmt, a...) LIMIT_NETDEBUG(KERN_WARNING "%s: " fmt, \ | ||
25 | __FUNCTION__, ##a) | ||
26 | #define DCCP_CRIT(fmt, a...) printk(KERN_CRIT fmt " at %s:%d/%s()\n", ##a, \ | ||
27 | __FILE__, __LINE__, __FUNCTION__) | ||
28 | #define DCCP_BUG(a...) do { DCCP_CRIT("BUG: " a); dump_stack(); } while(0) | ||
29 | #define DCCP_BUG_ON(cond) do { if (unlikely((cond) != 0)) \ | ||
30 | DCCP_BUG("\"%s\" holds (exception!)", \ | ||
31 | __stringify(cond)); \ | ||
32 | } while (0) | ||
33 | |||
34 | #ifdef MODULE | ||
35 | #define DCCP_PRINTK(enable, fmt, args...) do { if (enable) \ | ||
36 | printk(fmt, ##args); \ | ||
37 | } while(0) | ||
38 | #else | ||
39 | #define DCCP_PRINTK(enable, fmt, args...) printk(fmt, ##args) | ||
40 | #endif | ||
41 | #define DCCP_PR_DEBUG(enable, fmt, a...) DCCP_PRINTK(enable, KERN_DEBUG \ | ||
42 | "%s: " fmt, __FUNCTION__, ##a) | ||
43 | |||
21 | #ifdef CONFIG_IP_DCCP_DEBUG | 44 | #ifdef CONFIG_IP_DCCP_DEBUG |
22 | extern int dccp_debug; | 45 | extern int dccp_debug; |
23 | 46 | #define dccp_pr_debug(format, a...) DCCP_PR_DEBUG(dccp_debug, format, ##a) | |
24 | #define dccp_pr_debug(format, a...) \ | 47 | #define dccp_pr_debug_cat(format, a...) DCCP_PRINTK(dccp_debug, format, ##a) |
25 | do { if (dccp_debug) \ | ||
26 | printk(KERN_DEBUG "%s: " format, __FUNCTION__ , ##a); \ | ||
27 | } while (0) | ||
28 | #define dccp_pr_debug_cat(format, a...) do { if (dccp_debug) \ | ||
29 | printk(format, ##a); } while (0) | ||
30 | #else | 48 | #else |
31 | #define dccp_pr_debug(format, a...) | 49 | #define dccp_pr_debug(format, a...) |
32 | #define dccp_pr_debug_cat(format, a...) | 50 | #define dccp_pr_debug_cat(format, a...) |
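The new DCCP_WARN()/DCCP_CRIT()/DCCP_BUG()/DCCP_BUG_ON() helpers replace the open-coded printk() + dump_stack() sequences seen in the ccid3.c and loss_interval.c hunks above. A hedged usage sketch follows; the caller, its arguments, and the RTT check are made up for illustration and are not part of the patch.

	/* Hypothetical caller, for illustration only (not in the patch). */
	static u32 dccp_sanity_example(struct sock *sk, u32 rtt)
	{
		if (rtt == 0) {
			/* Rate-limited warning, prefixed with the function name */
			DCCP_WARN("RTT is zero, clamping to 1 usec\n");
			rtt = 1;
		}
		/* Prints "BUG: ..." with file/line/function and dumps the stack,
		 * but unlike BUG_ON() it does not halt the kernel. */
		DCCP_BUG_ON(sk == NULL);
		return rtt;
	}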
@@ -35,17 +53,21 @@ extern int dccp_debug; | |||
35 | extern struct inet_hashinfo dccp_hashinfo; | 53 | extern struct inet_hashinfo dccp_hashinfo; |
36 | 54 | ||
37 | extern atomic_t dccp_orphan_count; | 55 | extern atomic_t dccp_orphan_count; |
38 | extern int dccp_tw_count; | ||
39 | extern void dccp_tw_deschedule(struct inet_timewait_sock *tw); | ||
40 | 56 | ||
41 | extern void dccp_time_wait(struct sock *sk, int state, int timeo); | 57 | extern void dccp_time_wait(struct sock *sk, int state, int timeo); |
42 | 58 | ||
43 | /* FIXME: Right size this */ | 59 | /* |
44 | #define DCCP_MAX_OPT_LEN 128 | 60 | * Set safe upper bounds for header and option length. Since Data Offset is 8 |
45 | 61 | * bits (RFC 4340, sec. 5.1), the total header length can never be more than | |
46 | #define DCCP_MAX_PACKET_HDR 32 | 62 | * 4 * 255 = 1020 bytes. The largest possible header length is 28 bytes (X=1): |
47 | 63 | * - DCCP-Response with ACK Subheader and 4 bytes of Service code OR | |
48 | #define MAX_DCCP_HEADER (DCCP_MAX_PACKET_HDR + DCCP_MAX_OPT_LEN + MAX_HEADER) | 64 | * - DCCP-Reset with ACK Subheader and 4 bytes of Reset Code fields |
65 | * Hence a safe upper bound for the maximum option length is 1020-28 = 992 | ||
66 | */ | ||
67 | #define MAX_DCCP_SPECIFIC_HEADER (255 * sizeof(int)) | ||
68 | #define DCCP_MAX_PACKET_HDR 28 | ||
69 | #define DCCP_MAX_OPT_LEN (MAX_DCCP_SPECIFIC_HEADER - DCCP_MAX_PACKET_HDR) | ||
70 | #define MAX_DCCP_HEADER (MAX_DCCP_SPECIFIC_HEADER + MAX_HEADER) | ||
49 | 71 | ||
50 | #define DCCP_TIMEWAIT_LEN (60 * HZ) /* how long to wait to destroy TIME-WAIT | 72 | #define DCCP_TIMEWAIT_LEN (60 * HZ) /* how long to wait to destroy TIME-WAIT |
51 | * state, about 60 seconds */ | 73 | * state, about 60 seconds */ |
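The arithmetic behind the new header/option bounds above can be checked at compile time. The sketch below is not part of the patch and assumes sizeof(int) == 4 (true on all Linux targets) and BUILD_BUG_ON() from linux/kernel.h.

	/* Illustration only: Data Offset is 8 bits counting 32-bit words,
	 * so the largest DCCP header is 255 * 4 = 1020 bytes; subtracting
	 * the 28-byte maximum fixed header leaves 992 bytes for options. */
	static inline void dccp_header_bounds_check(void)
	{
		BUILD_BUG_ON(255 * sizeof(int) != 1020);  /* MAX_DCCP_SPECIFIC_HEADER */
		BUILD_BUG_ON(1020 - 28 != 992);           /* DCCP_MAX_OPT_LEN         */
	}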
@@ -58,6 +80,20 @@ extern void dccp_time_wait(struct sock *sk, int state, int timeo); | |||
58 | 80 | ||
59 | #define DCCP_RTO_MAX ((unsigned)(120 * HZ)) /* FIXME: using TCP value */ | 81 | #define DCCP_RTO_MAX ((unsigned)(120 * HZ)) /* FIXME: using TCP value */ |
60 | 82 | ||
83 | #define DCCP_XMIT_TIMEO 30000 /* Time/msecs for blocking transmit per packet */ | ||
84 | |||
85 | /* sysctl variables for DCCP */ | ||
86 | extern int sysctl_dccp_request_retries; | ||
87 | extern int sysctl_dccp_retries1; | ||
88 | extern int sysctl_dccp_retries2; | ||
89 | extern int sysctl_dccp_feat_sequence_window; | ||
90 | extern int sysctl_dccp_feat_rx_ccid; | ||
91 | extern int sysctl_dccp_feat_tx_ccid; | ||
92 | extern int sysctl_dccp_feat_ack_ratio; | ||
93 | extern int sysctl_dccp_feat_send_ack_vector; | ||
94 | extern int sysctl_dccp_feat_send_ndp_count; | ||
95 | extern int sysctl_dccp_tx_qlen; | ||
96 | |||
61 | /* is seq1 < seq2 ? */ | 97 | /* is seq1 < seq2 ? */ |
62 | static inline int before48(const u64 seq1, const u64 seq2) | 98 | static inline int before48(const u64 seq1, const u64 seq2) |
63 | { | 99 | { |
@@ -123,10 +159,36 @@ DECLARE_SNMP_STAT(struct dccp_mib, dccp_statistics); | |||
123 | #define DCCP_ADD_STATS_USER(field, val) \ | 159 | #define DCCP_ADD_STATS_USER(field, val) \ |
124 | SNMP_ADD_STATS_USER(dccp_statistics, field, val) | 160 | SNMP_ADD_STATS_USER(dccp_statistics, field, val) |
125 | 161 | ||
162 | /* | ||
163 | * Checksumming routines | ||
164 | */ | ||
165 | static inline int dccp_csum_coverage(const struct sk_buff *skb) | ||
166 | { | ||
167 | const struct dccp_hdr* dh = dccp_hdr(skb); | ||
168 | |||
169 | if (dh->dccph_cscov == 0) | ||
170 | return skb->len; | ||
171 | return (dh->dccph_doff + dh->dccph_cscov - 1) * sizeof(u32); | ||
172 | } | ||
173 | |||
174 | static inline void dccp_csum_outgoing(struct sk_buff *skb) | ||
175 | { | ||
176 | int cov = dccp_csum_coverage(skb); | ||
177 | |||
178 | if (cov >= skb->len) | ||
179 | dccp_hdr(skb)->dccph_cscov = 0; | ||
180 | |||
181 | skb->csum = skb_checksum(skb, 0, (cov > skb->len)? skb->len : cov, 0); | ||
182 | } | ||
183 | |||
184 | extern void dccp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb); | ||
185 | |||
126 | extern int dccp_retransmit_skb(struct sock *sk, struct sk_buff *skb); | 186 | extern int dccp_retransmit_skb(struct sock *sk, struct sk_buff *skb); |
127 | 187 | ||
128 | extern void dccp_send_ack(struct sock *sk); | 188 | extern void dccp_send_ack(struct sock *sk); |
129 | extern void dccp_send_delayed_ack(struct sock *sk); | 189 | extern void dccp_send_delayed_ack(struct sock *sk); |
190 | extern void dccp_reqsk_send_ack(struct sk_buff *sk, struct request_sock *rsk); | ||
191 | |||
130 | extern void dccp_send_sync(struct sock *sk, const u64 seq, | 192 | extern void dccp_send_sync(struct sock *sk, const u64 seq, |
131 | const enum dccp_pkt_type pkt_type); | 193 | const enum dccp_pkt_type pkt_type); |
132 | 194 | ||
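The checksum-coverage helpers added above implement partial coverage (CsCov): a value of 0 covers the whole packet, while a non-zero value covers the header plus the initial words of application data, using the (doff + cscov - 1) * 4 formula. A worked example with assumed sample values (illustrative only, kernel u32 type assumed):

	/* Illustration only (not in the patch): coverage for a packet with a
	 * 28-byte header (doff = 7 words) and CsCov = 3, i.e. header plus
	 * the first two 32-bit words of application data. */
	static unsigned int dccp_cscov_example(void)
	{
		const unsigned int doff  = 7;	/* header length in 32-bit words */
		const unsigned int cscov = 3;	/* CsCov field from the header   */

		return (doff + cscov - 1) * sizeof(u32);	/* == 36 bytes */
	}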
@@ -147,18 +209,7 @@ extern const char *dccp_state_name(const int state); | |||
147 | extern void dccp_set_state(struct sock *sk, const int state); | 209 | extern void dccp_set_state(struct sock *sk, const int state); |
148 | extern void dccp_done(struct sock *sk); | 210 | extern void dccp_done(struct sock *sk); |
149 | 211 | ||
150 | static inline void dccp_openreq_init(struct request_sock *req, | 212 | extern void dccp_reqsk_init(struct request_sock *req, struct sk_buff *skb); |
151 | struct dccp_sock *dp, | ||
152 | struct sk_buff *skb) | ||
153 | { | ||
154 | /* | ||
155 | * FIXME: fill in the other req fields from the DCCP options | ||
156 | * received | ||
157 | */ | ||
158 | inet_rsk(req)->rmt_port = dccp_hdr(skb)->dccph_sport; | ||
159 | inet_rsk(req)->acked = 0; | ||
160 | req->rcv_wnd = 0; | ||
161 | } | ||
162 | 213 | ||
163 | extern int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb); | 214 | extern int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb); |
164 | 215 | ||
@@ -217,14 +268,9 @@ extern void dccp_shutdown(struct sock *sk, int how); | |||
217 | extern int inet_dccp_listen(struct socket *sock, int backlog); | 268 | extern int inet_dccp_listen(struct socket *sock, int backlog); |
218 | extern unsigned int dccp_poll(struct file *file, struct socket *sock, | 269 | extern unsigned int dccp_poll(struct file *file, struct socket *sock, |
219 | poll_table *wait); | 270 | poll_table *wait); |
220 | extern void dccp_v4_send_check(struct sock *sk, int len, | ||
221 | struct sk_buff *skb); | ||
222 | extern int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, | 271 | extern int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, |
223 | int addr_len); | 272 | int addr_len); |
224 | 273 | ||
225 | extern int dccp_v4_checksum(const struct sk_buff *skb, | ||
226 | const __be32 saddr, const __be32 daddr); | ||
227 | |||
228 | extern int dccp_send_reset(struct sock *sk, enum dccp_reset_codes code); | 274 | extern int dccp_send_reset(struct sock *sk, enum dccp_reset_codes code); |
229 | extern void dccp_send_close(struct sock *sk, const int active); | 275 | extern void dccp_send_close(struct sock *sk, const int active); |
230 | extern int dccp_invalid_packet(struct sk_buff *skb); | 276 | extern int dccp_invalid_packet(struct sk_buff *skb); |
diff --git a/net/dccp/feat.c b/net/dccp/feat.c index a1b0682ee77c..4dc487f27a1f 100644 --- a/net/dccp/feat.c +++ b/net/dccp/feat.c | |||
@@ -12,7 +12,6 @@ | |||
12 | 12 | ||
13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
14 | 14 | ||
15 | #include "dccp.h" | ||
16 | #include "ccid.h" | 15 | #include "ccid.h" |
17 | #include "feat.h" | 16 | #include "feat.h" |
18 | 17 | ||
@@ -23,9 +22,17 @@ int dccp_feat_change(struct dccp_minisock *dmsk, u8 type, u8 feature, | |||
23 | { | 22 | { |
24 | struct dccp_opt_pend *opt; | 23 | struct dccp_opt_pend *opt; |
25 | 24 | ||
26 | dccp_pr_debug("feat change type=%d feat=%d\n", type, feature); | 25 | dccp_feat_debug(type, feature, *val); |
27 | 26 | ||
28 | /* XXX sanity check feat change request */ | 27 | if (!dccp_feat_is_valid_type(type)) { |
28 | DCCP_WARN("option type %d invalid in negotiation\n", type); | ||
29 | return 1; | ||
30 | } | ||
31 | if (!dccp_feat_is_valid_length(type, feature, len)) { | ||
32 | DCCP_WARN("invalid length %d\n", len); | ||
33 | return 1; | ||
34 | } | ||
35 | /* XXX add further sanity checks */ | ||
29 | 36 | ||
30 | /* check if that feature is already being negotiated */ | 37 | /* check if that feature is already being negotiated */ |
31 | list_for_each_entry(opt, &dmsk->dccpms_pending, dccpop_node) { | 38 | list_for_each_entry(opt, &dmsk->dccpms_pending, dccpop_node) { |
@@ -95,14 +102,14 @@ static int dccp_feat_update_ccid(struct sock *sk, u8 type, u8 new_ccid_nr) | |||
95 | /* XXX taking only u8 vals */ | 102 | /* XXX taking only u8 vals */ |
96 | static int dccp_feat_update(struct sock *sk, u8 type, u8 feat, u8 val) | 103 | static int dccp_feat_update(struct sock *sk, u8 type, u8 feat, u8 val) |
97 | { | 104 | { |
98 | dccp_pr_debug("changing [%d] feat %d to %d\n", type, feat, val); | 105 | dccp_feat_debug(type, feat, val); |
99 | 106 | ||
100 | switch (feat) { | 107 | switch (feat) { |
101 | case DCCPF_CCID: | 108 | case DCCPF_CCID: |
102 | return dccp_feat_update_ccid(sk, type, val); | 109 | return dccp_feat_update_ccid(sk, type, val); |
103 | default: | 110 | default: |
104 | dccp_pr_debug("IMPLEMENT changing [%d] feat %d to %d\n", | 111 | dccp_pr_debug("UNIMPLEMENTED: %s(%d, ...)\n", |
105 | type, feat, val); | 112 | dccp_feat_typename(type), feat); |
106 | break; | 113 | break; |
107 | } | 114 | } |
108 | return 0; | 115 | return 0; |
@@ -162,7 +169,8 @@ static int dccp_feat_reconcile(struct sock *sk, struct dccp_opt_pend *opt, | |||
162 | break; | 169 | break; |
163 | 170 | ||
164 | default: | 171 | default: |
165 | WARN_ON(1); /* XXX implement res */ | 172 | DCCP_BUG("Fell through, feat=%d", opt->dccpop_feat); |
173 | /* XXX implement res */ | ||
166 | return -EFAULT; | 174 | return -EFAULT; |
167 | } | 175 | } |
168 | 176 | ||
@@ -265,10 +273,10 @@ static int dccp_feat_nn(struct sock *sk, u8 type, u8 feature, u8 *val, u8 len) | |||
265 | u8 *copy; | 273 | u8 *copy; |
266 | int rc; | 274 | int rc; |
267 | 275 | ||
268 | /* NN features must be change L */ | 276 | /* NN features must be Change L (sec. 6.3.2) */ |
269 | if (type == DCCPO_CHANGE_R) { | 277 | if (type != DCCPO_CHANGE_L) { |
270 | dccp_pr_debug("received CHANGE_R %d for NN feat %d\n", | 278 | dccp_pr_debug("received %s for NN feature %d\n", |
271 | type, feature); | 279 | dccp_feat_typename(type), feature); |
272 | return -EFAULT; | 280 | return -EFAULT; |
273 | } | 281 | } |
274 | 282 | ||
@@ -279,12 +287,11 @@ static int dccp_feat_nn(struct sock *sk, u8 type, u8 feature, u8 *val, u8 len) | |||
279 | if (opt == NULL) | 287 | if (opt == NULL) |
280 | return -ENOMEM; | 288 | return -ENOMEM; |
281 | 289 | ||
282 | copy = kmalloc(len, GFP_ATOMIC); | 290 | copy = kmemdup(val, len, GFP_ATOMIC); |
283 | if (copy == NULL) { | 291 | if (copy == NULL) { |
284 | kfree(opt); | 292 | kfree(opt); |
285 | return -ENOMEM; | 293 | return -ENOMEM; |
286 | } | 294 | } |
287 | memcpy(copy, val, len); | ||
288 | 295 | ||
289 | opt->dccpop_type = DCCPO_CONFIRM_R; /* NN can only confirm R */ | 296 | opt->dccpop_type = DCCPO_CONFIRM_R; /* NN can only confirm R */ |
290 | opt->dccpop_feat = feature; | 297 | opt->dccpop_feat = feature; |
@@ -299,7 +306,8 @@ static int dccp_feat_nn(struct sock *sk, u8 type, u8 feature, u8 *val, u8 len) | |||
299 | return rc; | 306 | return rc; |
300 | } | 307 | } |
301 | 308 | ||
302 | dccp_pr_debug("Confirming NN feature %d (val=%d)\n", feature, *copy); | 309 | dccp_feat_debug(type, feature, *copy); |
310 | |||
303 | list_add_tail(&opt->dccpop_node, &dmsk->dccpms_conf); | 311 | list_add_tail(&opt->dccpop_node, &dmsk->dccpms_conf); |
304 | 312 | ||
305 | return 0; | 313 | return 0; |
@@ -318,14 +326,19 @@ static void dccp_feat_empty_confirm(struct dccp_minisock *dmsk, | |||
318 | return; | 326 | return; |
319 | } | 327 | } |
320 | 328 | ||
321 | opt->dccpop_type = type == DCCPO_CHANGE_L ? DCCPO_CONFIRM_R : | 329 | switch (type) { |
322 | DCCPO_CONFIRM_L; | 330 | case DCCPO_CHANGE_L: opt->dccpop_type = DCCPO_CONFIRM_R; break; |
331 | case DCCPO_CHANGE_R: opt->dccpop_type = DCCPO_CONFIRM_L; break; | ||
332 | default: DCCP_WARN("invalid type %d\n", type); return; | ||
333 | |||
334 | } | ||
323 | opt->dccpop_feat = feature; | 335 | opt->dccpop_feat = feature; |
324 | opt->dccpop_val = NULL; | 336 | opt->dccpop_val = NULL; |
325 | opt->dccpop_len = 0; | 337 | opt->dccpop_len = 0; |
326 | 338 | ||
327 | /* change feature */ | 339 | /* change feature */ |
328 | dccp_pr_debug("Empty confirm feature %d type %d\n", feature, type); | 340 | dccp_pr_debug("Empty %s(%d)\n", dccp_feat_typename(type), feature); |
341 | |||
329 | list_add_tail(&opt->dccpop_node, &dmsk->dccpms_conf); | 342 | list_add_tail(&opt->dccpop_node, &dmsk->dccpms_conf); |
330 | } | 343 | } |
331 | 344 | ||
@@ -359,7 +372,7 @@ int dccp_feat_change_recv(struct sock *sk, u8 type, u8 feature, u8 *val, u8 len) | |||
359 | { | 372 | { |
360 | int rc; | 373 | int rc; |
361 | 374 | ||
362 | dccp_pr_debug("got feat change type=%d feat=%d\n", type, feature); | 375 | dccp_feat_debug(type, feature, *val); |
363 | 376 | ||
364 | /* figure out if it's SP or NN feature */ | 377 | /* figure out if it's SP or NN feature */ |
365 | switch (feature) { | 378 | switch (feature) { |
@@ -375,6 +388,8 @@ int dccp_feat_change_recv(struct sock *sk, u8 type, u8 feature, u8 *val, u8 len) | |||
375 | 388 | ||
376 | /* XXX implement other features */ | 389 | /* XXX implement other features */ |
377 | default: | 390 | default: |
391 | dccp_pr_debug("UNIMPLEMENTED: not handling %s(%d, ...)\n", | ||
392 | dccp_feat_typename(type), feature); | ||
378 | rc = -EFAULT; | 393 | rc = -EFAULT; |
379 | break; | 394 | break; |
380 | } | 395 | } |
@@ -403,20 +418,27 @@ int dccp_feat_confirm_recv(struct sock *sk, u8 type, u8 feature, | |||
403 | u8 t; | 418 | u8 t; |
404 | struct dccp_opt_pend *opt; | 419 | struct dccp_opt_pend *opt; |
405 | struct dccp_minisock *dmsk = dccp_msk(sk); | 420 | struct dccp_minisock *dmsk = dccp_msk(sk); |
406 | int rc = 1; | 421 | int found = 0; |
407 | int all_confirmed = 1; | 422 | int all_confirmed = 1; |
408 | 423 | ||
409 | dccp_pr_debug("got feat confirm type=%d feat=%d\n", type, feature); | 424 | dccp_feat_debug(type, feature, *val); |
410 | |||
411 | /* XXX sanity check type & feat */ | ||
412 | 425 | ||
413 | /* locate our change request */ | 426 | /* locate our change request */ |
414 | t = type == DCCPO_CONFIRM_L ? DCCPO_CHANGE_R : DCCPO_CHANGE_L; | 427 | switch (type) { |
428 | case DCCPO_CONFIRM_L: t = DCCPO_CHANGE_R; break; | ||
429 | case DCCPO_CONFIRM_R: t = DCCPO_CHANGE_L; break; | ||
430 | default: DCCP_WARN("invalid type %d\n", type); | ||
431 | return 1; | ||
432 | |||
433 | } | ||
434 | /* XXX sanity check feature value */ | ||
415 | 435 | ||
416 | list_for_each_entry(opt, &dmsk->dccpms_pending, dccpop_node) { | 436 | list_for_each_entry(opt, &dmsk->dccpms_pending, dccpop_node) { |
417 | if (!opt->dccpop_conf && opt->dccpop_type == t && | 437 | if (!opt->dccpop_conf && opt->dccpop_type == t && |
418 | opt->dccpop_feat == feature) { | 438 | opt->dccpop_feat == feature) { |
419 | /* we found it */ | 439 | found = 1; |
440 | dccp_pr_debug("feature %d found\n", opt->dccpop_feat); | ||
441 | |||
420 | /* XXX do sanity check */ | 442 | /* XXX do sanity check */ |
421 | 443 | ||
422 | opt->dccpop_conf = 1; | 444 | opt->dccpop_conf = 1; |
@@ -425,9 +447,7 @@ int dccp_feat_confirm_recv(struct sock *sk, u8 type, u8 feature, | |||
425 | dccp_feat_update(sk, opt->dccpop_type, | 447 | dccp_feat_update(sk, opt->dccpop_type, |
426 | opt->dccpop_feat, *val); | 448 | opt->dccpop_feat, *val); |
427 | 449 | ||
428 | dccp_pr_debug("feat %d type %d confirmed %d\n", | 450 | /* XXX check the return value of dccp_feat_update */ |
429 | feature, type, *val); | ||
430 | rc = 0; | ||
431 | break; | 451 | break; |
432 | } | 452 | } |
433 | 453 | ||
@@ -446,9 +466,9 @@ int dccp_feat_confirm_recv(struct sock *sk, u8 type, u8 feature, | |||
446 | inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS); | 466 | inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS); |
447 | } | 467 | } |
448 | 468 | ||
449 | if (rc) | 469 | if (!found) |
450 | dccp_pr_debug("feat %d type %d never requested\n", | 470 | dccp_pr_debug("%s(%d, ...) never requested\n", |
451 | feature, type); | 471 | dccp_feat_typename(type), feature); |
452 | return 0; | 472 | return 0; |
453 | } | 473 | } |
454 | 474 | ||
@@ -501,20 +521,18 @@ int dccp_feat_clone(struct sock *oldsk, struct sock *newsk) | |||
501 | list_for_each_entry(opt, &olddmsk->dccpms_pending, dccpop_node) { | 521 | list_for_each_entry(opt, &olddmsk->dccpms_pending, dccpop_node) { |
502 | struct dccp_opt_pend *newopt; | 522 | struct dccp_opt_pend *newopt; |
503 | /* copy the value of the option */ | 523 | /* copy the value of the option */ |
504 | u8 *val = kmalloc(opt->dccpop_len, GFP_ATOMIC); | 524 | u8 *val = kmemdup(opt->dccpop_val, opt->dccpop_len, GFP_ATOMIC); |
505 | 525 | ||
506 | if (val == NULL) | 526 | if (val == NULL) |
507 | goto out_clean; | 527 | goto out_clean; |
508 | memcpy(val, opt->dccpop_val, opt->dccpop_len); | ||
509 | 528 | ||
510 | newopt = kmalloc(sizeof(*newopt), GFP_ATOMIC); | 529 | newopt = kmemdup(opt, sizeof(*newopt), GFP_ATOMIC); |
511 | if (newopt == NULL) { | 530 | if (newopt == NULL) { |
512 | kfree(val); | 531 | kfree(val); |
513 | goto out_clean; | 532 | goto out_clean; |
514 | } | 533 | } |
515 | 534 | ||
516 | /* insert the option */ | 535 | /* insert the option */ |
517 | memcpy(newopt, opt, sizeof(*newopt)); | ||
518 | newopt->dccpop_val = val; | 536 | newopt->dccpop_val = val; |
519 | list_add_tail(&newopt->dccpop_node, &newdmsk->dccpms_pending); | 537 | list_add_tail(&newopt->dccpop_node, &newdmsk->dccpms_pending); |
520 | 538 | ||
@@ -545,10 +563,9 @@ static int __dccp_feat_init(struct dccp_minisock *dmsk, u8 type, u8 feat, | |||
545 | u8 *val, u8 len) | 563 | u8 *val, u8 len) |
546 | { | 564 | { |
547 | int rc = -ENOMEM; | 565 | int rc = -ENOMEM; |
548 | u8 *copy = kmalloc(len, GFP_KERNEL); | 566 | u8 *copy = kmemdup(val, len, GFP_KERNEL); |
549 | 567 | ||
550 | if (copy != NULL) { | 568 | if (copy != NULL) { |
551 | memcpy(copy, val, len); | ||
552 | rc = dccp_feat_change(dmsk, type, feat, copy, len, GFP_KERNEL); | 569 | rc = dccp_feat_change(dmsk, type, feat, copy, len, GFP_KERNEL); |
553 | if (rc) | 570 | if (rc) |
554 | kfree(copy); | 571 | kfree(copy); |
@@ -583,3 +600,45 @@ out: | |||
583 | } | 600 | } |
584 | 601 | ||
585 | EXPORT_SYMBOL_GPL(dccp_feat_init); | 602 | EXPORT_SYMBOL_GPL(dccp_feat_init); |
603 | |||
604 | #ifdef CONFIG_IP_DCCP_DEBUG | ||
605 | const char *dccp_feat_typename(const u8 type) | ||
606 | { | ||
607 | switch(type) { | ||
608 | case DCCPO_CHANGE_L: return("ChangeL"); | ||
609 | case DCCPO_CONFIRM_L: return("ConfirmL"); | ||
610 | case DCCPO_CHANGE_R: return("ChangeR"); | ||
611 | case DCCPO_CONFIRM_R: return("ConfirmR"); | ||
612 | /* the following case must not appear in feature negotiation */ | ||
613 | default: dccp_pr_debug("unknown type %d [BUG!]\n", type); | ||
614 | } | ||
615 | return NULL; | ||
616 | } | ||
617 | |||
618 | EXPORT_SYMBOL_GPL(dccp_feat_typename); | ||
619 | |||
620 | const char *dccp_feat_name(const u8 feat) | ||
621 | { | ||
622 | static const char *feature_names[] = { | ||
623 | [DCCPF_RESERVED] = "Reserved", | ||
624 | [DCCPF_CCID] = "CCID", | ||
625 | [DCCPF_SHORT_SEQNOS] = "Allow Short Seqnos", | ||
626 | [DCCPF_SEQUENCE_WINDOW] = "Sequence Window", | ||
627 | [DCCPF_ECN_INCAPABLE] = "ECN Incapable", | ||
628 | [DCCPF_ACK_RATIO] = "Ack Ratio", | ||
629 | [DCCPF_SEND_ACK_VECTOR] = "Send ACK Vector", | ||
630 | [DCCPF_SEND_NDP_COUNT] = "Send NDP Count", | ||
631 | [DCCPF_MIN_CSUM_COVER] = "Min. Csum Coverage", | ||
632 | [DCCPF_DATA_CHECKSUM] = "Send Data Checksum", | ||
633 | }; | ||
634 | if (feat >= DCCPF_MIN_CCID_SPECIFIC) | ||
635 | return "CCID-specific"; | ||
636 | |||
637 | if (dccp_feat_is_reserved(feat)) | ||
638 | return feature_names[DCCPF_RESERVED]; | ||
639 | |||
640 | return feature_names[feat]; | ||
641 | } | ||
642 | |||
643 | EXPORT_SYMBOL_GPL(dccp_feat_name); | ||
644 | #endif /* CONFIG_IP_DCCP_DEBUG */ | ||
diff --git a/net/dccp/feat.h b/net/dccp/feat.h index cee553d416ca..2c373ad7edcf 100644 --- a/net/dccp/feat.h +++ b/net/dccp/feat.h | |||
@@ -12,9 +12,46 @@ | |||
12 | */ | 12 | */ |
13 | 13 | ||
14 | #include <linux/types.h> | 14 | #include <linux/types.h> |
15 | #include "dccp.h" | ||
15 | 16 | ||
16 | struct sock; | 17 | static inline int dccp_feat_is_valid_length(u8 type, u8 feature, u8 len) |
17 | struct dccp_minisock; | 18 | { |
19 | /* sec. 6.1: Confirm has at least length 3, | ||
20 | * sec. 6.2: Change has at least length 4 */ | ||
21 | if (len < 3) | ||
22 | return 1; | ||
23 | if (len < 4 && (type == DCCPO_CHANGE_L || type == DCCPO_CHANGE_R)) | ||
22 | return 0; | ||
23 | if (len < 4 && (type == DCCPO_CHANGE_L || type == DCCPO_CHANGE_R)) | ||
24 | return 0; | ||
25 | /* XXX: add per-feature length validation (sec. 6.6.8) */ | ||
26 | return 1; | ||
29 | static inline int dccp_feat_is_reserved(const u8 feat) | ||
30 | { | ||
31 | return (feat > DCCPF_DATA_CHECKSUM && | ||
32 | feat < DCCPF_MIN_CCID_SPECIFIC) || | ||
33 | feat == DCCPF_RESERVED; | ||
34 | } | ||
35 | |||
36 | /* feature negotiation knows only these four option types (RFC 4340, sec. 6) */ | ||
37 | static inline int dccp_feat_is_valid_type(const u8 optnum) | ||
38 | { | ||
39 | return optnum >= DCCPO_CHANGE_L && optnum <= DCCPO_CONFIRM_R; | ||
40 | |||
41 | } | ||
42 | |||
43 | #ifdef CONFIG_IP_DCCP_DEBUG | ||
44 | extern const char *dccp_feat_typename(const u8 type); | ||
45 | extern const char *dccp_feat_name(const u8 feat); | ||
46 | |||
47 | static inline void dccp_feat_debug(const u8 type, const u8 feat, const u8 val) | ||
48 | { | ||
49 | dccp_pr_debug("%s(%s (%d), %d)\n", dccp_feat_typename(type), | ||
50 | dccp_feat_name(feat), feat, val); | ||
51 | } | ||
52 | #else | ||
53 | #define dccp_feat_debug(type, feat, val) | ||
54 | #endif /* CONFIG_IP_DCCP_DEBUG */ | ||
18 | 55 | ||
19 | extern int dccp_feat_change(struct dccp_minisock *dmsk, u8 type, u8 feature, | 56 | extern int dccp_feat_change(struct dccp_minisock *dmsk, u8 type, u8 feature, |
20 | u8 *val, u8 len, gfp_t gfp); | 57 | u8 *val, u8 len, gfp_t gfp); |
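The inline helpers above give the option parser a cheap pre-filter: only option types 32-35 (Change L, Confirm L, Change R, Confirm R) take part in feature negotiation, and RFC 4340 requires Confirm options to be at least 3 bytes and Change options at least 4 bytes long. The sketch below shows the expected accept/reject behaviour on sample inputs; it is not part of the patch and assumes a nonzero return means "valid", as the helper names suggest.

	/* Illustrative self-test (not in the patch). */
	static void dccp_feat_validity_examples(void)
	{
		DCCP_BUG_ON(!dccp_feat_is_valid_type(DCCPO_CHANGE_L));      /* 32: ok  */
		DCCP_BUG_ON(dccp_feat_is_valid_type(DCCPO_CONFIRM_R + 1));  /* 36: not a
									     * negotiation option */
		/* Change options need length >= 4, Confirm options length >= 3 */
		DCCP_BUG_ON(dccp_feat_is_valid_length(DCCPO_CHANGE_L, DCCPF_CCID, 3));
		DCCP_BUG_ON(!dccp_feat_is_valid_length(DCCPO_CONFIRM_R, DCCPF_CCID, 3));
	}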
@@ -26,11 +63,4 @@ extern void dccp_feat_clean(struct dccp_minisock *dmsk); | |||
26 | extern int dccp_feat_clone(struct sock *oldsk, struct sock *newsk); | 63 | extern int dccp_feat_clone(struct sock *oldsk, struct sock *newsk); |
27 | extern int dccp_feat_init(struct dccp_minisock *dmsk); | 64 | extern int dccp_feat_init(struct dccp_minisock *dmsk); |
28 | 65 | ||
29 | extern int dccp_feat_default_sequence_window; | ||
30 | extern int dccp_feat_default_rx_ccid; | ||
31 | extern int dccp_feat_default_tx_ccid; | ||
32 | extern int dccp_feat_default_ack_ratio; | ||
33 | extern int dccp_feat_default_send_ack_vector; | ||
34 | extern int dccp_feat_default_send_ndp_count; | ||
35 | |||
36 | #endif /* _DCCP_FEAT_H */ | 66 | #endif /* _DCCP_FEAT_H */ |
diff --git a/net/dccp/input.c b/net/dccp/input.c index 1d24881ac0ab..7371a2f3acf4 100644 --- a/net/dccp/input.c +++ b/net/dccp/input.c | |||
@@ -128,21 +128,18 @@ static int dccp_check_seqno(struct sock *sk, struct sk_buff *skb) | |||
128 | DCCP_PKT_WITHOUT_ACK_SEQ)) | 128 | DCCP_PKT_WITHOUT_ACK_SEQ)) |
129 | dp->dccps_gar = DCCP_SKB_CB(skb)->dccpd_ack_seq; | 129 | dp->dccps_gar = DCCP_SKB_CB(skb)->dccpd_ack_seq; |
130 | } else { | 130 | } else { |
131 | LIMIT_NETDEBUG(KERN_WARNING "DCCP: Step 6 failed for %s packet, " | 131 | DCCP_WARN("DCCP: Step 6 failed for %s packet, " |
132 | "(LSWL(%llu) <= P.seqno(%llu) <= S.SWH(%llu)) and " | 132 | "(LSWL(%llu) <= P.seqno(%llu) <= S.SWH(%llu)) and " |
133 | "(P.ackno %s or LAWL(%llu) <= P.ackno(%llu) <= S.AWH(%llu), " | 133 | "(P.ackno %s or LAWL(%llu) <= P.ackno(%llu) <= S.AWH(%llu), " |
134 | "sending SYNC...\n", | 134 | "sending SYNC...\n", dccp_packet_name(dh->dccph_type), |
135 | dccp_packet_name(dh->dccph_type), | 135 | (unsigned long long) lswl, |
136 | (unsigned long long) lswl, | 136 | (unsigned long long) DCCP_SKB_CB(skb)->dccpd_seq, |
137 | (unsigned long long) | 137 | (unsigned long long) dp->dccps_swh, |
138 | DCCP_SKB_CB(skb)->dccpd_seq, | 138 | (DCCP_SKB_CB(skb)->dccpd_ack_seq == |
139 | (unsigned long long) dp->dccps_swh, | ||
140 | (DCCP_SKB_CB(skb)->dccpd_ack_seq == | ||
141 | DCCP_PKT_WITHOUT_ACK_SEQ) ? "doesn't exist" : "exists", | 139 | DCCP_PKT_WITHOUT_ACK_SEQ) ? "doesn't exist" : "exists", |
142 | (unsigned long long) lawl, | 140 | (unsigned long long) lawl, |
143 | (unsigned long long) | 141 | (unsigned long long) DCCP_SKB_CB(skb)->dccpd_ack_seq, |
144 | DCCP_SKB_CB(skb)->dccpd_ack_seq, | 142 | (unsigned long long) dp->dccps_awh); |
145 | (unsigned long long) dp->dccps_awh); | ||
146 | dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq, DCCP_PKT_SYNC); | 143 | dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq, DCCP_PKT_SYNC); |
147 | return -1; | 144 | return -1; |
148 | } | 145 | } |
@@ -431,29 +428,25 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb, | |||
431 | 428 | ||
432 | /* | 429 | /* |
433 | * Step 3: Process LISTEN state | 430 | * Step 3: Process LISTEN state |
434 | * (Continuing from dccp_v4_do_rcv and dccp_v6_do_rcv) | ||
435 | * | 431 | * |
436 | * If S.state == LISTEN, | 432 | * If S.state == LISTEN, |
437 | * If P.type == Request or P contains a valid Init Cookie | 433 | * If P.type == Request or P contains a valid Init Cookie option, |
438 | * option, | 434 | * (* Must scan the packet's options to check for Init |
439 | * * Must scan the packet's options to check for an Init | 435 | * Cookies. Only Init Cookies are processed here, |
440 | * Cookie. Only the Init Cookie is processed here, | 436 | * however; other options are processed in Step 8. This |
441 | * however; other options are processed in Step 8. This | 437 | * scan need only be performed if the endpoint uses Init |
442 | * scan need only be performed if the endpoint uses Init | 438 | * Cookies *) |
443 | * Cookies * | 439 | * (* Generate a new socket and switch to that socket *) |
444 | * * Generate a new socket and switch to that socket * | 440 | * Set S := new socket for this port pair |
445 | * Set S := new socket for this port pair | 441 | * S.state = RESPOND |
446 | * S.state = RESPOND | 442 | * Choose S.ISS (initial seqno) or set from Init Cookies |
447 | * Choose S.ISS (initial seqno) or set from Init Cookie | 443 | * Initialize S.GAR := S.ISS |
448 | * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie | 444 | * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init |
449 | * Continue with S.state == RESPOND | 445 | * Cookies Continue with S.state == RESPOND |
450 | * * A Response packet will be generated in Step 11 * | 446 | * (* A Response packet will be generated in Step 11 *) |
451 | * Otherwise, | 447 | * Otherwise, |
452 | * Generate Reset(No Connection) unless P.type == Reset | 448 | * Generate Reset(No Connection) unless P.type == Reset |
453 | * Drop packet and return | 449 | * Drop packet and return |
454 | * | ||
455 | * NOTE: the check for the packet types is done in | ||
456 | * dccp_rcv_state_process | ||
457 | */ | 450 | */ |
458 | if (sk->sk_state == DCCP_LISTEN) { | 451 | if (sk->sk_state == DCCP_LISTEN) { |
459 | if (dh->dccph_type == DCCP_PKT_REQUEST) { | 452 | if (dh->dccph_type == DCCP_PKT_REQUEST) { |
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index e08e7688a263..ff81679c9f17 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c | |||
@@ -113,13 +113,8 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) | |||
113 | /* OK, now commit destination to socket. */ | 113 | /* OK, now commit destination to socket. */ |
114 | sk_setup_caps(sk, &rt->u.dst); | 114 | sk_setup_caps(sk, &rt->u.dst); |
115 | 115 | ||
116 | dp->dccps_gar = | 116 | dp->dccps_iss = secure_dccp_sequence_number(inet->saddr, inet->daddr, |
117 | dp->dccps_iss = secure_dccp_sequence_number(inet->saddr, | 117 | inet->sport, inet->dport); |
118 | inet->daddr, | ||
119 | inet->sport, | ||
120 | usin->sin_port); | ||
121 | dccp_update_gss(sk, dp->dccps_iss); | ||
122 | |||
123 | inet->id = dp->dccps_iss ^ jiffies; | 118 | inet->id = dp->dccps_iss ^ jiffies; |
124 | 119 | ||
125 | err = dccp_connect(sk); | 120 | err = dccp_connect(sk); |
@@ -193,86 +188,6 @@ static inline void dccp_do_pmtu_discovery(struct sock *sk, | |||
193 | } /* else let the usual retransmit timer handle it */ | 188 | } /* else let the usual retransmit timer handle it */ |
194 | } | 189 | } |
195 | 190 | ||
196 | static void dccp_v4_reqsk_send_ack(struct sk_buff *rxskb, | ||
197 | struct request_sock *req) | ||
198 | { | ||
199 | int err; | ||
200 | struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh; | ||
201 | const u32 dccp_hdr_ack_len = sizeof(struct dccp_hdr) + | ||
202 | sizeof(struct dccp_hdr_ext) + | ||
203 | sizeof(struct dccp_hdr_ack_bits); | ||
204 | struct sk_buff *skb; | ||
205 | |||
206 | if (((struct rtable *)rxskb->dst)->rt_type != RTN_LOCAL) | ||
207 | return; | ||
208 | |||
209 | skb = alloc_skb(dccp_v4_ctl_socket->sk->sk_prot->max_header, GFP_ATOMIC); | ||
210 | if (skb == NULL) | ||
211 | return; | ||
212 | |||
213 | /* Reserve space for headers. */ | ||
214 | skb_reserve(skb, dccp_v4_ctl_socket->sk->sk_prot->max_header); | ||
215 | |||
216 | skb->dst = dst_clone(rxskb->dst); | ||
217 | |||
218 | skb->h.raw = skb_push(skb, dccp_hdr_ack_len); | ||
219 | dh = dccp_hdr(skb); | ||
220 | memset(dh, 0, dccp_hdr_ack_len); | ||
221 | |||
222 | /* Build DCCP header and checksum it. */ | ||
223 | dh->dccph_type = DCCP_PKT_ACK; | ||
224 | dh->dccph_sport = rxdh->dccph_dport; | ||
225 | dh->dccph_dport = rxdh->dccph_sport; | ||
226 | dh->dccph_doff = dccp_hdr_ack_len / 4; | ||
227 | dh->dccph_x = 1; | ||
228 | |||
229 | dccp_hdr_set_seq(dh, DCCP_SKB_CB(rxskb)->dccpd_ack_seq); | ||
230 | dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), | ||
231 | DCCP_SKB_CB(rxskb)->dccpd_seq); | ||
232 | |||
233 | bh_lock_sock(dccp_v4_ctl_socket->sk); | ||
234 | err = ip_build_and_send_pkt(skb, dccp_v4_ctl_socket->sk, | ||
235 | rxskb->nh.iph->daddr, | ||
236 | rxskb->nh.iph->saddr, NULL); | ||
237 | bh_unlock_sock(dccp_v4_ctl_socket->sk); | ||
238 | |||
239 | if (err == NET_XMIT_CN || err == 0) { | ||
240 | DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS); | ||
241 | DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS); | ||
242 | } | ||
243 | } | ||
244 | |||
245 | static int dccp_v4_send_response(struct sock *sk, struct request_sock *req, | ||
246 | struct dst_entry *dst) | ||
247 | { | ||
248 | int err = -1; | ||
249 | struct sk_buff *skb; | ||
250 | |||
251 | /* First, grab a route. */ | ||
252 | |||
253 | if (dst == NULL && (dst = inet_csk_route_req(sk, req)) == NULL) | ||
254 | goto out; | ||
255 | |||
256 | skb = dccp_make_response(sk, dst, req); | ||
257 | if (skb != NULL) { | ||
258 | const struct inet_request_sock *ireq = inet_rsk(req); | ||
259 | struct dccp_hdr *dh = dccp_hdr(skb); | ||
260 | |||
261 | dh->dccph_checksum = dccp_v4_checksum(skb, ireq->loc_addr, | ||
262 | ireq->rmt_addr); | ||
263 | memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); | ||
264 | err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr, | ||
265 | ireq->rmt_addr, | ||
266 | ireq->opt); | ||
267 | if (err == NET_XMIT_CN) | ||
268 | err = 0; | ||
269 | } | ||
270 | |||
271 | out: | ||
272 | dst_release(dst); | ||
273 | return err; | ||
274 | } | ||
275 | |||
276 | /* | 191 | /* |
277 | * This routine is called by the ICMP module when it gets some sort of error | 192 | * This routine is called by the ICMP module when it gets some sort of error |
278 | * condition. If err < 0 then the socket should be closed and the error | 193 | * condition. If err < 0 then the socket should be closed and the error |
@@ -329,7 +244,7 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info) | |||
329 | seq = dccp_hdr_seq(skb); | 244 | seq = dccp_hdr_seq(skb); |
330 | if (sk->sk_state != DCCP_LISTEN && | 245 | if (sk->sk_state != DCCP_LISTEN && |
331 | !between48(seq, dp->dccps_swl, dp->dccps_swh)) { | 246 | !between48(seq, dp->dccps_swl, dp->dccps_swh)) { |
332 | NET_INC_STATS(LINUX_MIB_OUTOFWINDOWICMPS); | 247 | NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS); |
333 | goto out; | 248 | goto out; |
334 | } | 249 | } |
335 | 250 | ||
@@ -429,19 +344,24 @@ out: | |||
429 | sock_put(sk); | 344 | sock_put(sk); |
430 | } | 345 | } |
431 | 346 | ||
432 | /* This routine computes an IPv4 DCCP checksum. */ | 347 | static inline __sum16 dccp_v4_csum_finish(struct sk_buff *skb, |
433 | void dccp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb) | 348 | __be32 src, __be32 dst) |
349 | { | ||
350 | return csum_tcpudp_magic(src, dst, skb->len, IPPROTO_DCCP, skb->csum); | ||
351 | } | ||
352 | |||
353 | void dccp_v4_send_check(struct sock *sk, int unused, struct sk_buff *skb) | ||
434 | { | 354 | { |
435 | const struct inet_sock *inet = inet_sk(sk); | 355 | const struct inet_sock *inet = inet_sk(sk); |
436 | struct dccp_hdr *dh = dccp_hdr(skb); | 356 | struct dccp_hdr *dh = dccp_hdr(skb); |
437 | 357 | ||
438 | dh->dccph_checksum = dccp_v4_checksum(skb, inet->saddr, inet->daddr); | 358 | dccp_csum_outgoing(skb); |
359 | dh->dccph_checksum = dccp_v4_csum_finish(skb, inet->saddr, inet->daddr); | ||
439 | } | 360 | } |
440 | 361 | ||
441 | EXPORT_SYMBOL_GPL(dccp_v4_send_check); | 362 | EXPORT_SYMBOL_GPL(dccp_v4_send_check); |
442 | 363 | ||
443 | static inline u64 dccp_v4_init_sequence(const struct sock *sk, | 364 | static inline u64 dccp_v4_init_sequence(const struct sk_buff *skb) |
444 | const struct sk_buff *skb) | ||
445 | { | 365 | { |
446 | return secure_dccp_sequence_number(skb->nh.iph->daddr, | 366 | return secure_dccp_sequence_number(skb->nh.iph->daddr, |
447 | skb->nh.iph->saddr, | 367 | skb->nh.iph->saddr, |
@@ -449,95 +369,6 @@ static inline u64 dccp_v4_init_sequence(const struct sock *sk, | |||
449 | dccp_hdr(skb)->dccph_sport); | 369 | dccp_hdr(skb)->dccph_sport); |
450 | } | 370 | } |
451 | 371 | ||
452 | static struct request_sock_ops dccp_request_sock_ops; | ||
453 | |||
454 | int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb) | ||
455 | { | ||
456 | struct inet_request_sock *ireq; | ||
457 | struct dccp_sock dp; | ||
458 | struct request_sock *req; | ||
459 | struct dccp_request_sock *dreq; | ||
460 | const __be32 saddr = skb->nh.iph->saddr; | ||
461 | const __be32 daddr = skb->nh.iph->daddr; | ||
462 | const __be32 service = dccp_hdr_request(skb)->dccph_req_service; | ||
463 | struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb); | ||
464 | __u8 reset_code = DCCP_RESET_CODE_TOO_BUSY; | ||
465 | |||
466 | /* Never answer to DCCP_PKT_REQUESTs send to broadcast or multicast */ | ||
467 | if (((struct rtable *)skb->dst)->rt_flags & | ||
468 | (RTCF_BROADCAST | RTCF_MULTICAST)) { | ||
469 | reset_code = DCCP_RESET_CODE_NO_CONNECTION; | ||
470 | goto drop; | ||
471 | } | ||
472 | |||
473 | if (dccp_bad_service_code(sk, service)) { | ||
474 | reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE; | ||
475 | goto drop; | ||
476 | } | ||
477 | /* | ||
478 | * TW buckets are converted to open requests without | ||
479 | * limitations, they conserve resources and peer is | ||
480 | * evidently real one. | ||
481 | */ | ||
482 | if (inet_csk_reqsk_queue_is_full(sk)) | ||
483 | goto drop; | ||
484 | |||
485 | /* | ||
486 | * Accept backlog is full. If we have already queued enough | ||
487 | * of warm entries in syn queue, drop request. It is better than | ||
488 | * clogging syn queue with openreqs with exponentially increasing | ||
489 | * timeout. | ||
490 | */ | ||
491 | if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) | ||
492 | goto drop; | ||
493 | |||
494 | req = reqsk_alloc(&dccp_request_sock_ops); | ||
495 | if (req == NULL) | ||
496 | goto drop; | ||
497 | |||
498 | if (dccp_parse_options(sk, skb)) | ||
499 | goto drop_and_free; | ||
500 | |||
501 | dccp_openreq_init(req, &dp, skb); | ||
502 | |||
503 | if (security_inet_conn_request(sk, skb, req)) | ||
504 | goto drop_and_free; | ||
505 | |||
506 | ireq = inet_rsk(req); | ||
507 | ireq->loc_addr = daddr; | ||
508 | ireq->rmt_addr = saddr; | ||
509 | req->rcv_wnd = dccp_feat_default_sequence_window; | ||
510 | ireq->opt = NULL; | ||
511 | |||
512 | /* | ||
513 | * Step 3: Process LISTEN state | ||
514 | * | ||
515 | * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie | ||
516 | * | ||
517 | * In fact we defer setting S.GSR, S.SWL, S.SWH to | ||
518 | * dccp_create_openreq_child. | ||
519 | */ | ||
520 | dreq = dccp_rsk(req); | ||
521 | dreq->dreq_isr = dcb->dccpd_seq; | ||
522 | dreq->dreq_iss = dccp_v4_init_sequence(sk, skb); | ||
523 | dreq->dreq_service = service; | ||
524 | |||
525 | if (dccp_v4_send_response(sk, req, NULL)) | ||
526 | goto drop_and_free; | ||
527 | |||
528 | inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT); | ||
529 | return 0; | ||
530 | |||
531 | drop_and_free: | ||
532 | reqsk_free(req); | ||
533 | drop: | ||
534 | DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS); | ||
535 | dcb->dccpd_reset_code = reset_code; | ||
536 | return -1; | ||
537 | } | ||
538 | |||
539 | EXPORT_SYMBOL_GPL(dccp_v4_conn_request); | ||
540 | |||
541 | /* | 372 | /* |
542 | * The three way handshake has completed - we got a valid ACK or DATAACK - | 373 | * The three way handshake has completed - we got a valid ACK or DATAACK - |
543 | * now create the new socket. | 374 | * now create the new socket. |
@@ -623,47 +454,6 @@ static struct sock *dccp_v4_hnd_req(struct sock *sk, struct sk_buff *skb) | |||
623 | return sk; | 454 | return sk; |
624 | } | 455 | } |
625 | 456 | ||
626 | int dccp_v4_checksum(const struct sk_buff *skb, const __be32 saddr, | ||
627 | const __be32 daddr) | ||
628 | { | ||
629 | const struct dccp_hdr* dh = dccp_hdr(skb); | ||
630 | int checksum_len; | ||
631 | u32 tmp; | ||
632 | |||
633 | if (dh->dccph_cscov == 0) | ||
634 | checksum_len = skb->len; | ||
635 | else { | ||
636 | checksum_len = (dh->dccph_cscov + dh->dccph_x) * sizeof(u32); | ||
637 | checksum_len = checksum_len < skb->len ? checksum_len : | ||
638 | skb->len; | ||
639 | } | ||
640 | |||
641 | tmp = csum_partial((unsigned char *)dh, checksum_len, 0); | ||
642 | return csum_tcpudp_magic(saddr, daddr, checksum_len, | ||
643 | IPPROTO_DCCP, tmp); | ||
644 | } | ||
645 | |||
646 | EXPORT_SYMBOL_GPL(dccp_v4_checksum); | ||
647 | |||
648 | static int dccp_v4_verify_checksum(struct sk_buff *skb, | ||
649 | const __be32 saddr, const __be32 daddr) | ||
650 | { | ||
651 | struct dccp_hdr *dh = dccp_hdr(skb); | ||
652 | int checksum_len; | ||
653 | u32 tmp; | ||
654 | |||
655 | if (dh->dccph_cscov == 0) | ||
656 | checksum_len = skb->len; | ||
657 | else { | ||
658 | checksum_len = (dh->dccph_cscov + dh->dccph_x) * sizeof(u32); | ||
659 | checksum_len = checksum_len < skb->len ? checksum_len : | ||
660 | skb->len; | ||
661 | } | ||
662 | tmp = csum_partial((unsigned char *)dh, checksum_len, 0); | ||
663 | return csum_tcpudp_magic(saddr, daddr, checksum_len, | ||
664 | IPPROTO_DCCP, tmp) == 0 ? 0 : -1; | ||
665 | } | ||
666 | |||
667 | static struct dst_entry* dccp_v4_route_skb(struct sock *sk, | 457 | static struct dst_entry* dccp_v4_route_skb(struct sock *sk, |
668 | struct sk_buff *skb) | 458 | struct sk_buff *skb) |
669 | { | 459 | { |
@@ -689,7 +479,37 @@ static struct dst_entry* dccp_v4_route_skb(struct sock *sk, | |||
689 | return &rt->u.dst; | 479 | return &rt->u.dst; |
690 | } | 480 | } |
691 | 481 | ||
692 | static void dccp_v4_ctl_send_reset(struct sk_buff *rxskb) | 482 | static int dccp_v4_send_response(struct sock *sk, struct request_sock *req, |
483 | struct dst_entry *dst) | ||
484 | { | ||
485 | int err = -1; | ||
486 | struct sk_buff *skb; | ||
487 | |||
488 | /* First, grab a route. */ | ||
489 | |||
490 | if (dst == NULL && (dst = inet_csk_route_req(sk, req)) == NULL) | ||
491 | goto out; | ||
492 | |||
493 | skb = dccp_make_response(sk, dst, req); | ||
494 | if (skb != NULL) { | ||
495 | const struct inet_request_sock *ireq = inet_rsk(req); | ||
496 | struct dccp_hdr *dh = dccp_hdr(skb); | ||
497 | |||
498 | dh->dccph_checksum = dccp_v4_csum_finish(skb, ireq->loc_addr, | ||
499 | ireq->rmt_addr); | ||
500 | memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); | ||
501 | err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr, | ||
502 | ireq->rmt_addr, | ||
503 | ireq->opt); | ||
504 | err = net_xmit_eval(err); | ||
505 | } | ||
506 | |||
507 | out: | ||
508 | dst_release(dst); | ||
509 | return err; | ||
510 | } | ||
511 | |||
512 | static void dccp_v4_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb) | ||
693 | { | 513 | { |
694 | int err; | 514 | int err; |
695 | struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh; | 515 | struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh; |
@@ -698,7 +518,7 @@ static void dccp_v4_ctl_send_reset(struct sk_buff *rxskb) | |||
698 | sizeof(struct dccp_hdr_reset); | 518 | sizeof(struct dccp_hdr_reset); |
699 | struct sk_buff *skb; | 519 | struct sk_buff *skb; |
700 | struct dst_entry *dst; | 520 | struct dst_entry *dst; |
701 | u64 seqno; | 521 | u64 seqno = 0; |
702 | 522 | ||
703 | /* Never send a reset in response to a reset. */ | 523 | /* Never send a reset in response to a reset. */ |
704 | if (rxdh->dccph_type == DCCP_PKT_RESET) | 524 | if (rxdh->dccph_type == DCCP_PKT_RESET) |
@@ -720,9 +540,7 @@ static void dccp_v4_ctl_send_reset(struct sk_buff *rxskb) | |||
720 | skb_reserve(skb, dccp_v4_ctl_socket->sk->sk_prot->max_header); | 540 | skb_reserve(skb, dccp_v4_ctl_socket->sk->sk_prot->max_header); |
721 | skb->dst = dst_clone(dst); | 541 | skb->dst = dst_clone(dst); |
722 | 542 | ||
723 | skb->h.raw = skb_push(skb, dccp_hdr_reset_len); | 543 | dh = dccp_zeroed_hdr(skb, dccp_hdr_reset_len); |
724 | dh = dccp_hdr(skb); | ||
725 | memset(dh, 0, dccp_hdr_reset_len); | ||
726 | 544 | ||
727 | /* Build DCCP header and checksum it. */ | 545 | /* Build DCCP header and checksum it. */ |
728 | dh->dccph_type = DCCP_PKT_RESET; | 546 | dh->dccph_type = DCCP_PKT_RESET; |
@@ -734,16 +552,15 @@ static void dccp_v4_ctl_send_reset(struct sk_buff *rxskb) | |||
734 | DCCP_SKB_CB(rxskb)->dccpd_reset_code; | 552 | DCCP_SKB_CB(rxskb)->dccpd_reset_code; |
735 | 553 | ||
736 | /* See "8.3.1. Abnormal Termination" in RFC 4340 */ | 554 | /* See "8.3.1. Abnormal Termination" in RFC 4340 */ |
737 | seqno = 0; | ||
738 | if (DCCP_SKB_CB(rxskb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ) | 555 | if (DCCP_SKB_CB(rxskb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ) |
739 | dccp_set_seqno(&seqno, DCCP_SKB_CB(rxskb)->dccpd_ack_seq + 1); | 556 | dccp_set_seqno(&seqno, DCCP_SKB_CB(rxskb)->dccpd_ack_seq + 1); |
740 | 557 | ||
741 | dccp_hdr_set_seq(dh, seqno); | 558 | dccp_hdr_set_seq(dh, seqno); |
742 | dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), | 559 | dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), DCCP_SKB_CB(rxskb)->dccpd_seq); |
743 | DCCP_SKB_CB(rxskb)->dccpd_seq); | ||
744 | 560 | ||
745 | dh->dccph_checksum = dccp_v4_checksum(skb, rxskb->nh.iph->saddr, | 561 | dccp_csum_outgoing(skb); |
746 | rxskb->nh.iph->daddr); | 562 | dh->dccph_checksum = dccp_v4_csum_finish(skb, rxskb->nh.iph->saddr, |
563 | rxskb->nh.iph->daddr); | ||
747 | 564 | ||
748 | bh_lock_sock(dccp_v4_ctl_socket->sk); | 565 | bh_lock_sock(dccp_v4_ctl_socket->sk); |
749 | err = ip_build_and_send_pkt(skb, dccp_v4_ctl_socket->sk, | 566 | err = ip_build_and_send_pkt(skb, dccp_v4_ctl_socket->sk, |
@@ -751,7 +568,7 @@ static void dccp_v4_ctl_send_reset(struct sk_buff *rxskb) | |||
751 | rxskb->nh.iph->saddr, NULL); | 568 | rxskb->nh.iph->saddr, NULL); |
752 | bh_unlock_sock(dccp_v4_ctl_socket->sk); | 569 | bh_unlock_sock(dccp_v4_ctl_socket->sk); |
753 | 570 | ||
754 | if (err == NET_XMIT_CN || err == 0) { | 571 | if (net_xmit_eval(err) == 0) { |
755 | DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS); | 572 | DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS); |
756 | DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS); | 573 | DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS); |
757 | } | 574 | } |
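Both transmit paths in this file now use net_xmit_eval() instead of open-coding the NET_XMIT_CN special case. Conceptually, the helper treats "dropped due to local congestion" as success; the sketch below only illustrates that idea and is not the real definition, which lives in the networking headers.

	/* Illustration of the idea behind net_xmit_eval(): NET_XMIT_CN means
	 * the packet was dropped/marked because of local congestion, which
	 * callers should not treat as a hard error. */
	static inline int net_xmit_eval_sketch(int err)
	{
		return err == NET_XMIT_CN ? 0 : err;
	}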
@@ -759,6 +576,103 @@ out: | |||
759 | dst_release(dst); | 576 | dst_release(dst); |
760 | } | 577 | } |
761 | 578 | ||
579 | static void dccp_v4_reqsk_destructor(struct request_sock *req) | ||
580 | { | ||
581 | kfree(inet_rsk(req)->opt); | ||
582 | } | ||
583 | |||
584 | static struct request_sock_ops dccp_request_sock_ops __read_mostly = { | ||
585 | .family = PF_INET, | ||
586 | .obj_size = sizeof(struct dccp_request_sock), | ||
587 | .rtx_syn_ack = dccp_v4_send_response, | ||
588 | .send_ack = dccp_reqsk_send_ack, | ||
589 | .destructor = dccp_v4_reqsk_destructor, | ||
590 | .send_reset = dccp_v4_ctl_send_reset, | ||
591 | }; | ||
592 | |||
593 | int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb) | ||
594 | { | ||
595 | struct inet_request_sock *ireq; | ||
596 | struct request_sock *req; | ||
597 | struct dccp_request_sock *dreq; | ||
598 | const __be32 service = dccp_hdr_request(skb)->dccph_req_service; | ||
599 | struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb); | ||
600 | __u8 reset_code = DCCP_RESET_CODE_TOO_BUSY; | ||
601 | |||
602 | /* Never answer to DCCP_PKT_REQUESTs sent to broadcast or multicast */ | ||
603 | if (((struct rtable *)skb->dst)->rt_flags & | ||
604 | (RTCF_BROADCAST | RTCF_MULTICAST)) { | ||
605 | reset_code = DCCP_RESET_CODE_NO_CONNECTION; | ||
606 | goto drop; | ||
607 | } | ||
608 | |||
609 | if (dccp_bad_service_code(sk, service)) { | ||
610 | reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE; | ||
611 | goto drop; | ||
612 | } | ||
613 | /* | ||
614 | * TW buckets are converted to open requests without | ||
615 | * limitations, they conserve resources and peer is | ||
616 | * evidently real one. | ||
617 | */ | ||
618 | if (inet_csk_reqsk_queue_is_full(sk)) | ||
619 | goto drop; | ||
620 | |||
621 | /* | ||
622 | * Accept backlog is full. If we have already queued enough | ||
623 | * of warm entries in syn queue, drop request. It is better than | ||
624 | * clogging syn queue with openreqs with exponentially increasing | ||
625 | * timeout. | ||
626 | */ | ||
627 | if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) | ||
628 | goto drop; | ||
629 | |||
630 | req = reqsk_alloc(&dccp_request_sock_ops); | ||
631 | if (req == NULL) | ||
632 | goto drop; | ||
633 | |||
634 | if (dccp_parse_options(sk, skb)) | ||
635 | goto drop_and_free; | ||
636 | |||
637 | dccp_reqsk_init(req, skb); | ||
638 | |||
639 | if (security_inet_conn_request(sk, skb, req)) | ||
640 | goto drop_and_free; | ||
641 | |||
642 | ireq = inet_rsk(req); | ||
643 | ireq->loc_addr = skb->nh.iph->daddr; | ||
644 | ireq->rmt_addr = skb->nh.iph->saddr; | ||
645 | ireq->opt = NULL; | ||
646 | |||
647 | /* | ||
648 | * Step 3: Process LISTEN state | ||
649 | * | ||
650 | * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie | ||
651 | * | ||
652 | * In fact we defer setting S.GSR, S.SWL, S.SWH to | ||
653 | * dccp_create_openreq_child. | ||
654 | */ | ||
655 | dreq = dccp_rsk(req); | ||
656 | dreq->dreq_isr = dcb->dccpd_seq; | ||
657 | dreq->dreq_iss = dccp_v4_init_sequence(skb); | ||
658 | dreq->dreq_service = service; | ||
659 | |||
660 | if (dccp_v4_send_response(sk, req, NULL)) | ||
661 | goto drop_and_free; | ||
662 | |||
663 | inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT); | ||
664 | return 0; | ||
665 | |||
666 | drop_and_free: | ||
667 | reqsk_free(req); | ||
668 | drop: | ||
669 | DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS); | ||
670 | dcb->dccpd_reset_code = reset_code; | ||
671 | return -1; | ||
672 | } | ||
673 | |||
674 | EXPORT_SYMBOL_GPL(dccp_v4_conn_request); | ||
675 | |||
762 | int dccp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) | 676 | int dccp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) |
763 | { | 677 | { |
764 | struct dccp_hdr *dh = dccp_hdr(skb); | 678 | struct dccp_hdr *dh = dccp_hdr(skb); |
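The dccp_v4_conn_request() handler added above is the kernel-side counterpart of an ordinary user-space DCCP listener. A minimal sketch of such a listener, assuming the installed headers expose SOCK_DCCP, IPPROTO_DCCP, SOL_DCCP and DCCP_SOCKOPT_SERVICE; the port and service code are arbitrary example values and error handling is omitted:

#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/dccp.h>        /* DCCP_SOCKOPT_SERVICE */

#ifndef SOL_DCCP
#define SOL_DCCP 269           /* value from linux/socket.h, in case libc lacks it */
#endif

int main(void)
{
	int srv = socket(AF_INET, SOCK_DCCP, IPPROTO_DCCP);
	uint32_t service = htonl(42);     /* checked against dccp_bad_service_code() above */
	struct sockaddr_in addr;

	setsockopt(srv, SOL_DCCP, DCCP_SOCKOPT_SERVICE, &service, sizeof(service));

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_addr.s_addr = htonl(INADDR_ANY);
	addr.sin_port = htons(5001);      /* example port */

	bind(srv, (struct sockaddr *)&addr, sizeof(addr));
	listen(srv, 5);                   /* backlog feeds the sk_acceptq_is_full() check above */

	int conn = accept(srv, NULL, NULL);   /* completed via dccp_v4_conn_request()/RESPONSE */
	if (conn >= 0)
		close(conn);
	close(srv);
	return 0;
}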
@@ -771,24 +685,23 @@ int dccp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) | |||
771 | 685 | ||
772 | /* | 686 | /* |
773 | * Step 3: Process LISTEN state | 687 | * Step 3: Process LISTEN state |
774 | * If S.state == LISTEN, | 688 | * If P.type == Request or P contains a valid Init Cookie option, |
775 | * If P.type == Request or P contains a valid Init Cookie | 689 | * (* Must scan the packet's options to check for Init |
776 | * option, | 690 | * Cookies. Only Init Cookies are processed here, |
777 | * * Must scan the packet's options to check for an Init | 691 | * however; other options are processed in Step 8. This |
778 | * Cookie. Only the Init Cookie is processed here, | 692 | * scan need only be performed if the endpoint uses Init |
779 | * however; other options are processed in Step 8. This | 693 | * Cookies *) |
780 | * scan need only be performed if the endpoint uses Init | 694 | * (* Generate a new socket and switch to that socket *) |
781 | * Cookies * | 695 | * Set S := new socket for this port pair |
782 | * * Generate a new socket and switch to that socket * | 696 | * S.state = RESPOND |
783 | * Set S := new socket for this port pair | 697 | * Choose S.ISS (initial seqno) or set from Init Cookies |
784 | * S.state = RESPOND | 698 | * Initialize S.GAR := S.ISS |
785 | * Choose S.ISS (initial seqno) or set from Init Cookie | 699 | * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookies |
786 | * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie | 700 | * Continue with S.state == RESPOND |
787 | * Continue with S.state == RESPOND | 701 | * (* A Response packet will be generated in Step 11 *) |
788 | * * A Response packet will be generated in Step 11 * | 702 | * Otherwise, |
789 | * Otherwise, | 703 | * Generate Reset(No Connection) unless P.type == Reset |
790 | * Generate Reset(No Connection) unless P.type == Reset | 704 | * Drop packet and return |
791 | * Drop packet and return | ||
792 | * | 705 | * |
793 | * NOTE: the check for the packet types is done in | 706 | * NOTE: the check for the packet types is done in |
794 | * dccp_rcv_state_process | 707 | * dccp_rcv_state_process |
@@ -811,7 +724,7 @@ int dccp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) | |||
811 | return 0; | 724 | return 0; |
812 | 725 | ||
813 | reset: | 726 | reset: |
814 | dccp_v4_ctl_send_reset(skb); | 727 | dccp_v4_ctl_send_reset(sk, skb); |
815 | discard: | 728 | discard: |
816 | kfree_skb(skb); | 729 | kfree_skb(skb); |
817 | return 0; | 730 | return 0; |
@@ -819,60 +732,74 @@ discard: | |||
819 | 732 | ||
820 | EXPORT_SYMBOL_GPL(dccp_v4_do_rcv); | 733 | EXPORT_SYMBOL_GPL(dccp_v4_do_rcv); |
821 | 734 | ||
735 | /** | ||
736 | * dccp_invalid_packet - check for malformed packets | ||
737 | * Implements RFC 4340, 8.5: Step 1: Check header basics | ||
738 | * Packets that fail these checks are ignored and do not receive Resets. | ||
739 | */ | ||
822 | int dccp_invalid_packet(struct sk_buff *skb) | 740 | int dccp_invalid_packet(struct sk_buff *skb) |
823 | { | 741 | { |
824 | const struct dccp_hdr *dh; | 742 | const struct dccp_hdr *dh; |
743 | unsigned int cscov; | ||
825 | 744 | ||
826 | if (skb->pkt_type != PACKET_HOST) | 745 | if (skb->pkt_type != PACKET_HOST) |
827 | return 1; | 746 | return 1; |
828 | 747 | ||
748 | /* If the packet is shorter than 12 bytes, drop packet and return */ | ||
829 | if (!pskb_may_pull(skb, sizeof(struct dccp_hdr))) { | 749 | if (!pskb_may_pull(skb, sizeof(struct dccp_hdr))) { |
830 | LIMIT_NETDEBUG(KERN_WARNING "DCCP: pskb_may_pull failed\n"); | 750 | DCCP_WARN("pskb_may_pull failed\n"); |
831 | return 1; | 751 | return 1; |
832 | } | 752 | } |
833 | 753 | ||
834 | dh = dccp_hdr(skb); | 754 | dh = dccp_hdr(skb); |
835 | 755 | ||
836 | /* If the packet type is not understood, drop packet and return */ | 756 | /* If P.type is not understood, drop packet and return */ |
837 | if (dh->dccph_type >= DCCP_PKT_INVALID) { | 757 | if (dh->dccph_type >= DCCP_PKT_INVALID) { |
838 | LIMIT_NETDEBUG(KERN_WARNING "DCCP: invalid packet type\n"); | 758 | DCCP_WARN("invalid packet type\n"); |
839 | return 1; | 759 | return 1; |
840 | } | 760 | } |
841 | 761 | ||
842 | /* | 762 | /* |
843 | * If P.Data Offset is too small for packet type, or too large for | 763 | * If P.Data Offset is too small for packet type, drop packet and return |
844 | * packet, drop packet and return | ||
845 | */ | 764 | */ |
846 | if (dh->dccph_doff < dccp_hdr_len(skb) / sizeof(u32)) { | 765 | if (dh->dccph_doff < dccp_hdr_len(skb) / sizeof(u32)) { |
847 | LIMIT_NETDEBUG(KERN_WARNING "DCCP: P.Data Offset(%u) " | 766 | DCCP_WARN("P.Data Offset(%u) too small\n", dh->dccph_doff); |
848 | "too small 1\n", | ||
849 | dh->dccph_doff); | ||
850 | return 1; | 767 | return 1; |
851 | } | 768 | } |
852 | 769 | /* | |
770 | * If P.Data Offset is too large for packet, drop packet and return | ||
771 | */ | ||
853 | if (!pskb_may_pull(skb, dh->dccph_doff * sizeof(u32))) { | 772 | if (!pskb_may_pull(skb, dh->dccph_doff * sizeof(u32))) { |
854 | LIMIT_NETDEBUG(KERN_WARNING "DCCP: P.Data Offset(%u) " | 773 | DCCP_WARN("P.Data Offset(%u) too large\n", dh->dccph_doff); |
855 | "too small 2\n", | ||
856 | dh->dccph_doff); | ||
857 | return 1; | 774 | return 1; |
858 | } | 775 | } |
859 | 776 | ||
860 | dh = dccp_hdr(skb); | ||
861 | |||
862 | /* | 777 | /* |
863 | * If P.type is not Data, Ack, or DataAck and P.X == 0 (the packet | 778 | * If P.type is not Data, Ack, or DataAck and P.X == 0 (the packet |
864 | * has short sequence numbers), drop packet and return | 779 | * has short sequence numbers), drop packet and return |
865 | */ | 780 | */ |
866 | if (dh->dccph_x == 0 && | 781 | if (dh->dccph_type >= DCCP_PKT_DATA && |
867 | dh->dccph_type != DCCP_PKT_DATA && | 782 | dh->dccph_type <= DCCP_PKT_DATAACK && dh->dccph_x == 0) { |
868 | dh->dccph_type != DCCP_PKT_ACK && | 783 | DCCP_WARN("P.type (%s) not Data || [Data]Ack, while P.X == 0\n", |
869 | dh->dccph_type != DCCP_PKT_DATAACK) { | 784 | dccp_packet_name(dh->dccph_type)); |
870 | LIMIT_NETDEBUG(KERN_WARNING "DCCP: P.type (%s) not Data, Ack " | ||
871 | "nor DataAck and P.X == 0\n", | ||
872 | dccp_packet_name(dh->dccph_type)); | ||
873 | return 1; | 785 | return 1; |
874 | } | 786 | } |
875 | 787 | ||
788 | /* | ||
789 | * If P.CsCov is too large for the packet size, drop packet and return. | ||
790 | * This must come _before_ checksumming (not as RFC 4340 suggests). | ||
791 | */ | ||
792 | cscov = dccp_csum_coverage(skb); | ||
793 | if (cscov > skb->len) { | ||
794 | DCCP_WARN("P.CsCov %u exceeds packet length %d\n", | ||
795 | dh->dccph_cscov, skb->len); | ||
796 | return 1; | ||
797 | } | ||
798 | |||
799 | /* If header checksum is incorrect, drop packet and return. | ||
800 | * (This step is completed in the AF-dependent functions.) */ | ||
801 | skb->csum = skb_checksum(skb, 0, cscov, 0); | ||
802 | |||
876 | return 0; | 803 | return 0; |
877 | } | 804 | } |
878 | 805 | ||
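For context, the coverage value that dccp_csum_coverage() feeds into skb_checksum() follows RFC 4340, sec. 9.2: CsCov == 0 means the checksum covers the whole packet, while CsCov 1..15 covers the DCCP header plus the first (CsCov - 1) * 4 bytes of application data. A standalone sketch of that rule, with the Data Offset and CsCov fields passed in as plain integers:

/* Number of bytes the DCCP checksum covers (RFC 4340, sec. 9.2) - sketch.
 * doff  - Data Offset, DCCP header length in 32-bit words
 * cscov - Checksum Coverage field (0..15)
 * len   - total packet length in bytes
 * A caller such as dccp_invalid_packet() must still drop the packet
 * when the returned coverage exceeds the actual packet length. */
static unsigned int dccp_coverage_bytes(unsigned int doff, unsigned int cscov,
					unsigned int len)
{
	if (cscov == 0)
		return len;               /* CsCov 0: checksum the entire packet */
	return (doff + cscov - 1) * 4;    /* header + (CsCov - 1) * 4 data bytes */
}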
@@ -883,17 +810,16 @@ static int dccp_v4_rcv(struct sk_buff *skb) | |||
883 | { | 810 | { |
884 | const struct dccp_hdr *dh; | 811 | const struct dccp_hdr *dh; |
885 | struct sock *sk; | 812 | struct sock *sk; |
813 | int min_cov; | ||
886 | 814 | ||
887 | /* Step 1: Check header basics: */ | 815 | /* Step 1: Check header basics */ |
888 | 816 | ||
889 | if (dccp_invalid_packet(skb)) | 817 | if (dccp_invalid_packet(skb)) |
890 | goto discard_it; | 818 | goto discard_it; |
891 | 819 | ||
892 | /* If the header checksum is incorrect, drop packet and return */ | 820 | /* Step 1: If header checksum is incorrect, drop packet and return */ |
893 | if (dccp_v4_verify_checksum(skb, skb->nh.iph->saddr, | 821 | if (dccp_v4_csum_finish(skb, skb->nh.iph->saddr, skb->nh.iph->daddr)) { |
894 | skb->nh.iph->daddr) < 0) { | 822 | DCCP_WARN("dropped packet with invalid checksum\n"); |
895 | LIMIT_NETDEBUG(KERN_WARNING "%s: incorrect header checksum\n", | ||
896 | __FUNCTION__); | ||
897 | goto discard_it; | 823 | goto discard_it; |
898 | } | 824 | } |
899 | 825 | ||
@@ -915,8 +841,7 @@ static int dccp_v4_rcv(struct sk_buff *skb) | |||
915 | dccp_pr_debug_cat("\n"); | 841 | dccp_pr_debug_cat("\n"); |
916 | } else { | 842 | } else { |
917 | DCCP_SKB_CB(skb)->dccpd_ack_seq = dccp_hdr_ack_seq(skb); | 843 | DCCP_SKB_CB(skb)->dccpd_ack_seq = dccp_hdr_ack_seq(skb); |
918 | dccp_pr_debug_cat(", ack=%llu\n", | 844 | dccp_pr_debug_cat(", ack=%llu\n", (unsigned long long) |
919 | (unsigned long long) | ||
920 | DCCP_SKB_CB(skb)->dccpd_ack_seq); | 845 | DCCP_SKB_CB(skb)->dccpd_ack_seq); |
921 | } | 846 | } |
922 | 847 | ||
@@ -930,8 +855,6 @@ static int dccp_v4_rcv(struct sk_buff *skb) | |||
930 | /* | 855 | /* |
931 | * Step 2: | 856 | * Step 2: |
932 | * If no socket ... | 857 | * If no socket ... |
933 | * Generate Reset(No Connection) unless P.type == Reset | ||
934 | * Drop packet and return | ||
935 | */ | 858 | */ |
936 | if (sk == NULL) { | 859 | if (sk == NULL) { |
937 | dccp_pr_debug("failed to look up flow ID in table and " | 860 | dccp_pr_debug("failed to look up flow ID in table and " |
@@ -945,45 +868,55 @@ static int dccp_v4_rcv(struct sk_buff *skb) | |||
945 | * Generate Reset(No Connection) unless P.type == Reset | 868 | * Generate Reset(No Connection) unless P.type == Reset |
946 | * Drop packet and return | 869 | * Drop packet and return |
947 | */ | 870 | */ |
948 | |||
949 | if (sk->sk_state == DCCP_TIME_WAIT) { | 871 | if (sk->sk_state == DCCP_TIME_WAIT) { |
950 | dccp_pr_debug("sk->sk_state == DCCP_TIME_WAIT: " | 872 | dccp_pr_debug("sk->sk_state == DCCP_TIME_WAIT: do_time_wait\n"); |
951 | "do_time_wait\n"); | 873 | inet_twsk_put(inet_twsk(sk)); |
952 | goto do_time_wait; | 874 | goto no_dccp_socket; |
875 | } | ||
876 | |||
877 | /* | ||
878 | * RFC 4340, sec. 9.2.1: Minimum Checksum Coverage | ||
879 | * o if MinCsCov = 0, only packets with CsCov = 0 are accepted | ||
880 | * o if MinCsCov > 0, also accept packets with CsCov >= MinCsCov | ||
881 | */ | ||
882 | min_cov = dccp_sk(sk)->dccps_pcrlen; | ||
883 | if (dh->dccph_cscov && (min_cov == 0 || dh->dccph_cscov < min_cov)) { | ||
884 | dccp_pr_debug("Packet CsCov %d does not satisfy MinCsCov %d\n", | ||
885 | dh->dccph_cscov, min_cov); | ||
886 | /* FIXME: "Such packets SHOULD be reported using Data Dropped | ||
887 | * options (Section 11.7) with Drop Code 0, Protocol | ||
888 | * Constraints." */ | ||
889 | goto discard_and_relse; | ||
953 | } | 890 | } |
954 | 891 | ||
955 | if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) | 892 | if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) |
956 | goto discard_and_relse; | 893 | goto discard_and_relse; |
957 | nf_reset(skb); | 894 | nf_reset(skb); |
958 | 895 | ||
959 | return sk_receive_skb(sk, skb); | 896 | return sk_receive_skb(sk, skb, 1); |
960 | 897 | ||
961 | no_dccp_socket: | 898 | no_dccp_socket: |
962 | if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) | 899 | if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) |
963 | goto discard_it; | 900 | goto discard_it; |
964 | /* | 901 | /* |
965 | * Step 2: | 902 | * Step 2: |
903 | * If no socket ... | ||
966 | * Generate Reset(No Connection) unless P.type == Reset | 904 | * Generate Reset(No Connection) unless P.type == Reset |
967 | * Drop packet and return | 905 | * Drop packet and return |
968 | */ | 906 | */ |
969 | if (dh->dccph_type != DCCP_PKT_RESET) { | 907 | if (dh->dccph_type != DCCP_PKT_RESET) { |
970 | DCCP_SKB_CB(skb)->dccpd_reset_code = | 908 | DCCP_SKB_CB(skb)->dccpd_reset_code = |
971 | DCCP_RESET_CODE_NO_CONNECTION; | 909 | DCCP_RESET_CODE_NO_CONNECTION; |
972 | dccp_v4_ctl_send_reset(skb); | 910 | dccp_v4_ctl_send_reset(sk, skb); |
973 | } | 911 | } |
974 | 912 | ||
975 | discard_it: | 913 | discard_it: |
976 | /* Discard frame. */ | ||
977 | kfree_skb(skb); | 914 | kfree_skb(skb); |
978 | return 0; | 915 | return 0; |
979 | 916 | ||
980 | discard_and_relse: | 917 | discard_and_relse: |
981 | sock_put(sk); | 918 | sock_put(sk); |
982 | goto discard_it; | 919 | goto discard_it; |
983 | |||
984 | do_time_wait: | ||
985 | inet_twsk_put(inet_twsk(sk)); | ||
986 | goto no_dccp_socket; | ||
987 | } | 920 | } |
988 | 921 | ||
989 | static struct inet_connection_sock_af_ops dccp_ipv4_af_ops = { | 922 | static struct inet_connection_sock_af_ops dccp_ipv4_af_ops = { |
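The MinCsCov test above only matters once an application has opted into partial checksum coverage. A hedged sketch of how a receiver might do that, assuming the DCCP_SOCKOPT_RECV_CSCOV and DCCP_SOCKOPT_SEND_CSCOV socket options from the partial-checksum support in this series (RECV_CSCOV sets dccps_pcrlen, i.e. MinCsCov; SEND_CSCOV limits the coverage of outgoing packets):

#include <sys/socket.h>
#include <linux/dccp.h>

#ifndef SOL_DCCP
#define SOL_DCCP 269    /* value from linux/socket.h, in case libc lacks it */
#endif

/* Accept partially covered packets with CsCov >= 4, i.e. header plus the
 * first 3 * 4 bytes of payload; option names assume this patch series. */
static int dccp_enable_partial_csum(int sock)
{
	int cscov = 4;

	if (setsockopt(sock, SOL_DCCP, DCCP_SOCKOPT_RECV_CSCOV,
		       &cscov, sizeof(cscov)) < 0)
		return -1;
	/* Optionally restrict the coverage of what this side sends as well. */
	return setsockopt(sock, SOL_DCCP, DCCP_SOCKOPT_SEND_CSCOV,
			  &cscov, sizeof(cscov));
}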
@@ -1017,20 +950,6 @@ static int dccp_v4_init_sock(struct sock *sk) | |||
1017 | return err; | 950 | return err; |
1018 | } | 951 | } |
1019 | 952 | ||
1020 | static void dccp_v4_reqsk_destructor(struct request_sock *req) | ||
1021 | { | ||
1022 | kfree(inet_rsk(req)->opt); | ||
1023 | } | ||
1024 | |||
1025 | static struct request_sock_ops dccp_request_sock_ops = { | ||
1026 | .family = PF_INET, | ||
1027 | .obj_size = sizeof(struct dccp_request_sock), | ||
1028 | .rtx_syn_ack = dccp_v4_send_response, | ||
1029 | .send_ack = dccp_v4_reqsk_send_ack, | ||
1030 | .destructor = dccp_v4_reqsk_destructor, | ||
1031 | .send_reset = dccp_v4_ctl_send_reset, | ||
1032 | }; | ||
1033 | |||
1034 | static struct timewait_sock_ops dccp_timewait_sock_ops = { | 953 | static struct timewait_sock_ops dccp_timewait_sock_ops = { |
1035 | .twsk_obj_size = sizeof(struct inet_timewait_sock), | 954 | .twsk_obj_size = sizeof(struct inet_timewait_sock), |
1036 | }; | 955 | }; |
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index fc4242c0767c..c7aaa2574f52 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c | |||
@@ -36,13 +36,6 @@ | |||
36 | /* Socket used for sending RSTs and ACKs */ | 36 | /* Socket used for sending RSTs and ACKs */ |
37 | static struct socket *dccp_v6_ctl_socket; | 37 | static struct socket *dccp_v6_ctl_socket; |
38 | 38 | ||
39 | static void dccp_v6_ctl_send_reset(struct sk_buff *skb); | ||
40 | static void dccp_v6_reqsk_send_ack(struct sk_buff *skb, | ||
41 | struct request_sock *req); | ||
42 | static void dccp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb); | ||
43 | |||
44 | static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb); | ||
45 | |||
46 | static struct inet_connection_sock_af_ops dccp_ipv6_mapped; | 39 | static struct inet_connection_sock_af_ops dccp_ipv6_mapped; |
47 | static struct inet_connection_sock_af_ops dccp_ipv6_af_ops; | 40 | static struct inet_connection_sock_af_ops dccp_ipv6_af_ops; |
48 | 41 | ||
@@ -65,205 +58,37 @@ static void dccp_v6_hash(struct sock *sk) | |||
65 | } | 58 | } |
66 | } | 59 | } |
67 | 60 | ||
68 | static inline u16 dccp_v6_check(struct dccp_hdr *dh, int len, | 61 | /* add pseudo-header to DCCP checksum stored in skb->csum */ |
69 | struct in6_addr *saddr, | 62 | static inline __sum16 dccp_v6_csum_finish(struct sk_buff *skb, |
70 | struct in6_addr *daddr, | 63 | struct in6_addr *saddr, |
71 | unsigned long base) | 64 | struct in6_addr *daddr) |
72 | { | 65 | { |
73 | return csum_ipv6_magic(saddr, daddr, len, IPPROTO_DCCP, base); | 66 | return csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_DCCP, skb->csum); |
74 | } | 67 | } |
75 | 68 | ||
76 | static __u32 dccp_v6_init_sequence(struct sock *sk, struct sk_buff *skb) | 69 | static inline void dccp_v6_send_check(struct sock *sk, int unused_value, |
70 | struct sk_buff *skb) | ||
77 | { | 71 | { |
78 | const struct dccp_hdr *dh = dccp_hdr(skb); | 72 | struct ipv6_pinfo *np = inet6_sk(sk); |
79 | 73 | struct dccp_hdr *dh = dccp_hdr(skb); | |
80 | if (skb->protocol == htons(ETH_P_IPV6)) | ||
81 | return secure_tcpv6_sequence_number(skb->nh.ipv6h->daddr.s6_addr32, | ||
82 | skb->nh.ipv6h->saddr.s6_addr32, | ||
83 | dh->dccph_dport, | ||
84 | dh->dccph_sport); | ||
85 | 74 | ||
86 | return secure_dccp_sequence_number(skb->nh.iph->daddr, | 75 | dccp_csum_outgoing(skb); |
87 | skb->nh.iph->saddr, | 76 | dh->dccph_checksum = dccp_v6_csum_finish(skb, &np->saddr, &np->daddr); |
88 | dh->dccph_dport, | ||
89 | dh->dccph_sport); | ||
90 | } | 77 | } |
91 | 78 | ||
92 | static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr, | 79 | static inline __u32 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr, |
93 | int addr_len) | 80 | __be16 sport, __be16 dport ) |
94 | { | 81 | { |
95 | struct sockaddr_in6 *usin = (struct sockaddr_in6 *)uaddr; | 82 | return secure_tcpv6_sequence_number(saddr, daddr, sport, dport); |
96 | struct inet_connection_sock *icsk = inet_csk(sk); | 83 | } |
97 | struct inet_sock *inet = inet_sk(sk); | ||
98 | struct ipv6_pinfo *np = inet6_sk(sk); | ||
99 | struct dccp_sock *dp = dccp_sk(sk); | ||
100 | struct in6_addr *saddr = NULL, *final_p = NULL, final; | ||
101 | struct flowi fl; | ||
102 | struct dst_entry *dst; | ||
103 | int addr_type; | ||
104 | int err; | ||
105 | |||
106 | dp->dccps_role = DCCP_ROLE_CLIENT; | ||
107 | |||
108 | if (addr_len < SIN6_LEN_RFC2133) | ||
109 | return -EINVAL; | ||
110 | |||
111 | if (usin->sin6_family != AF_INET6) | ||
112 | return -EAFNOSUPPORT; | ||
113 | |||
114 | memset(&fl, 0, sizeof(fl)); | ||
115 | |||
116 | if (np->sndflow) { | ||
117 | fl.fl6_flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK; | ||
118 | IP6_ECN_flow_init(fl.fl6_flowlabel); | ||
119 | if (fl.fl6_flowlabel & IPV6_FLOWLABEL_MASK) { | ||
120 | struct ip6_flowlabel *flowlabel; | ||
121 | flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel); | ||
122 | if (flowlabel == NULL) | ||
123 | return -EINVAL; | ||
124 | ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst); | ||
125 | fl6_sock_release(flowlabel); | ||
126 | } | ||
127 | } | ||
128 | /* | ||
129 | * connect() to INADDR_ANY means loopback (BSD'ism). | ||
130 | */ | ||
131 | if (ipv6_addr_any(&usin->sin6_addr)) | ||
132 | usin->sin6_addr.s6_addr[15] = 1; | ||
133 | |||
134 | addr_type = ipv6_addr_type(&usin->sin6_addr); | ||
135 | |||
136 | if (addr_type & IPV6_ADDR_MULTICAST) | ||
137 | return -ENETUNREACH; | ||
138 | |||
139 | if (addr_type & IPV6_ADDR_LINKLOCAL) { | ||
140 | if (addr_len >= sizeof(struct sockaddr_in6) && | ||
141 | usin->sin6_scope_id) { | ||
142 | /* If interface is set while binding, indices | ||
143 | * must coincide. | ||
144 | */ | ||
145 | if (sk->sk_bound_dev_if && | ||
146 | sk->sk_bound_dev_if != usin->sin6_scope_id) | ||
147 | return -EINVAL; | ||
148 | |||
149 | sk->sk_bound_dev_if = usin->sin6_scope_id; | ||
150 | } | ||
151 | |||
152 | /* Connect to link-local address requires an interface */ | ||
153 | if (!sk->sk_bound_dev_if) | ||
154 | return -EINVAL; | ||
155 | } | ||
156 | |||
157 | ipv6_addr_copy(&np->daddr, &usin->sin6_addr); | ||
158 | np->flow_label = fl.fl6_flowlabel; | ||
159 | |||
160 | /* | ||
161 | * DCCP over IPv4 | ||
162 | */ | ||
163 | if (addr_type == IPV6_ADDR_MAPPED) { | ||
164 | u32 exthdrlen = icsk->icsk_ext_hdr_len; | ||
165 | struct sockaddr_in sin; | ||
166 | |||
167 | SOCK_DEBUG(sk, "connect: ipv4 mapped\n"); | ||
168 | |||
169 | if (__ipv6_only_sock(sk)) | ||
170 | return -ENETUNREACH; | ||
171 | |||
172 | sin.sin_family = AF_INET; | ||
173 | sin.sin_port = usin->sin6_port; | ||
174 | sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3]; | ||
175 | |||
176 | icsk->icsk_af_ops = &dccp_ipv6_mapped; | ||
177 | sk->sk_backlog_rcv = dccp_v4_do_rcv; | ||
178 | |||
179 | err = dccp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin)); | ||
180 | if (err) { | ||
181 | icsk->icsk_ext_hdr_len = exthdrlen; | ||
182 | icsk->icsk_af_ops = &dccp_ipv6_af_ops; | ||
183 | sk->sk_backlog_rcv = dccp_v6_do_rcv; | ||
184 | goto failure; | ||
185 | } else { | ||
186 | ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF), | ||
187 | inet->saddr); | ||
188 | ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000FFFF), | ||
189 | inet->rcv_saddr); | ||
190 | } | ||
191 | |||
192 | return err; | ||
193 | } | ||
194 | |||
195 | if (!ipv6_addr_any(&np->rcv_saddr)) | ||
196 | saddr = &np->rcv_saddr; | ||
197 | |||
198 | fl.proto = IPPROTO_DCCP; | ||
199 | ipv6_addr_copy(&fl.fl6_dst, &np->daddr); | ||
200 | ipv6_addr_copy(&fl.fl6_src, saddr ? saddr : &np->saddr); | ||
201 | fl.oif = sk->sk_bound_dev_if; | ||
202 | fl.fl_ip_dport = usin->sin6_port; | ||
203 | fl.fl_ip_sport = inet->sport; | ||
204 | security_sk_classify_flow(sk, &fl); | ||
205 | |||
206 | if (np->opt != NULL && np->opt->srcrt != NULL) { | ||
207 | const struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt; | ||
208 | |||
209 | ipv6_addr_copy(&final, &fl.fl6_dst); | ||
210 | ipv6_addr_copy(&fl.fl6_dst, rt0->addr); | ||
211 | final_p = &final; | ||
212 | } | ||
213 | |||
214 | err = ip6_dst_lookup(sk, &dst, &fl); | ||
215 | if (err) | ||
216 | goto failure; | ||
217 | |||
218 | if (final_p) | ||
219 | ipv6_addr_copy(&fl.fl6_dst, final_p); | ||
220 | |||
221 | err = xfrm_lookup(&dst, &fl, sk, 0); | ||
222 | if (err < 0) | ||
223 | goto failure; | ||
224 | |||
225 | if (saddr == NULL) { | ||
226 | saddr = &fl.fl6_src; | ||
227 | ipv6_addr_copy(&np->rcv_saddr, saddr); | ||
228 | } | ||
229 | |||
230 | /* set the source address */ | ||
231 | ipv6_addr_copy(&np->saddr, saddr); | ||
232 | inet->rcv_saddr = LOOPBACK4_IPV6; | ||
233 | |||
234 | __ip6_dst_store(sk, dst, NULL, NULL); | ||
235 | |||
236 | icsk->icsk_ext_hdr_len = 0; | ||
237 | if (np->opt != NULL) | ||
238 | icsk->icsk_ext_hdr_len = (np->opt->opt_flen + | ||
239 | np->opt->opt_nflen); | ||
240 | |||
241 | inet->dport = usin->sin6_port; | ||
242 | |||
243 | dccp_set_state(sk, DCCP_REQUESTING); | ||
244 | err = inet6_hash_connect(&dccp_death_row, sk); | ||
245 | if (err) | ||
246 | goto late_failure; | ||
247 | /* FIXME */ | ||
248 | #if 0 | ||
249 | dp->dccps_gar = secure_dccp_v6_sequence_number(np->saddr.s6_addr32, | ||
250 | np->daddr.s6_addr32, | ||
251 | inet->sport, | ||
252 | inet->dport); | ||
253 | #endif | ||
254 | err = dccp_connect(sk); | ||
255 | if (err) | ||
256 | goto late_failure; | ||
257 | 84 | ||
258 | return 0; | 85 | static inline __u32 dccp_v6_init_sequence(struct sk_buff *skb) |
86 | { | ||
87 | return secure_dccpv6_sequence_number(skb->nh.ipv6h->daddr.s6_addr32, | ||
88 | skb->nh.ipv6h->saddr.s6_addr32, | ||
89 | dccp_hdr(skb)->dccph_dport, | ||
90 | dccp_hdr(skb)->dccph_sport ); | ||
259 | 91 | ||
260 | late_failure: | ||
261 | dccp_set_state(sk, DCCP_CLOSED); | ||
262 | __sk_dst_reset(sk); | ||
263 | failure: | ||
264 | inet->dport = 0; | ||
265 | sk->sk_route_caps = 0; | ||
266 | return err; | ||
267 | } | 92 | } |
268 | 93 | ||
269 | static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | 94 | static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, |
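The split into dccp_csum_outgoing() plus dccp_v6_csum_finish() is the usual two-stage Internet checksum: first accumulate the sum over the DCCP header and payload, then fold in the pseudo-header (here via csum_ipv6_magic()) and complement. A small standalone sketch of the same one's-complement arithmetic, for illustration only:

#include <stdint.h>
#include <stddef.h>

/* Stage 1: accumulate a 16-bit one's-complement sum over a buffer,
 * roughly what dccp_csum_outgoing() leaves in skb->csum. */
static uint32_t csum_partial_sketch(const uint8_t *data, size_t len, uint32_t sum)
{
	while (len > 1) {
		sum += ((uint32_t)data[0] << 8) | data[1];
		data += 2;
		len -= 2;
	}
	if (len)                               /* pad an odd trailing byte with zero */
		sum += (uint32_t)data[0] << 8;
	return sum;
}

/* Stage 2: after the pseudo-header (addresses, length, next header) has been
 * added to the sum, fold the carries and take the one's complement. */
static uint16_t csum_finish_sketch(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}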
@@ -464,16 +289,12 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req, | |||
464 | if (skb != NULL) { | 289 | if (skb != NULL) { |
465 | struct dccp_hdr *dh = dccp_hdr(skb); | 290 | struct dccp_hdr *dh = dccp_hdr(skb); |
466 | 291 | ||
467 | dh->dccph_checksum = dccp_v6_check(dh, skb->len, | 292 | dh->dccph_checksum = dccp_v6_csum_finish(skb, |
468 | &ireq6->loc_addr, | 293 | &ireq6->loc_addr, |
469 | &ireq6->rmt_addr, | 294 | &ireq6->rmt_addr); |
470 | csum_partial((char *)dh, | ||
471 | skb->len, | ||
472 | skb->csum)); | ||
473 | ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr); | 295 | ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr); |
474 | err = ip6_xmit(sk, skb, &fl, opt, 0); | 296 | err = ip6_xmit(sk, skb, &fl, opt, 0); |
475 | if (err == NET_XMIT_CN) | 297 | err = net_xmit_eval(err); |
476 | err = 0; | ||
477 | } | 298 | } |
478 | 299 | ||
479 | done: | 300 | done: |
@@ -489,32 +310,7 @@ static void dccp_v6_reqsk_destructor(struct request_sock *req) | |||
489 | kfree_skb(inet6_rsk(req)->pktopts); | 310 | kfree_skb(inet6_rsk(req)->pktopts); |
490 | } | 311 | } |
491 | 312 | ||
492 | static struct request_sock_ops dccp6_request_sock_ops = { | 313 | static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb) |
493 | .family = AF_INET6, | ||
494 | .obj_size = sizeof(struct dccp6_request_sock), | ||
495 | .rtx_syn_ack = dccp_v6_send_response, | ||
496 | .send_ack = dccp_v6_reqsk_send_ack, | ||
497 | .destructor = dccp_v6_reqsk_destructor, | ||
498 | .send_reset = dccp_v6_ctl_send_reset, | ||
499 | }; | ||
500 | |||
501 | static struct timewait_sock_ops dccp6_timewait_sock_ops = { | ||
502 | .twsk_obj_size = sizeof(struct dccp6_timewait_sock), | ||
503 | }; | ||
504 | |||
505 | static void dccp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb) | ||
506 | { | ||
507 | struct ipv6_pinfo *np = inet6_sk(sk); | ||
508 | struct dccp_hdr *dh = dccp_hdr(skb); | ||
509 | |||
510 | dh->dccph_checksum = csum_ipv6_magic(&np->saddr, &np->daddr, | ||
511 | len, IPPROTO_DCCP, | ||
512 | csum_partial((char *)dh, | ||
513 | dh->dccph_doff << 2, | ||
514 | skb->csum)); | ||
515 | } | ||
516 | |||
517 | static void dccp_v6_ctl_send_reset(struct sk_buff *rxskb) | ||
518 | { | 314 | { |
519 | struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh; | 315 | struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh; |
520 | const u32 dccp_hdr_reset_len = sizeof(struct dccp_hdr) + | 316 | const u32 dccp_hdr_reset_len = sizeof(struct dccp_hdr) + |
@@ -522,7 +318,7 @@ static void dccp_v6_ctl_send_reset(struct sk_buff *rxskb) | |||
522 | sizeof(struct dccp_hdr_reset); | 318 | sizeof(struct dccp_hdr_reset); |
523 | struct sk_buff *skb; | 319 | struct sk_buff *skb; |
524 | struct flowi fl; | 320 | struct flowi fl; |
525 | u64 seqno; | 321 | u64 seqno = 0; |
526 | 322 | ||
527 | if (rxdh->dccph_type == DCCP_PKT_RESET) | 323 | if (rxdh->dccph_type == DCCP_PKT_RESET) |
528 | return; | 324 | return; |
@@ -537,9 +333,7 @@ static void dccp_v6_ctl_send_reset(struct sk_buff *rxskb) | |||
537 | 333 | ||
538 | skb_reserve(skb, dccp_v6_ctl_socket->sk->sk_prot->max_header); | 334 | skb_reserve(skb, dccp_v6_ctl_socket->sk->sk_prot->max_header); |
539 | 335 | ||
540 | skb->h.raw = skb_push(skb, dccp_hdr_reset_len); | 336 | dh = dccp_zeroed_hdr(skb, dccp_hdr_reset_len); |
541 | dh = dccp_hdr(skb); | ||
542 | memset(dh, 0, dccp_hdr_reset_len); | ||
543 | 337 | ||
544 | /* Swap the send and the receive. */ | 338 | /* Swap the send and the receive. */ |
545 | dh->dccph_type = DCCP_PKT_RESET; | 339 | dh->dccph_type = DCCP_PKT_RESET; |
@@ -551,20 +345,20 @@ static void dccp_v6_ctl_send_reset(struct sk_buff *rxskb) | |||
551 | DCCP_SKB_CB(rxskb)->dccpd_reset_code; | 345 | DCCP_SKB_CB(rxskb)->dccpd_reset_code; |
552 | 346 | ||
553 | /* See "8.3.1. Abnormal Termination" in RFC 4340 */ | 347 | /* See "8.3.1. Abnormal Termination" in RFC 4340 */ |
554 | seqno = 0; | ||
555 | if (DCCP_SKB_CB(rxskb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ) | 348 | if (DCCP_SKB_CB(rxskb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ) |
556 | dccp_set_seqno(&seqno, DCCP_SKB_CB(rxskb)->dccpd_ack_seq + 1); | 349 | dccp_set_seqno(&seqno, DCCP_SKB_CB(rxskb)->dccpd_ack_seq + 1); |
557 | 350 | ||
558 | dccp_hdr_set_seq(dh, seqno); | 351 | dccp_hdr_set_seq(dh, seqno); |
559 | dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), | 352 | dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), DCCP_SKB_CB(rxskb)->dccpd_seq); |
560 | DCCP_SKB_CB(rxskb)->dccpd_seq); | 353 | |
354 | dccp_csum_outgoing(skb); | ||
355 | dh->dccph_checksum = dccp_v6_csum_finish(skb, &rxskb->nh.ipv6h->saddr, | ||
356 | &rxskb->nh.ipv6h->daddr); | ||
561 | 357 | ||
562 | memset(&fl, 0, sizeof(fl)); | 358 | memset(&fl, 0, sizeof(fl)); |
563 | ipv6_addr_copy(&fl.fl6_dst, &rxskb->nh.ipv6h->saddr); | 359 | ipv6_addr_copy(&fl.fl6_dst, &rxskb->nh.ipv6h->saddr); |
564 | ipv6_addr_copy(&fl.fl6_src, &rxskb->nh.ipv6h->daddr); | 360 | ipv6_addr_copy(&fl.fl6_src, &rxskb->nh.ipv6h->daddr); |
565 | dh->dccph_checksum = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst, | 361 | |
566 | sizeof(*dh), IPPROTO_DCCP, | ||
567 | skb->csum); | ||
568 | fl.proto = IPPROTO_DCCP; | 362 | fl.proto = IPPROTO_DCCP; |
569 | fl.oif = inet6_iif(rxskb); | 363 | fl.oif = inet6_iif(rxskb); |
570 | fl.fl_ip_dport = dh->dccph_dport; | 364 | fl.fl_ip_dport = dh->dccph_dport; |
@@ -584,60 +378,14 @@ static void dccp_v6_ctl_send_reset(struct sk_buff *rxskb) | |||
584 | kfree_skb(skb); | 378 | kfree_skb(skb); |
585 | } | 379 | } |
586 | 380 | ||
587 | static void dccp_v6_reqsk_send_ack(struct sk_buff *rxskb, | 381 | static struct request_sock_ops dccp6_request_sock_ops = { |
588 | struct request_sock *req) | 382 | .family = AF_INET6, |
589 | { | 383 | .obj_size = sizeof(struct dccp6_request_sock), |
590 | struct flowi fl; | 384 | .rtx_syn_ack = dccp_v6_send_response, |
591 | struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh; | 385 | .send_ack = dccp_reqsk_send_ack, |
592 | const u32 dccp_hdr_ack_len = sizeof(struct dccp_hdr) + | 386 | .destructor = dccp_v6_reqsk_destructor, |
593 | sizeof(struct dccp_hdr_ext) + | 387 | .send_reset = dccp_v6_ctl_send_reset, |
594 | sizeof(struct dccp_hdr_ack_bits); | 388 | }; |
595 | struct sk_buff *skb; | ||
596 | |||
597 | skb = alloc_skb(dccp_v6_ctl_socket->sk->sk_prot->max_header, | ||
598 | GFP_ATOMIC); | ||
599 | if (skb == NULL) | ||
600 | return; | ||
601 | |||
602 | skb_reserve(skb, dccp_v6_ctl_socket->sk->sk_prot->max_header); | ||
603 | |||
604 | skb->h.raw = skb_push(skb, dccp_hdr_ack_len); | ||
605 | dh = dccp_hdr(skb); | ||
606 | memset(dh, 0, dccp_hdr_ack_len); | ||
607 | |||
608 | /* Build DCCP header and checksum it. */ | ||
609 | dh->dccph_type = DCCP_PKT_ACK; | ||
610 | dh->dccph_sport = rxdh->dccph_dport; | ||
611 | dh->dccph_dport = rxdh->dccph_sport; | ||
612 | dh->dccph_doff = dccp_hdr_ack_len / 4; | ||
613 | dh->dccph_x = 1; | ||
614 | |||
615 | dccp_hdr_set_seq(dh, DCCP_SKB_CB(rxskb)->dccpd_ack_seq); | ||
616 | dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), | ||
617 | DCCP_SKB_CB(rxskb)->dccpd_seq); | ||
618 | |||
619 | memset(&fl, 0, sizeof(fl)); | ||
620 | ipv6_addr_copy(&fl.fl6_dst, &rxskb->nh.ipv6h->saddr); | ||
621 | ipv6_addr_copy(&fl.fl6_src, &rxskb->nh.ipv6h->daddr); | ||
622 | |||
623 | /* FIXME: calculate checksum, IPv4 also should... */ | ||
624 | |||
625 | fl.proto = IPPROTO_DCCP; | ||
626 | fl.oif = inet6_iif(rxskb); | ||
627 | fl.fl_ip_dport = dh->dccph_dport; | ||
628 | fl.fl_ip_sport = dh->dccph_sport; | ||
629 | security_req_classify_flow(req, &fl); | ||
630 | |||
631 | if (!ip6_dst_lookup(NULL, &skb->dst, &fl)) { | ||
632 | if (xfrm_lookup(&skb->dst, &fl, NULL, 0) >= 0) { | ||
633 | ip6_xmit(dccp_v6_ctl_socket->sk, skb, &fl, NULL, 0); | ||
634 | DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS); | ||
635 | return; | ||
636 | } | ||
637 | } | ||
638 | |||
639 | kfree_skb(skb); | ||
640 | } | ||
641 | 389 | ||
642 | static struct sock *dccp_v6_hnd_req(struct sock *sk,struct sk_buff *skb) | 390 | static struct sock *dccp_v6_hnd_req(struct sock *sk,struct sk_buff *skb) |
643 | { | 391 | { |
@@ -672,7 +420,6 @@ static struct sock *dccp_v6_hnd_req(struct sock *sk,struct sk_buff *skb) | |||
672 | 420 | ||
673 | static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb) | 421 | static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb) |
674 | { | 422 | { |
675 | struct dccp_sock dp; | ||
676 | struct request_sock *req; | 423 | struct request_sock *req; |
677 | struct dccp_request_sock *dreq; | 424 | struct dccp_request_sock *dreq; |
678 | struct inet6_request_sock *ireq6; | 425 | struct inet6_request_sock *ireq6; |
@@ -704,9 +451,10 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb) | |||
704 | if (req == NULL) | 451 | if (req == NULL) |
705 | goto drop; | 452 | goto drop; |
706 | 453 | ||
707 | /* FIXME: process options */ | 454 | if (dccp_parse_options(sk, skb)) |
455 | goto drop_and_free; | ||
708 | 456 | ||
709 | dccp_openreq_init(req, &dp, skb); | 457 | dccp_reqsk_init(req, skb); |
710 | 458 | ||
711 | if (security_inet_conn_request(sk, skb, req)) | 459 | if (security_inet_conn_request(sk, skb, req)) |
712 | goto drop_and_free; | 460 | goto drop_and_free; |
@@ -714,7 +462,6 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb) | |||
714 | ireq6 = inet6_rsk(req); | 462 | ireq6 = inet6_rsk(req); |
715 | ipv6_addr_copy(&ireq6->rmt_addr, &skb->nh.ipv6h->saddr); | 463 | ipv6_addr_copy(&ireq6->rmt_addr, &skb->nh.ipv6h->saddr); |
716 | ipv6_addr_copy(&ireq6->loc_addr, &skb->nh.ipv6h->daddr); | 464 | ipv6_addr_copy(&ireq6->loc_addr, &skb->nh.ipv6h->daddr); |
717 | req->rcv_wnd = dccp_feat_default_sequence_window; | ||
718 | ireq6->pktopts = NULL; | 465 | ireq6->pktopts = NULL; |
719 | 466 | ||
720 | if (ipv6_opt_accepted(sk, skb) || | 467 | if (ipv6_opt_accepted(sk, skb) || |
@@ -733,14 +480,14 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb) | |||
733 | /* | 480 | /* |
734 | * Step 3: Process LISTEN state | 481 | * Step 3: Process LISTEN state |
735 | * | 482 | * |
736 | * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie | 483 | * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie |
737 | * | 484 | * |
738 | * In fact we defer setting S.GSR, S.SWL, S.SWH to | 485 | * In fact we defer setting S.GSR, S.SWL, S.SWH to |
739 | * dccp_create_openreq_child. | 486 | * dccp_create_openreq_child. |
740 | */ | 487 | */ |
741 | dreq = dccp_rsk(req); | 488 | dreq = dccp_rsk(req); |
742 | dreq->dreq_isr = dcb->dccpd_seq; | 489 | dreq->dreq_isr = dcb->dccpd_seq; |
743 | dreq->dreq_iss = dccp_v6_init_sequence(sk, skb); | 490 | dreq->dreq_iss = dccp_v6_init_sequence(skb); |
744 | dreq->dreq_service = service; | 491 | dreq->dreq_service = service; |
745 | 492 | ||
746 | if (dccp_v6_send_response(sk, req, NULL)) | 493 | if (dccp_v6_send_response(sk, req, NULL)) |
@@ -990,18 +737,46 @@ static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) | |||
990 | --ANK (980728) | 737 | --ANK (980728) |
991 | */ | 738 | */ |
992 | if (np->rxopt.all) | 739 | if (np->rxopt.all) |
740 | /* | ||
741 | * FIXME: Add handling of IPV6_PKTOPTIONS skb. See the comments below | ||
742 | * (wrt ipv6_pktoptions) and net/ipv6/tcp_ipv6.c for an example. | ||
743 | */ | ||
993 | opt_skb = skb_clone(skb, GFP_ATOMIC); | 744 | opt_skb = skb_clone(skb, GFP_ATOMIC); |
994 | 745 | ||
995 | if (sk->sk_state == DCCP_OPEN) { /* Fast path */ | 746 | if (sk->sk_state == DCCP_OPEN) { /* Fast path */ |
996 | if (dccp_rcv_established(sk, skb, dccp_hdr(skb), skb->len)) | 747 | if (dccp_rcv_established(sk, skb, dccp_hdr(skb), skb->len)) |
997 | goto reset; | 748 | goto reset; |
998 | if (opt_skb) { | 749 | if (opt_skb) { |
999 | /* This is where we would goto ipv6_pktoptions. */ | 750 | /* XXX This is where we would goto ipv6_pktoptions. */ |
1000 | __kfree_skb(opt_skb); | 751 | __kfree_skb(opt_skb); |
1001 | } | 752 | } |
1002 | return 0; | 753 | return 0; |
1003 | } | 754 | } |
1004 | 755 | ||
756 | /* | ||
757 | * Step 3: Process LISTEN state | ||
758 | * If S.state == LISTEN, | ||
759 | * If P.type == Request or P contains a valid Init Cookie option, | ||
760 | * (* Must scan the packet's options to check for Init | ||
761 | * Cookies. Only Init Cookies are processed here, | ||
762 | * however; other options are processed in Step 8. This | ||
763 | * scan need only be performed if the endpoint uses Init | ||
764 | * Cookies *) | ||
765 | * (* Generate a new socket and switch to that socket *) | ||
766 | * Set S := new socket for this port pair | ||
767 | * S.state = RESPOND | ||
768 | * Choose S.ISS (initial seqno) or set from Init Cookies | ||
769 | * Initialize S.GAR := S.ISS | ||
770 | * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookies | ||
771 | * Continue with S.state == RESPOND | ||
772 | * (* A Response packet will be generated in Step 11 *) | ||
773 | * Otherwise, | ||
774 | * Generate Reset(No Connection) unless P.type == Reset | ||
775 | * Drop packet and return | ||
776 | * | ||
777 | * NOTE: the check for the packet types is done in | ||
778 | * dccp_rcv_state_process | ||
779 | */ | ||
1005 | if (sk->sk_state == DCCP_LISTEN) { | 780 | if (sk->sk_state == DCCP_LISTEN) { |
1006 | struct sock *nsk = dccp_v6_hnd_req(sk, skb); | 781 | struct sock *nsk = dccp_v6_hnd_req(sk, skb); |
1007 | 782 | ||
@@ -1024,13 +799,13 @@ static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) | |||
1024 | if (dccp_rcv_state_process(sk, skb, dccp_hdr(skb), skb->len)) | 799 | if (dccp_rcv_state_process(sk, skb, dccp_hdr(skb), skb->len)) |
1025 | goto reset; | 800 | goto reset; |
1026 | if (opt_skb) { | 801 | if (opt_skb) { |
1027 | /* This is where we would goto ipv6_pktoptions. */ | 802 | /* XXX This is where we would goto ipv6_pktoptions. */ |
1028 | __kfree_skb(opt_skb); | 803 | __kfree_skb(opt_skb); |
1029 | } | 804 | } |
1030 | return 0; | 805 | return 0; |
1031 | 806 | ||
1032 | reset: | 807 | reset: |
1033 | dccp_v6_ctl_send_reset(skb); | 808 | dccp_v6_ctl_send_reset(sk, skb); |
1034 | discard: | 809 | discard: |
1035 | if (opt_skb != NULL) | 810 | if (opt_skb != NULL) |
1036 | __kfree_skb(opt_skb); | 811 | __kfree_skb(opt_skb); |
@@ -1043,12 +818,20 @@ static int dccp_v6_rcv(struct sk_buff **pskb) | |||
1043 | const struct dccp_hdr *dh; | 818 | const struct dccp_hdr *dh; |
1044 | struct sk_buff *skb = *pskb; | 819 | struct sk_buff *skb = *pskb; |
1045 | struct sock *sk; | 820 | struct sock *sk; |
821 | int min_cov; | ||
1046 | 822 | ||
1047 | /* Step 1: Check header basics: */ | 823 | /* Step 1: Check header basics */ |
1048 | 824 | ||
1049 | if (dccp_invalid_packet(skb)) | 825 | if (dccp_invalid_packet(skb)) |
1050 | goto discard_it; | 826 | goto discard_it; |
1051 | 827 | ||
828 | /* Step 1: If header checksum is incorrect, drop packet and return. */ | ||
829 | if (dccp_v6_csum_finish(skb, &skb->nh.ipv6h->saddr, | ||
830 | &skb->nh.ipv6h->daddr)) { | ||
831 | DCCP_WARN("dropped packet with invalid checksum\n"); | ||
832 | goto discard_it; | ||
833 | } | ||
834 | |||
1052 | dh = dccp_hdr(skb); | 835 | dh = dccp_hdr(skb); |
1053 | 836 | ||
1054 | DCCP_SKB_CB(skb)->dccpd_seq = dccp_hdr_seq(skb); | 837 | DCCP_SKB_CB(skb)->dccpd_seq = dccp_hdr_seq(skb); |
@@ -1068,11 +851,12 @@ static int dccp_v6_rcv(struct sk_buff **pskb) | |||
1068 | /* | 851 | /* |
1069 | * Step 2: | 852 | * Step 2: |
1070 | * If no socket ... | 853 | * If no socket ... |
1071 | * Generate Reset(No Connection) unless P.type == Reset | ||
1072 | * Drop packet and return | ||
1073 | */ | 854 | */ |
1074 | if (sk == NULL) | 855 | if (sk == NULL) { |
856 | dccp_pr_debug("failed to look up flow ID in table and " | ||
857 | "get corresponding socket\n"); | ||
1075 | goto no_dccp_socket; | 858 | goto no_dccp_socket; |
859 | } | ||
1076 | 860 | ||
1077 | /* | 861 | /* |
1078 | * Step 2: | 862 | * Step 2: |
@@ -1080,43 +864,226 @@ static int dccp_v6_rcv(struct sk_buff **pskb) | |||
1080 | * Generate Reset(No Connection) unless P.type == Reset | 864 | * Generate Reset(No Connection) unless P.type == Reset |
1081 | * Drop packet and return | 865 | * Drop packet and return |
1082 | */ | 866 | */ |
1083 | if (sk->sk_state == DCCP_TIME_WAIT) | 867 | if (sk->sk_state == DCCP_TIME_WAIT) { |
1084 | goto do_time_wait; | 868 | dccp_pr_debug("sk->sk_state == DCCP_TIME_WAIT: do_time_wait\n"); |
869 | inet_twsk_put(inet_twsk(sk)); | ||
870 | goto no_dccp_socket; | ||
871 | } | ||
872 | |||
873 | /* | ||
874 | * RFC 4340, sec. 9.2.1: Minimum Checksum Coverage | ||
875 | * o if MinCsCov = 0, only packets with CsCov = 0 are accepted | ||
876 | * o if MinCsCov > 0, also accept packets with CsCov >= MinCsCov | ||
877 | */ | ||
878 | min_cov = dccp_sk(sk)->dccps_pcrlen; | ||
879 | if (dh->dccph_cscov && (min_cov == 0 || dh->dccph_cscov < min_cov)) { | ||
880 | dccp_pr_debug("Packet CsCov %d does not satisfy MinCsCov %d\n", | ||
881 | dh->dccph_cscov, min_cov); | ||
882 | /* FIXME: send Data Dropped option (see also dccp_v4_rcv) */ | ||
883 | goto discard_and_relse; | ||
884 | } | ||
1085 | 885 | ||
1086 | if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) | 886 | if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) |
1087 | goto discard_and_relse; | 887 | goto discard_and_relse; |
1088 | 888 | ||
1089 | return sk_receive_skb(sk, skb) ? -1 : 0; | 889 | return sk_receive_skb(sk, skb, 1) ? -1 : 0; |
1090 | 890 | ||
1091 | no_dccp_socket: | 891 | no_dccp_socket: |
1092 | if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) | 892 | if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) |
1093 | goto discard_it; | 893 | goto discard_it; |
1094 | /* | 894 | /* |
1095 | * Step 2: | 895 | * Step 2: |
896 | * If no socket ... | ||
1096 | * Generate Reset(No Connection) unless P.type == Reset | 897 | * Generate Reset(No Connection) unless P.type == Reset |
1097 | * Drop packet and return | 898 | * Drop packet and return |
1098 | */ | 899 | */ |
1099 | if (dh->dccph_type != DCCP_PKT_RESET) { | 900 | if (dh->dccph_type != DCCP_PKT_RESET) { |
1100 | DCCP_SKB_CB(skb)->dccpd_reset_code = | 901 | DCCP_SKB_CB(skb)->dccpd_reset_code = |
1101 | DCCP_RESET_CODE_NO_CONNECTION; | 902 | DCCP_RESET_CODE_NO_CONNECTION; |
1102 | dccp_v6_ctl_send_reset(skb); | 903 | dccp_v6_ctl_send_reset(sk, skb); |
1103 | } | 904 | } |
1104 | discard_it: | ||
1105 | |||
1106 | /* | ||
1107 | * Discard frame | ||
1108 | */ | ||
1109 | 905 | ||
906 | discard_it: | ||
1110 | kfree_skb(skb); | 907 | kfree_skb(skb); |
1111 | return 0; | 908 | return 0; |
1112 | 909 | ||
1113 | discard_and_relse: | 910 | discard_and_relse: |
1114 | sock_put(sk); | 911 | sock_put(sk); |
1115 | goto discard_it; | 912 | goto discard_it; |
913 | } | ||
1116 | 914 | ||
1117 | do_time_wait: | 915 | static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr, |
1118 | inet_twsk_put(inet_twsk(sk)); | 916 | int addr_len) |
1119 | goto no_dccp_socket; | 917 | { |
918 | struct sockaddr_in6 *usin = (struct sockaddr_in6 *)uaddr; | ||
919 | struct inet_connection_sock *icsk = inet_csk(sk); | ||
920 | struct inet_sock *inet = inet_sk(sk); | ||
921 | struct ipv6_pinfo *np = inet6_sk(sk); | ||
922 | struct dccp_sock *dp = dccp_sk(sk); | ||
923 | struct in6_addr *saddr = NULL, *final_p = NULL, final; | ||
924 | struct flowi fl; | ||
925 | struct dst_entry *dst; | ||
926 | int addr_type; | ||
927 | int err; | ||
928 | |||
929 | dp->dccps_role = DCCP_ROLE_CLIENT; | ||
930 | |||
931 | if (addr_len < SIN6_LEN_RFC2133) | ||
932 | return -EINVAL; | ||
933 | |||
934 | if (usin->sin6_family != AF_INET6) | ||
935 | return -EAFNOSUPPORT; | ||
936 | |||
937 | memset(&fl, 0, sizeof(fl)); | ||
938 | |||
939 | if (np->sndflow) { | ||
940 | fl.fl6_flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK; | ||
941 | IP6_ECN_flow_init(fl.fl6_flowlabel); | ||
942 | if (fl.fl6_flowlabel & IPV6_FLOWLABEL_MASK) { | ||
943 | struct ip6_flowlabel *flowlabel; | ||
944 | flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel); | ||
945 | if (flowlabel == NULL) | ||
946 | return -EINVAL; | ||
947 | ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst); | ||
948 | fl6_sock_release(flowlabel); | ||
949 | } | ||
950 | } | ||
951 | /* | ||
952 | * connect() to INADDR_ANY means loopback (BSD'ism). | ||
953 | */ | ||
954 | if (ipv6_addr_any(&usin->sin6_addr)) | ||
955 | usin->sin6_addr.s6_addr[15] = 1; | ||
956 | |||
957 | addr_type = ipv6_addr_type(&usin->sin6_addr); | ||
958 | |||
959 | if (addr_type & IPV6_ADDR_MULTICAST) | ||
960 | return -ENETUNREACH; | ||
961 | |||
962 | if (addr_type & IPV6_ADDR_LINKLOCAL) { | ||
963 | if (addr_len >= sizeof(struct sockaddr_in6) && | ||
964 | usin->sin6_scope_id) { | ||
965 | /* If interface is set while binding, indices | ||
966 | * must coincide. | ||
967 | */ | ||
968 | if (sk->sk_bound_dev_if && | ||
969 | sk->sk_bound_dev_if != usin->sin6_scope_id) | ||
970 | return -EINVAL; | ||
971 | |||
972 | sk->sk_bound_dev_if = usin->sin6_scope_id; | ||
973 | } | ||
974 | |||
975 | /* Connect to link-local address requires an interface */ | ||
976 | if (!sk->sk_bound_dev_if) | ||
977 | return -EINVAL; | ||
978 | } | ||
979 | |||
980 | ipv6_addr_copy(&np->daddr, &usin->sin6_addr); | ||
981 | np->flow_label = fl.fl6_flowlabel; | ||
982 | |||
983 | /* | ||
984 | * DCCP over IPv4 | ||
985 | */ | ||
986 | if (addr_type == IPV6_ADDR_MAPPED) { | ||
987 | u32 exthdrlen = icsk->icsk_ext_hdr_len; | ||
988 | struct sockaddr_in sin; | ||
989 | |||
990 | SOCK_DEBUG(sk, "connect: ipv4 mapped\n"); | ||
991 | |||
992 | if (__ipv6_only_sock(sk)) | ||
993 | return -ENETUNREACH; | ||
994 | |||
995 | sin.sin_family = AF_INET; | ||
996 | sin.sin_port = usin->sin6_port; | ||
997 | sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3]; | ||
998 | |||
999 | icsk->icsk_af_ops = &dccp_ipv6_mapped; | ||
1000 | sk->sk_backlog_rcv = dccp_v4_do_rcv; | ||
1001 | |||
1002 | err = dccp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin)); | ||
1003 | if (err) { | ||
1004 | icsk->icsk_ext_hdr_len = exthdrlen; | ||
1005 | icsk->icsk_af_ops = &dccp_ipv6_af_ops; | ||
1006 | sk->sk_backlog_rcv = dccp_v6_do_rcv; | ||
1007 | goto failure; | ||
1008 | } else { | ||
1009 | ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF), | ||
1010 | inet->saddr); | ||
1011 | ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000FFFF), | ||
1012 | inet->rcv_saddr); | ||
1013 | } | ||
1014 | |||
1015 | return err; | ||
1016 | } | ||
1017 | |||
1018 | if (!ipv6_addr_any(&np->rcv_saddr)) | ||
1019 | saddr = &np->rcv_saddr; | ||
1020 | |||
1021 | fl.proto = IPPROTO_DCCP; | ||
1022 | ipv6_addr_copy(&fl.fl6_dst, &np->daddr); | ||
1023 | ipv6_addr_copy(&fl.fl6_src, saddr ? saddr : &np->saddr); | ||
1024 | fl.oif = sk->sk_bound_dev_if; | ||
1025 | fl.fl_ip_dport = usin->sin6_port; | ||
1026 | fl.fl_ip_sport = inet->sport; | ||
1027 | security_sk_classify_flow(sk, &fl); | ||
1028 | |||
1029 | if (np->opt != NULL && np->opt->srcrt != NULL) { | ||
1030 | const struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt; | ||
1031 | |||
1032 | ipv6_addr_copy(&final, &fl.fl6_dst); | ||
1033 | ipv6_addr_copy(&fl.fl6_dst, rt0->addr); | ||
1034 | final_p = &final; | ||
1035 | } | ||
1036 | |||
1037 | err = ip6_dst_lookup(sk, &dst, &fl); | ||
1038 | if (err) | ||
1039 | goto failure; | ||
1040 | |||
1041 | if (final_p) | ||
1042 | ipv6_addr_copy(&fl.fl6_dst, final_p); | ||
1043 | |||
1044 | err = xfrm_lookup(&dst, &fl, sk, 0); | ||
1045 | if (err < 0) | ||
1046 | goto failure; | ||
1047 | |||
1048 | if (saddr == NULL) { | ||
1049 | saddr = &fl.fl6_src; | ||
1050 | ipv6_addr_copy(&np->rcv_saddr, saddr); | ||
1051 | } | ||
1052 | |||
1053 | /* set the source address */ | ||
1054 | ipv6_addr_copy(&np->saddr, saddr); | ||
1055 | inet->rcv_saddr = LOOPBACK4_IPV6; | ||
1056 | |||
1057 | __ip6_dst_store(sk, dst, NULL, NULL); | ||
1058 | |||
1059 | icsk->icsk_ext_hdr_len = 0; | ||
1060 | if (np->opt != NULL) | ||
1061 | icsk->icsk_ext_hdr_len = (np->opt->opt_flen + | ||
1062 | np->opt->opt_nflen); | ||
1063 | |||
1064 | inet->dport = usin->sin6_port; | ||
1065 | |||
1066 | dccp_set_state(sk, DCCP_REQUESTING); | ||
1067 | err = inet6_hash_connect(&dccp_death_row, sk); | ||
1068 | if (err) | ||
1069 | goto late_failure; | ||
1070 | |||
1071 | dp->dccps_iss = secure_dccpv6_sequence_number(np->saddr.s6_addr32, | ||
1072 | np->daddr.s6_addr32, | ||
1073 | inet->sport, inet->dport); | ||
1074 | err = dccp_connect(sk); | ||
1075 | if (err) | ||
1076 | goto late_failure; | ||
1077 | |||
1078 | return 0; | ||
1079 | |||
1080 | late_failure: | ||
1081 | dccp_set_state(sk, DCCP_CLOSED); | ||
1082 | __sk_dst_reset(sk); | ||
1083 | failure: | ||
1084 | inet->dport = 0; | ||
1085 | sk->sk_route_caps = 0; | ||
1086 | return err; | ||
1120 | } | 1087 | } |
1121 | 1088 | ||
1122 | static struct inet_connection_sock_af_ops dccp_ipv6_af_ops = { | 1089 | static struct inet_connection_sock_af_ops dccp_ipv6_af_ops = { |
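The dccp_v6_connect() path shown above is entered from an ordinary connect() on an AF_INET6 DCCP socket, and now also seeds dccps_iss via secure_dccpv6_sequence_number() instead of the previous #if 0 block. A minimal client-side sketch, assuming the same SOCK_DCCP / DCCP_SOCKOPT_SERVICE userspace interface as in the IPv4 example; the address, port and service code are example values and error handling is omitted:

#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/dccp.h>

#ifndef SOL_DCCP
#define SOL_DCCP 269
#endif

int main(void)
{
	int fd = socket(AF_INET6, SOCK_DCCP, IPPROTO_DCCP);
	uint32_t service = htonl(42);          /* must match the listener's service code */
	struct sockaddr_in6 peer;

	setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_SERVICE, &service, sizeof(service));

	memset(&peer, 0, sizeof(peer));
	peer.sin6_family = AF_INET6;
	peer.sin6_port = htons(5001);          /* example port */
	inet_pton(AF_INET6, "::1", &peer.sin6_addr);

	/* Triggers dccp_v6_connect(): a Request is sent and the socket
	 * moves to DCCP_REQUESTING until the Response arrives. */
	if (connect(fd, (struct sockaddr *)&peer, sizeof(peer)) == 0)
		write(fd, "hello", 5);

	close(fd);
	return 0;
}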
@@ -1179,6 +1146,10 @@ static int dccp_v6_destroy_sock(struct sock *sk) | |||
1179 | return inet6_destroy_sock(sk); | 1146 | return inet6_destroy_sock(sk); |
1180 | } | 1147 | } |
1181 | 1148 | ||
1149 | static struct timewait_sock_ops dccp6_timewait_sock_ops = { | ||
1150 | .twsk_obj_size = sizeof(struct dccp6_timewait_sock), | ||
1151 | }; | ||
1152 | |||
1182 | static struct proto dccp_v6_prot = { | 1153 | static struct proto dccp_v6_prot = { |
1183 | .name = "DCCPv6", | 1154 | .name = "DCCPv6", |
1184 | .owner = THIS_MODULE, | 1155 | .owner = THIS_MODULE, |
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c index 36db5be2a9e9..4c9e26775f72 100644 --- a/net/dccp/minisocks.c +++ b/net/dccp/minisocks.c | |||
@@ -11,6 +11,7 @@ | |||
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include <linux/dccp.h> | 13 | #include <linux/dccp.h> |
14 | #include <linux/kernel.h> | ||
14 | #include <linux/skbuff.h> | 15 | #include <linux/skbuff.h> |
15 | #include <linux/timer.h> | 16 | #include <linux/timer.h> |
16 | 17 | ||
@@ -82,8 +83,7 @@ void dccp_time_wait(struct sock *sk, int state, int timeo) | |||
82 | * socket up. We've got bigger problems than | 83 | * socket up. We've got bigger problems than |
83 | * non-graceful socket closings. | 84 | * non-graceful socket closings. |
84 | */ | 85 | */ |
85 | LIMIT_NETDEBUG(KERN_INFO "DCCP: time wait bucket " | 86 | DCCP_WARN("time wait bucket table overflow\n"); |
86 | "table overflow\n"); | ||
87 | } | 87 | } |
88 | 88 | ||
89 | dccp_done(sk); | 89 | dccp_done(sk); |
@@ -96,8 +96,8 @@ struct sock *dccp_create_openreq_child(struct sock *sk, | |||
96 | /* | 96 | /* |
97 | * Step 3: Process LISTEN state | 97 | * Step 3: Process LISTEN state |
98 | * | 98 | * |
99 | * // Generate a new socket and switch to that socket | 99 | * (* Generate a new socket and switch to that socket *) |
100 | * Set S := new socket for this port pair | 100 | * Set S := new socket for this port pair |
101 | */ | 101 | */ |
102 | struct sock *newsk = inet_csk_clone(sk, req, GFP_ATOMIC); | 102 | struct sock *newsk = inet_csk_clone(sk, req, GFP_ATOMIC); |
103 | 103 | ||
@@ -146,9 +146,9 @@ out_free: | |||
146 | /* | 146 | /* |
147 | * Step 3: Process LISTEN state | 147 | * Step 3: Process LISTEN state |
148 | * | 148 | * |
149 | * Choose S.ISS (initial seqno) or set from Init Cookie | 149 | * Choose S.ISS (initial seqno) or set from Init Cookies |
150 | * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init | 150 | * Initialize S.GAR := S.ISS |
151 | * Cookie | 151 | * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookies |
152 | */ | 152 | */ |
153 | 153 | ||
154 | /* See dccp_v4_conn_request */ | 154 | /* See dccp_v4_conn_request */ |
@@ -194,15 +194,17 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb, | |||
194 | 194 | ||
195 | /* Check for retransmitted REQUEST */ | 195 | /* Check for retransmitted REQUEST */ |
196 | if (dccp_hdr(skb)->dccph_type == DCCP_PKT_REQUEST) { | 196 | if (dccp_hdr(skb)->dccph_type == DCCP_PKT_REQUEST) { |
197 | if (after48(DCCP_SKB_CB(skb)->dccpd_seq, | 197 | struct dccp_request_sock *dreq = dccp_rsk(req); |
198 | dccp_rsk(req)->dreq_isr)) { | ||
199 | struct dccp_request_sock *dreq = dccp_rsk(req); | ||
200 | 198 | ||
199 | if (after48(DCCP_SKB_CB(skb)->dccpd_seq, dreq->dreq_isr)) { | ||
201 | dccp_pr_debug("Retransmitted REQUEST\n"); | 200 | dccp_pr_debug("Retransmitted REQUEST\n"); |
202 | /* Send another RESPONSE packet */ | 201 | dreq->dreq_isr = DCCP_SKB_CB(skb)->dccpd_seq; |
203 | dccp_set_seqno(&dreq->dreq_iss, dreq->dreq_iss + 1); | 202 | /* |
204 | dccp_set_seqno(&dreq->dreq_isr, | 203 | * Send another RESPONSE packet |
205 | DCCP_SKB_CB(skb)->dccpd_seq); | 204 | * To protect against Request floods, increment retrans |
205 | * counter (backoff, monitored by dccp_response_timer). | ||
206 | */ | ||
207 | req->retrans++; | ||
206 | req->rsk_ops->rtx_syn_ack(sk, req, NULL); | 208 | req->rsk_ops->rtx_syn_ack(sk, req, NULL); |
207 | } | 209 | } |
208 | /* Network Duplicate, discard packet */ | 210 | /* Network Duplicate, discard packet */ |
@@ -242,7 +244,7 @@ listen_overflow: | |||
242 | DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY; | 244 | DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY; |
243 | drop: | 245 | drop: |
244 | if (dccp_hdr(skb)->dccph_type != DCCP_PKT_RESET) | 246 | if (dccp_hdr(skb)->dccph_type != DCCP_PKT_RESET) |
245 | req->rsk_ops->send_reset(skb); | 247 | req->rsk_ops->send_reset(sk, skb); |
246 | 248 | ||
247 | inet_csk_reqsk_queue_drop(sk, req, prev); | 249 | inet_csk_reqsk_queue_drop(sk, req, prev); |
248 | goto out; | 250 | goto out; |
@@ -282,3 +284,19 @@ int dccp_child_process(struct sock *parent, struct sock *child, | |||
282 | } | 284 | } |
283 | 285 | ||
284 | EXPORT_SYMBOL_GPL(dccp_child_process); | 286 | EXPORT_SYMBOL_GPL(dccp_child_process); |
287 | |||
288 | void dccp_reqsk_send_ack(struct sk_buff *skb, struct request_sock *rsk) | ||
289 | { | ||
290 | DCCP_BUG("DCCP-ACK packets are never sent in LISTEN/RESPOND state"); | ||
291 | } | ||
292 | |||
293 | EXPORT_SYMBOL_GPL(dccp_reqsk_send_ack); | ||
294 | |||
295 | void dccp_reqsk_init(struct request_sock *req, struct sk_buff *skb) | ||
296 | { | ||
297 | inet_rsk(req)->rmt_port = dccp_hdr(skb)->dccph_sport; | ||
298 | inet_rsk(req)->acked = 0; | ||
299 | req->rcv_wnd = sysctl_dccp_feat_sequence_window; | ||
300 | } | ||
301 | |||
302 | EXPORT_SYMBOL_GPL(dccp_reqsk_init); | ||
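The dccp_check_req() change above no longer bumps the ISS for a retransmitted Request; it just refreshes dreq_isr and increments req->retrans so the response timer backs off. A rough, purely illustrative sketch of that kind of exponential backoff (the real logic lives in the DCCP response timer, and the constants below are stand-ins, not the kernel's values):

/* Illustrative backoff for re-sent RESPONSE packets: the timeout doubles
 * with each retransmission and is capped at an upper bound. */
#define RESPONSE_TIMEOUT_INIT_MS  3000u   /* stand-in for the initial timeout */
#define RESPONSE_TIMEOUT_MAX_MS  64000u   /* stand-in for the upper bound */

static unsigned int response_timeout_ms(unsigned int retrans)
{
	unsigned int t = RESPONSE_TIMEOUT_INIT_MS;

	while (retrans-- > 0 && t < RESPONSE_TIMEOUT_MAX_MS)
		t *= 2;
	return t < RESPONSE_TIMEOUT_MAX_MS ? t : RESPONSE_TIMEOUT_MAX_MS;
}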
diff --git a/net/dccp/options.c b/net/dccp/options.c index fb0db1f7cd7b..f398b43bc055 100644 --- a/net/dccp/options.c +++ b/net/dccp/options.c | |||
@@ -22,23 +22,23 @@ | |||
22 | #include "dccp.h" | 22 | #include "dccp.h" |
23 | #include "feat.h" | 23 | #include "feat.h" |
24 | 24 | ||
25 | int dccp_feat_default_sequence_window = DCCPF_INITIAL_SEQUENCE_WINDOW; | 25 | int sysctl_dccp_feat_sequence_window = DCCPF_INITIAL_SEQUENCE_WINDOW; |
26 | int dccp_feat_default_rx_ccid = DCCPF_INITIAL_CCID; | 26 | int sysctl_dccp_feat_rx_ccid = DCCPF_INITIAL_CCID; |
27 | int dccp_feat_default_tx_ccid = DCCPF_INITIAL_CCID; | 27 | int sysctl_dccp_feat_tx_ccid = DCCPF_INITIAL_CCID; |
28 | int dccp_feat_default_ack_ratio = DCCPF_INITIAL_ACK_RATIO; | 28 | int sysctl_dccp_feat_ack_ratio = DCCPF_INITIAL_ACK_RATIO; |
29 | int dccp_feat_default_send_ack_vector = DCCPF_INITIAL_SEND_ACK_VECTOR; | 29 | int sysctl_dccp_feat_send_ack_vector = DCCPF_INITIAL_SEND_ACK_VECTOR; |
30 | int dccp_feat_default_send_ndp_count = DCCPF_INITIAL_SEND_NDP_COUNT; | 30 | int sysctl_dccp_feat_send_ndp_count = DCCPF_INITIAL_SEND_NDP_COUNT; |
31 | 31 | ||
32 | EXPORT_SYMBOL_GPL(dccp_feat_default_sequence_window); | 32 | EXPORT_SYMBOL_GPL(sysctl_dccp_feat_sequence_window); |
33 | 33 | ||
34 | void dccp_minisock_init(struct dccp_minisock *dmsk) | 34 | void dccp_minisock_init(struct dccp_minisock *dmsk) |
35 | { | 35 | { |
36 | dmsk->dccpms_sequence_window = dccp_feat_default_sequence_window; | 36 | dmsk->dccpms_sequence_window = sysctl_dccp_feat_sequence_window; |
37 | dmsk->dccpms_rx_ccid = dccp_feat_default_rx_ccid; | 37 | dmsk->dccpms_rx_ccid = sysctl_dccp_feat_rx_ccid; |
38 | dmsk->dccpms_tx_ccid = dccp_feat_default_tx_ccid; | 38 | dmsk->dccpms_tx_ccid = sysctl_dccp_feat_tx_ccid; |
39 | dmsk->dccpms_ack_ratio = dccp_feat_default_ack_ratio; | 39 | dmsk->dccpms_ack_ratio = sysctl_dccp_feat_ack_ratio; |
40 | dmsk->dccpms_send_ack_vector = dccp_feat_default_send_ack_vector; | 40 | dmsk->dccpms_send_ack_vector = sysctl_dccp_feat_send_ack_vector; |
41 | dmsk->dccpms_send_ndp_count = dccp_feat_default_send_ndp_count; | 41 | dmsk->dccpms_send_ndp_count = sysctl_dccp_feat_send_ndp_count; |
42 | } | 42 | } |
43 | 43 | ||
44 | static u32 dccp_decode_value_var(const unsigned char *bf, const u8 len) | 44 | static u32 dccp_decode_value_var(const unsigned char *bf, const u8 len) |
@@ -60,12 +60,9 @@ static u32 dccp_decode_value_var(const unsigned char *bf, const u8 len) | |||
60 | int dccp_parse_options(struct sock *sk, struct sk_buff *skb) | 60 | int dccp_parse_options(struct sock *sk, struct sk_buff *skb) |
61 | { | 61 | { |
62 | struct dccp_sock *dp = dccp_sk(sk); | 62 | struct dccp_sock *dp = dccp_sk(sk); |
63 | #ifdef CONFIG_IP_DCCP_DEBUG | ||
64 | const char *debug_prefix = dp->dccps_role == DCCP_ROLE_CLIENT ? | ||
65 | "CLIENT rx opt: " : "server rx opt: "; | ||
66 | #endif | ||
67 | const struct dccp_hdr *dh = dccp_hdr(skb); | 63 | const struct dccp_hdr *dh = dccp_hdr(skb); |
68 | const u8 pkt_type = DCCP_SKB_CB(skb)->dccpd_type; | 64 | const u8 pkt_type = DCCP_SKB_CB(skb)->dccpd_type; |
65 | u64 ackno = DCCP_SKB_CB(skb)->dccpd_ack_seq; | ||
69 | unsigned char *options = (unsigned char *)dh + dccp_hdr_len(skb); | 66 | unsigned char *options = (unsigned char *)dh + dccp_hdr_len(skb); |
70 | unsigned char *opt_ptr = options; | 67 | unsigned char *opt_ptr = options; |
71 | const unsigned char *opt_end = (unsigned char *)dh + | 68 | const unsigned char *opt_end = (unsigned char *)dh + |
@@ -119,7 +116,7 @@ int dccp_parse_options(struct sock *sk, struct sk_buff *skb) | |||
119 | goto out_invalid_option; | 116 | goto out_invalid_option; |
120 | 117 | ||
121 | opt_recv->dccpor_ndp = dccp_decode_value_var(value, len); | 118 | opt_recv->dccpor_ndp = dccp_decode_value_var(value, len); |
122 | dccp_pr_debug("%sNDP count=%d\n", debug_prefix, | 119 | dccp_pr_debug("%s rx opt: NDP count=%d\n", dccp_role(sk), |
123 | opt_recv->dccpor_ndp); | 120 | opt_recv->dccpor_ndp); |
124 | break; | 121 | break; |
125 | case DCCPO_CHANGE_L: | 122 | case DCCPO_CHANGE_L: |
@@ -153,7 +150,7 @@ int dccp_parse_options(struct sock *sk, struct sk_buff *skb) | |||
153 | break; | 150 | break; |
154 | 151 | ||
155 | if (dccp_msk(sk)->dccpms_send_ack_vector && | 152 | if (dccp_msk(sk)->dccpms_send_ack_vector && |
156 | dccp_ackvec_parse(sk, skb, opt, value, len)) | 153 | dccp_ackvec_parse(sk, skb, &ackno, opt, value, len)) |
157 | goto out_invalid_option; | 154 | goto out_invalid_option; |
158 | break; | 155 | break; |
159 | case DCCPO_TIMESTAMP: | 156 | case DCCPO_TIMESTAMP: |
@@ -165,8 +162,8 @@ int dccp_parse_options(struct sock *sk, struct sk_buff *skb) | |||
165 | dp->dccps_timestamp_echo = opt_recv->dccpor_timestamp; | 162 | dp->dccps_timestamp_echo = opt_recv->dccpor_timestamp; |
166 | dccp_timestamp(sk, &dp->dccps_timestamp_time); | 163 | dccp_timestamp(sk, &dp->dccps_timestamp_time); |
167 | 164 | ||
168 | dccp_pr_debug("%sTIMESTAMP=%u, ackno=%llu\n", | 165 | dccp_pr_debug("%s rx opt: TIMESTAMP=%u, ackno=%llu\n", |
169 | debug_prefix, opt_recv->dccpor_timestamp, | 166 | dccp_role(sk), opt_recv->dccpor_timestamp, |
170 | (unsigned long long) | 167 | (unsigned long long) |
171 | DCCP_SKB_CB(skb)->dccpd_ack_seq); | 168 | DCCP_SKB_CB(skb)->dccpd_ack_seq); |
172 | break; | 169 | break; |
@@ -176,8 +173,8 @@ int dccp_parse_options(struct sock *sk, struct sk_buff *skb) | |||
176 | 173 | ||
177 | opt_recv->dccpor_timestamp_echo = ntohl(*(__be32 *)value); | 174 | opt_recv->dccpor_timestamp_echo = ntohl(*(__be32 *)value); |
178 | 175 | ||
179 | dccp_pr_debug("%sTIMESTAMP_ECHO=%u, len=%d, ackno=%llu, ", | 176 | dccp_pr_debug("%s rx opt: TIMESTAMP_ECHO=%u, len=%d, " |
180 | debug_prefix, | 177 | "ackno=%llu, ", dccp_role(sk), |
181 | opt_recv->dccpor_timestamp_echo, | 178 | opt_recv->dccpor_timestamp_echo, |
182 | len + 2, | 179 | len + 2, |
183 | (unsigned long long) | 180 | (unsigned long long) |
@@ -211,8 +208,8 @@ int dccp_parse_options(struct sock *sk, struct sk_buff *skb) | |||
211 | if (elapsed_time > opt_recv->dccpor_elapsed_time) | 208 | if (elapsed_time > opt_recv->dccpor_elapsed_time) |
212 | opt_recv->dccpor_elapsed_time = elapsed_time; | 209 | opt_recv->dccpor_elapsed_time = elapsed_time; |
213 | 210 | ||
214 | dccp_pr_debug("%sELAPSED_TIME=%d\n", debug_prefix, | 211 | dccp_pr_debug("%s rx opt: ELAPSED_TIME=%d\n", |
215 | elapsed_time); | 212 | dccp_role(sk), elapsed_time); |
216 | break; | 213 | break; |
217 | /* | 214 | /* |
218 | * From RFC 4340, sec. 10.3: | 215 | * From RFC 4340, sec. 10.3: |
@@ -242,9 +239,8 @@ int dccp_parse_options(struct sock *sk, struct sk_buff *skb) | |||
242 | } | 239 | } |
243 | break; | 240 | break; |
244 | default: | 241 | default: |
245 | pr_info("DCCP(%p): option %d(len=%d) not " | 242 | DCCP_CRIT("DCCP(%p): option %d(len=%d) not " |
246 | "implemented, ignoring\n", | 243 | "implemented, ignoring", sk, opt, len); |
247 | sk, opt, len); | ||
248 | break; | 244 | break; |
249 | } | 245 | } |
250 | 246 | ||
@@ -261,7 +257,7 @@ int dccp_parse_options(struct sock *sk, struct sk_buff *skb) | |||
261 | out_invalid_option: | 257 | out_invalid_option: |
262 | DCCP_INC_STATS_BH(DCCP_MIB_INVALIDOPT); | 258 | DCCP_INC_STATS_BH(DCCP_MIB_INVALIDOPT); |
263 | DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_OPTION_ERROR; | 259 | DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_OPTION_ERROR; |
264 | pr_info("DCCP(%p): invalid option %d, len=%d\n", sk, opt, len); | 260 | DCCP_WARN("DCCP(%p): invalid option %d, len=%d", sk, opt, len); |
265 | return -1; | 261 | return -1; |
266 | } | 262 | } |
267 | 263 | ||
@@ -451,8 +447,7 @@ static int dccp_insert_feat_opt(struct sk_buff *skb, u8 type, u8 feat, | |||
451 | u8 *to; | 447 | u8 *to; |
452 | 448 | ||
453 | if (DCCP_SKB_CB(skb)->dccpd_opt_len + len + 3 > DCCP_MAX_OPT_LEN) { | 449 | if (DCCP_SKB_CB(skb)->dccpd_opt_len + len + 3 > DCCP_MAX_OPT_LEN) { |
454 | LIMIT_NETDEBUG(KERN_INFO "DCCP: packet too small" | 450 | DCCP_WARN("packet too small for feature %d option!\n", feat); |
455 | " to insert feature %d option!\n", feat); | ||
456 | return -1; | 451 | return -1; |
457 | } | 452 | } |
458 | 453 | ||
@@ -465,8 +460,10 @@ static int dccp_insert_feat_opt(struct sk_buff *skb, u8 type, u8 feat, | |||
465 | 460 | ||
466 | if (len) | 461 | if (len) |
467 | memcpy(to, val, len); | 462 | memcpy(to, val, len); |
468 | dccp_pr_debug("option %d feat %d len %d\n", type, feat, len); | ||
469 | 463 | ||
464 | dccp_pr_debug("%s(%s (%d), ...), length %d\n", | ||
465 | dccp_feat_typename(type), | ||
466 | dccp_feat_name(feat), feat, len); | ||
470 | return 0; | 467 | return 0; |
471 | } | 468 | } |
472 | 469 | ||
diff --git a/net/dccp/output.c b/net/dccp/output.c index 7102e3aed4ca..400c30b6fcae 100644 --- a/net/dccp/output.c +++ b/net/dccp/output.c | |||
@@ -88,16 +88,15 @@ static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb) | |||
88 | return -EPROTO; | 88 | return -EPROTO; |
89 | } | 89 | } |
90 | 90 | ||
91 | skb->h.raw = skb_push(skb, dccp_header_size); | ||
92 | dh = dccp_hdr(skb); | ||
93 | 91 | ||
94 | /* Build DCCP header and checksum it. */ | 92 | /* Build DCCP header and checksum it. */ |
95 | memset(dh, 0, dccp_header_size); | 93 | dh = dccp_zeroed_hdr(skb, dccp_header_size); |
96 | dh->dccph_type = dcb->dccpd_type; | 94 | dh->dccph_type = dcb->dccpd_type; |
97 | dh->dccph_sport = inet->sport; | 95 | dh->dccph_sport = inet->sport; |
98 | dh->dccph_dport = inet->dport; | 96 | dh->dccph_dport = inet->dport; |
99 | dh->dccph_doff = (dccp_header_size + dcb->dccpd_opt_len) / 4; | 97 | dh->dccph_doff = (dccp_header_size + dcb->dccpd_opt_len) / 4; |
100 | dh->dccph_ccval = dcb->dccpd_ccval; | 98 | dh->dccph_ccval = dcb->dccpd_ccval; |
99 | dh->dccph_cscov = dp->dccps_pcslen; | ||
101 | /* XXX For now we're using only 48 bits sequence numbers */ | 100 | /* XXX For now we're using only 48 bits sequence numbers */ |
102 | dh->dccph_x = 1; | 101 | dh->dccph_x = 1; |
103 | 102 | ||
@@ -117,7 +116,7 @@ static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb) | |||
117 | break; | 116 | break; |
118 | } | 117 | } |
119 | 118 | ||
120 | icsk->icsk_af_ops->send_check(sk, skb->len, skb); | 119 | icsk->icsk_af_ops->send_check(sk, 0, skb); |
121 | 120 | ||
122 | if (set_ack) | 121 | if (set_ack) |
123 | dccp_event_ack_sent(sk); | 122 | dccp_event_ack_sent(sk); |
@@ -125,17 +124,8 @@ static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb) | |||
125 | DCCP_INC_STATS(DCCP_MIB_OUTSEGS); | 124 | DCCP_INC_STATS(DCCP_MIB_OUTSEGS); |
126 | 125 | ||
127 | memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); | 126 | memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); |
128 | err = icsk->icsk_af_ops->queue_xmit(skb, 0); | 127 | err = icsk->icsk_af_ops->queue_xmit(skb, sk, 0); |
129 | if (err <= 0) | 128 | return net_xmit_eval(err); |
130 | return err; | ||
131 | |||
132 | /* NET_XMIT_CN is special. It does not guarantee, | ||
133 | * that this packet is lost. It tells that device | ||
134 | * is about to start to drop packets or already | ||
135 | * drops some packets of the same priority and | ||
136 | * invokes us to send less aggressively. | ||
137 | */ | ||
138 | return err == NET_XMIT_CN ? 0 : err; | ||
139 | } | 129 | } |
140 | return -ENOBUFS; | 130 | return -ENOBUFS; |
141 | } | 131 | } |
@@ -205,8 +195,7 @@ static int dccp_wait_for_ccid(struct sock *sk, struct sk_buff *skb, | |||
205 | if (signal_pending(current)) | 195 | if (signal_pending(current)) |
206 | goto do_interrupted; | 196 | goto do_interrupted; |
207 | 197 | ||
208 | rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb, | 198 | rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb); |
209 | skb->len); | ||
210 | if (rc <= 0) | 199 | if (rc <= 0) |
211 | break; | 200 | break; |
212 | delay = msecs_to_jiffies(rc); | 201 | delay = msecs_to_jiffies(rc); |
@@ -251,25 +240,23 @@ void dccp_write_xmit(struct sock *sk, int block) | |||
251 | { | 240 | { |
252 | struct dccp_sock *dp = dccp_sk(sk); | 241 | struct dccp_sock *dp = dccp_sk(sk); |
253 | struct sk_buff *skb; | 242 | struct sk_buff *skb; |
254 | long timeo = 30000; /* If a packet is taking longer than 2 secs | 243 | long timeo = DCCP_XMIT_TIMEO; /* If a packet is taking longer than |
255 | we have other issues */ | 244 | this we have other issues */ |
256 | 245 | ||
257 | while ((skb = skb_peek(&sk->sk_write_queue))) { | 246 | while ((skb = skb_peek(&sk->sk_write_queue))) { |
258 | int err = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb, | 247 | int err = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb); |
259 | skb->len); | ||
260 | 248 | ||
261 | if (err > 0) { | 249 | if (err > 0) { |
262 | if (!block) { | 250 | if (!block) { |
263 | sk_reset_timer(sk, &dp->dccps_xmit_timer, | 251 | sk_reset_timer(sk, &dp->dccps_xmit_timer, |
264 | msecs_to_jiffies(err)+jiffies); | 252 | msecs_to_jiffies(err)+jiffies); |
265 | break; | 253 | break; |
266 | } else | 254 | } else { |
267 | err = dccp_wait_for_ccid(sk, skb, &timeo); | 255 | err = dccp_wait_for_ccid(sk, skb, &timeo); |
268 | if (err) { | 256 | timeo = DCCP_XMIT_TIMEO; |
269 | printk(KERN_CRIT "%s:err at dccp_wait_for_ccid" | ||
270 | " %d\n", __FUNCTION__, err); | ||
271 | dump_stack(); | ||
272 | } | 257 | } |
258 | if (err) | ||
259 | DCCP_BUG("err=%d after dccp_wait_for_ccid", err); | ||
273 | } | 260 | } |
274 | 261 | ||
275 | skb_dequeue(&sk->sk_write_queue); | 262 | skb_dequeue(&sk->sk_write_queue); |
@@ -291,12 +278,9 @@ void dccp_write_xmit(struct sock *sk, int block) | |||
291 | 278 | ||
292 | err = dccp_transmit_skb(sk, skb); | 279 | err = dccp_transmit_skb(sk, skb); |
293 | ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, 0, len); | 280 | ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, 0, len); |
294 | if (err) { | 281 | if (err) |
295 | printk(KERN_CRIT "%s:err from " | 282 | DCCP_BUG("err=%d after ccid_hc_tx_packet_sent", |
296 | "ccid_hc_tx_packet_sent %d\n", | 283 | err); |
297 | __FUNCTION__, err); | ||
298 | dump_stack(); | ||
299 | } | ||
300 | } else | 284 | } else |
301 | kfree(skb); | 285 | kfree(skb); |
302 | } | 286 | } |
@@ -329,9 +313,10 @@ struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst, | |||
329 | skb_reserve(skb, sk->sk_prot->max_header); | 313 | skb_reserve(skb, sk->sk_prot->max_header); |
330 | 314 | ||
331 | skb->dst = dst_clone(dst); | 315 | skb->dst = dst_clone(dst); |
332 | skb->csum = 0; | ||
333 | 316 | ||
334 | dreq = dccp_rsk(req); | 317 | dreq = dccp_rsk(req); |
318 | if (inet_rsk(req)->acked) /* increase ISS upon retransmission */ | ||
319 | dccp_inc_seqno(&dreq->dreq_iss); | ||
335 | DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_RESPONSE; | 320 | DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_RESPONSE; |
336 | DCCP_SKB_CB(skb)->dccpd_seq = dreq->dreq_iss; | 321 | DCCP_SKB_CB(skb)->dccpd_seq = dreq->dreq_iss; |
337 | 322 | ||
@@ -340,10 +325,8 @@ struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst, | |||
340 | return NULL; | 325 | return NULL; |
341 | } | 326 | } |
342 | 327 | ||
343 | skb->h.raw = skb_push(skb, dccp_header_size); | 328 | /* Build and checksum header */ |
344 | 329 | dh = dccp_zeroed_hdr(skb, dccp_header_size); | |
345 | dh = dccp_hdr(skb); | ||
346 | memset(dh, 0, dccp_header_size); | ||
347 | 330 | ||
348 | dh->dccph_sport = inet_sk(sk)->sport; | 331 | dh->dccph_sport = inet_sk(sk)->sport; |
349 | dh->dccph_dport = inet_rsk(req)->rmt_port; | 332 | dh->dccph_dport = inet_rsk(req)->rmt_port; |
@@ -355,6 +338,10 @@ struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst, | |||
355 | dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dreq->dreq_isr); | 338 | dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dreq->dreq_isr); |
356 | dccp_hdr_response(skb)->dccph_resp_service = dreq->dreq_service; | 339 | dccp_hdr_response(skb)->dccph_resp_service = dreq->dreq_service; |
357 | 340 | ||
341 | dccp_csum_outgoing(skb); | ||
342 | |||
343 | /* We use `acked' to remember that a Response was already sent. */ | ||
344 | inet_rsk(req)->acked = 1; | ||
358 | DCCP_INC_STATS(DCCP_MIB_OUTSEGS); | 345 | DCCP_INC_STATS(DCCP_MIB_OUTSEGS); |
359 | return skb; | 346 | return skb; |
360 | } | 347 | } |
@@ -379,7 +366,6 @@ static struct sk_buff *dccp_make_reset(struct sock *sk, struct dst_entry *dst, | |||
379 | skb_reserve(skb, sk->sk_prot->max_header); | 366 | skb_reserve(skb, sk->sk_prot->max_header); |
380 | 367 | ||
381 | skb->dst = dst_clone(dst); | 368 | skb->dst = dst_clone(dst); |
382 | skb->csum = 0; | ||
383 | 369 | ||
384 | dccp_inc_seqno(&dp->dccps_gss); | 370 | dccp_inc_seqno(&dp->dccps_gss); |
385 | 371 | ||
@@ -392,10 +378,7 @@ static struct sk_buff *dccp_make_reset(struct sock *sk, struct dst_entry *dst, | |||
392 | return NULL; | 378 | return NULL; |
393 | } | 379 | } |
394 | 380 | ||
395 | skb->h.raw = skb_push(skb, dccp_header_size); | 381 | dh = dccp_zeroed_hdr(skb, dccp_header_size); |
396 | |||
397 | dh = dccp_hdr(skb); | ||
398 | memset(dh, 0, dccp_header_size); | ||
399 | 382 | ||
400 | dh->dccph_sport = inet_sk(sk)->sport; | 383 | dh->dccph_sport = inet_sk(sk)->sport; |
401 | dh->dccph_dport = inet_sk(sk)->dport; | 384 | dh->dccph_dport = inet_sk(sk)->dport; |
@@ -407,7 +390,7 @@ static struct sk_buff *dccp_make_reset(struct sock *sk, struct dst_entry *dst, | |||
407 | dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dp->dccps_gsr); | 390 | dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dp->dccps_gsr); |
408 | 391 | ||
409 | dccp_hdr_reset(skb)->dccph_reset_code = code; | 392 | dccp_hdr_reset(skb)->dccph_reset_code = code; |
410 | inet_csk(sk)->icsk_af_ops->send_check(sk, skb->len, skb); | 393 | inet_csk(sk)->icsk_af_ops->send_check(sk, 0, skb); |
411 | 394 | ||
412 | DCCP_INC_STATS(DCCP_MIB_OUTSEGS); | 395 | DCCP_INC_STATS(DCCP_MIB_OUTSEGS); |
413 | return skb; | 396 | return skb; |
@@ -426,9 +409,8 @@ int dccp_send_reset(struct sock *sk, enum dccp_reset_codes code) | |||
426 | code); | 409 | code); |
427 | if (skb != NULL) { | 410 | if (skb != NULL) { |
428 | memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); | 411 | memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); |
429 | err = inet_csk(sk)->icsk_af_ops->queue_xmit(skb, 0); | 412 | err = inet_csk(sk)->icsk_af_ops->queue_xmit(skb, sk, 0); |
430 | if (err == NET_XMIT_CN) | 413 | return net_xmit_eval(err); |
431 | err = 0; | ||
432 | } | 414 | } |
433 | } | 415 | } |
434 | 416 | ||
@@ -449,7 +431,6 @@ static inline void dccp_connect_init(struct sock *sk) | |||
449 | 431 | ||
450 | dccp_sync_mss(sk, dst_mtu(dst)); | 432 | dccp_sync_mss(sk, dst_mtu(dst)); |
451 | 433 | ||
452 | dccp_update_gss(sk, dp->dccps_iss); | ||
453 | /* | 434 | /* |
454 | * SWL and AWL are initially adjusted so that they are not less than | 435 | * SWL and AWL are initially adjusted so that they are not less than |
455 | * the initial Sequence Numbers received and sent, respectively: | 436 | * the initial Sequence Numbers received and sent, respectively: |
@@ -458,8 +439,13 @@ static inline void dccp_connect_init(struct sock *sk) | |||
458 | * These adjustments MUST be applied only at the beginning of the | 439 | * These adjustments MUST be applied only at the beginning of the |
459 | * connection. | 440 | * connection. |
460 | */ | 441 | */ |
442 | dccp_update_gss(sk, dp->dccps_iss); | ||
461 | dccp_set_seqno(&dp->dccps_awl, max48(dp->dccps_awl, dp->dccps_iss)); | 443 | dccp_set_seqno(&dp->dccps_awl, max48(dp->dccps_awl, dp->dccps_iss)); |
462 | 444 | ||
445 | /* S.GAR - greatest valid acknowledgement number received on a non-Sync; | ||
446 | * initialized to S.ISS (sec. 8.5) */ | ||
447 | dp->dccps_gar = dp->dccps_iss; | ||
448 | |||
463 | icsk->icsk_retransmits = 0; | 449 | icsk->icsk_retransmits = 0; |
464 | init_timer(&dp->dccps_xmit_timer); | 450 | init_timer(&dp->dccps_xmit_timer); |
465 | dp->dccps_xmit_timer.data = (unsigned long)sk; | 451 | dp->dccps_xmit_timer.data = (unsigned long)sk; |
@@ -481,7 +467,6 @@ int dccp_connect(struct sock *sk) | |||
481 | skb_reserve(skb, sk->sk_prot->max_header); | 467 | skb_reserve(skb, sk->sk_prot->max_header); |
482 | 468 | ||
483 | DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_REQUEST; | 469 | DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_REQUEST; |
484 | skb->csum = 0; | ||
485 | 470 | ||
486 | dccp_skb_entail(sk, skb); | 471 | dccp_skb_entail(sk, skb); |
487 | dccp_transmit_skb(sk, skb_clone(skb, GFP_KERNEL)); | 472 | dccp_transmit_skb(sk, skb_clone(skb, GFP_KERNEL)); |
@@ -513,7 +498,6 @@ void dccp_send_ack(struct sock *sk) | |||
513 | 498 | ||
514 | /* Reserve space for headers */ | 499 | /* Reserve space for headers */ |
515 | skb_reserve(skb, sk->sk_prot->max_header); | 500 | skb_reserve(skb, sk->sk_prot->max_header); |
516 | skb->csum = 0; | ||
517 | DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_ACK; | 501 | DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_ACK; |
518 | dccp_transmit_skb(sk, skb); | 502 | dccp_transmit_skb(sk, skb); |
519 | } | 503 | } |
@@ -567,7 +551,6 @@ void dccp_send_sync(struct sock *sk, const u64 seq, | |||
567 | 551 | ||
568 | /* Reserve space for headers and prepare control bits. */ | 552 | /* Reserve space for headers and prepare control bits. */ |
569 | skb_reserve(skb, sk->sk_prot->max_header); | 553 | skb_reserve(skb, sk->sk_prot->max_header); |
570 | skb->csum = 0; | ||
571 | DCCP_SKB_CB(skb)->dccpd_type = pkt_type; | 554 | DCCP_SKB_CB(skb)->dccpd_type = pkt_type; |
572 | DCCP_SKB_CB(skb)->dccpd_seq = seq; | 555 | DCCP_SKB_CB(skb)->dccpd_seq = seq; |
573 | 556 | ||
@@ -593,7 +576,6 @@ void dccp_send_close(struct sock *sk, const int active) | |||
593 | 576 | ||
594 | /* Reserve space for headers and prepare control bits. */ | 577 | /* Reserve space for headers and prepare control bits. */ |
595 | skb_reserve(skb, sk->sk_prot->max_header); | 578 | skb_reserve(skb, sk->sk_prot->max_header); |
596 | skb->csum = 0; | ||
597 | DCCP_SKB_CB(skb)->dccpd_type = dp->dccps_role == DCCP_ROLE_CLIENT ? | 579 | DCCP_SKB_CB(skb)->dccpd_type = dp->dccps_role == DCCP_ROLE_CLIENT ? |
598 | DCCP_PKT_CLOSE : DCCP_PKT_CLOSEREQ; | 580 | DCCP_PKT_CLOSE : DCCP_PKT_CLOSEREQ; |
599 | 581 | ||
diff --git a/net/dccp/probe.c b/net/dccp/probe.c index 146496fce2e2..f81e37de35d5 100644 --- a/net/dccp/probe.c +++ b/net/dccp/probe.c | |||
@@ -106,8 +106,10 @@ static int jdccp_sendmsg(struct kiocb *iocb, struct sock *sk, | |||
106 | } | 106 | } |
107 | 107 | ||
108 | static struct jprobe dccp_send_probe = { | 108 | static struct jprobe dccp_send_probe = { |
109 | .kp = { .addr = (kprobe_opcode_t *)&dccp_sendmsg, }, | 109 | .kp = { |
110 | .entry = (kprobe_opcode_t *)&jdccp_sendmsg, | 110 | .symbol_name = "dccp_sendmsg", |
111 | }, | ||
112 | .entry = JPROBE_ENTRY(jdccp_sendmsg), | ||
111 | }; | 113 | }; |
112 | 114 | ||
113 | static int dccpprobe_open(struct inode *inode, struct file *file) | 115 | static int dccpprobe_open(struct inode *inode, struct file *file) |
@@ -160,6 +162,8 @@ static __init int dccpprobe_init(void) | |||
160 | init_waitqueue_head(&dccpw.wait); | 162 | init_waitqueue_head(&dccpw.wait); |
161 | spin_lock_init(&dccpw.lock); | 163 | spin_lock_init(&dccpw.lock); |
162 | dccpw.fifo = kfifo_alloc(bufsize, GFP_KERNEL, &dccpw.lock); | 164 | dccpw.fifo = kfifo_alloc(bufsize, GFP_KERNEL, &dccpw.lock); |
165 | if (IS_ERR(dccpw.fifo)) | ||
166 | return PTR_ERR(dccpw.fifo); | ||
163 | 167 | ||
164 | if (!proc_net_fops_create(procname, S_IRUSR, &dccpprobe_fops)) | 168 | if (!proc_net_fops_create(procname, S_IRUSR, &dccpprobe_fops)) |
165 | goto err0; | 169 | goto err0; |
diff --git a/net/dccp/proto.c b/net/dccp/proto.c index 72cbdcfc2c65..5ec47d9ee447 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c | |||
@@ -52,6 +52,9 @@ struct inet_hashinfo __cacheline_aligned dccp_hashinfo = { | |||
52 | 52 | ||
53 | EXPORT_SYMBOL_GPL(dccp_hashinfo); | 53 | EXPORT_SYMBOL_GPL(dccp_hashinfo); |
54 | 54 | ||
55 | /* the maximum queue length for tx in packets. 0 is no limit */ | ||
56 | int sysctl_dccp_tx_qlen __read_mostly = 5; | ||
57 | |||
55 | void dccp_set_state(struct sock *sk, const int state) | 58 | void dccp_set_state(struct sock *sk, const int state) |
56 | { | 59 | { |
57 | const int oldstate = sk->sk_state; | 60 | const int oldstate = sk->sk_state; |
@@ -212,6 +215,7 @@ int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized) | |||
212 | 215 | ||
213 | dccp_init_xmit_timers(sk); | 216 | dccp_init_xmit_timers(sk); |
214 | icsk->icsk_rto = DCCP_TIMEOUT_INIT; | 217 | icsk->icsk_rto = DCCP_TIMEOUT_INIT; |
218 | icsk->icsk_syn_retries = sysctl_dccp_request_retries; | ||
215 | sk->sk_state = DCCP_CLOSED; | 219 | sk->sk_state = DCCP_CLOSED; |
216 | sk->sk_write_space = dccp_write_space; | 220 | sk->sk_write_space = dccp_write_space; |
217 | icsk->icsk_sync_mss = dccp_sync_mss; | 221 | icsk->icsk_sync_mss = dccp_sync_mss; |
@@ -262,12 +266,12 @@ int dccp_destroy_sock(struct sock *sk) | |||
262 | 266 | ||
263 | EXPORT_SYMBOL_GPL(dccp_destroy_sock); | 267 | EXPORT_SYMBOL_GPL(dccp_destroy_sock); |
264 | 268 | ||
265 | static inline int dccp_listen_start(struct sock *sk) | 269 | static inline int dccp_listen_start(struct sock *sk, int backlog) |
266 | { | 270 | { |
267 | struct dccp_sock *dp = dccp_sk(sk); | 271 | struct dccp_sock *dp = dccp_sk(sk); |
268 | 272 | ||
269 | dp->dccps_role = DCCP_ROLE_LISTEN; | 273 | dp->dccps_role = DCCP_ROLE_LISTEN; |
270 | return inet_csk_listen_start(sk, TCP_SYNQ_HSIZE); | 274 | return inet_csk_listen_start(sk, backlog); |
271 | } | 275 | } |
272 | 276 | ||
273 | int dccp_disconnect(struct sock *sk, int flags) | 277 | int dccp_disconnect(struct sock *sk, int flags) |
@@ -451,9 +455,8 @@ out_free_val: | |||
451 | static int do_dccp_setsockopt(struct sock *sk, int level, int optname, | 455 | static int do_dccp_setsockopt(struct sock *sk, int level, int optname, |
452 | char __user *optval, int optlen) | 456 | char __user *optval, int optlen) |
453 | { | 457 | { |
454 | struct dccp_sock *dp; | 458 | struct dccp_sock *dp = dccp_sk(sk); |
455 | int err; | 459 | int val, err = 0; |
456 | int val; | ||
457 | 460 | ||
458 | if (optlen < sizeof(int)) | 461 | if (optlen < sizeof(int)) |
459 | return -EINVAL; | 462 | return -EINVAL; |
@@ -465,14 +468,11 @@ static int do_dccp_setsockopt(struct sock *sk, int level, int optname, | |||
465 | return dccp_setsockopt_service(sk, val, optval, optlen); | 468 | return dccp_setsockopt_service(sk, val, optval, optlen); |
466 | 469 | ||
467 | lock_sock(sk); | 470 | lock_sock(sk); |
468 | dp = dccp_sk(sk); | ||
469 | err = 0; | ||
470 | |||
471 | switch (optname) { | 471 | switch (optname) { |
472 | case DCCP_SOCKOPT_PACKET_SIZE: | 472 | case DCCP_SOCKOPT_PACKET_SIZE: |
473 | dp->dccps_packet_size = val; | 473 | DCCP_WARN("sockopt(PACKET_SIZE) is deprecated: fix your app\n"); |
474 | err = 0; | ||
474 | break; | 475 | break; |
475 | |||
476 | case DCCP_SOCKOPT_CHANGE_L: | 476 | case DCCP_SOCKOPT_CHANGE_L: |
477 | if (optlen != sizeof(struct dccp_so_feat)) | 477 | if (optlen != sizeof(struct dccp_so_feat)) |
478 | err = -EINVAL; | 478 | err = -EINVAL; |
@@ -481,7 +481,6 @@ static int do_dccp_setsockopt(struct sock *sk, int level, int optname, | |||
481 | (struct dccp_so_feat __user *) | 481 | (struct dccp_so_feat __user *) |
482 | optval); | 482 | optval); |
483 | break; | 483 | break; |
484 | |||
485 | case DCCP_SOCKOPT_CHANGE_R: | 484 | case DCCP_SOCKOPT_CHANGE_R: |
486 | if (optlen != sizeof(struct dccp_so_feat)) | 485 | if (optlen != sizeof(struct dccp_so_feat)) |
487 | err = -EINVAL; | 486 | err = -EINVAL; |
@@ -490,12 +489,26 @@ static int do_dccp_setsockopt(struct sock *sk, int level, int optname, | |||
490 | (struct dccp_so_feat __user *) | 489 | (struct dccp_so_feat __user *) |
491 | optval); | 490 | optval); |
492 | break; | 491 | break; |
493 | 492 | case DCCP_SOCKOPT_SEND_CSCOV: /* sender side, RFC 4340, sec. 9.2 */ | |
493 | if (val < 0 || val > 15) | ||
494 | err = -EINVAL; | ||
495 | else | ||
496 | dp->dccps_pcslen = val; | ||
497 | break; | ||
498 | case DCCP_SOCKOPT_RECV_CSCOV: /* receiver side, RFC 4340 sec. 9.2.1 */ | ||
499 | if (val < 0 || val > 15) | ||
500 | err = -EINVAL; | ||
501 | else { | ||
502 | dp->dccps_pcrlen = val; | ||
503 | /* FIXME: add feature negotiation, | ||
504 | * ChangeL(MinimumChecksumCoverage, val) */ | ||
505 | } | ||
506 | break; | ||
494 | default: | 507 | default: |
495 | err = -ENOPROTOOPT; | 508 | err = -ENOPROTOOPT; |
496 | break; | 509 | break; |
497 | } | 510 | } |
498 | 511 | ||
499 | release_sock(sk); | 512 | release_sock(sk); |
500 | return err; | 513 | return err; |
501 | } | 514 | } |
@@ -569,12 +582,17 @@ static int do_dccp_getsockopt(struct sock *sk, int level, int optname, | |||
569 | 582 | ||
570 | switch (optname) { | 583 | switch (optname) { |
571 | case DCCP_SOCKOPT_PACKET_SIZE: | 584 | case DCCP_SOCKOPT_PACKET_SIZE: |
572 | val = dp->dccps_packet_size; | 585 | DCCP_WARN("sockopt(PACKET_SIZE) is deprecated: fix your app\n"); |
573 | len = sizeof(dp->dccps_packet_size); | 586 | return 0; |
574 | break; | ||
575 | case DCCP_SOCKOPT_SERVICE: | 587 | case DCCP_SOCKOPT_SERVICE: |
576 | return dccp_getsockopt_service(sk, len, | 588 | return dccp_getsockopt_service(sk, len, |
577 | (__be32 __user *)optval, optlen); | 589 | (__be32 __user *)optval, optlen); |
590 | case DCCP_SOCKOPT_SEND_CSCOV: | ||
591 | val = dp->dccps_pcslen; | ||
592 | break; | ||
593 | case DCCP_SOCKOPT_RECV_CSCOV: | ||
594 | val = dp->dccps_pcrlen; | ||
595 | break; | ||
578 | case 128 ... 191: | 596 | case 128 ... 191: |
579 | return ccid_hc_rx_getsockopt(dp->dccps_hc_rx_ccid, sk, optname, | 597 | return ccid_hc_rx_getsockopt(dp->dccps_hc_rx_ccid, sk, optname, |
580 | len, (u32 __user *)optval, optlen); | 598 | len, (u32 __user *)optval, optlen); |
@@ -630,6 +648,13 @@ int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
630 | return -EMSGSIZE; | 648 | return -EMSGSIZE; |
631 | 649 | ||
632 | lock_sock(sk); | 650 | lock_sock(sk); |
651 | |||
652 | if (sysctl_dccp_tx_qlen && | ||
653 | (sk->sk_write_queue.qlen >= sysctl_dccp_tx_qlen)) { | ||
654 | rc = -EAGAIN; | ||
655 | goto out_release; | ||
656 | } | ||
657 | |||
633 | timeo = sock_sndtimeo(sk, noblock); | 658 | timeo = sock_sndtimeo(sk, noblock); |
634 | 659 | ||
635 | /* | 660 | /* |
@@ -788,7 +813,7 @@ int inet_dccp_listen(struct socket *sock, int backlog) | |||
788 | * FIXME: here it probably should be sk->sk_prot->listen_start | 813 | * FIXME: here it probably should be sk->sk_prot->listen_start |
789 | * see tcp_listen_start | 814 | * see tcp_listen_start |
790 | */ | 815 | */ |
791 | err = dccp_listen_start(sk); | 816 | err = dccp_listen_start(sk, backlog); |
792 | if (err) | 817 | if (err) |
793 | goto out; | 818 | goto out; |
794 | } | 819 | } |
@@ -1008,8 +1033,7 @@ static int __init dccp_init(void) | |||
1008 | } while (!dccp_hashinfo.ehash && --ehash_order > 0); | 1033 | } while (!dccp_hashinfo.ehash && --ehash_order > 0); |
1009 | 1034 | ||
1010 | if (!dccp_hashinfo.ehash) { | 1035 | if (!dccp_hashinfo.ehash) { |
1011 | printk(KERN_CRIT "Failed to allocate DCCP " | 1036 | DCCP_CRIT("Failed to allocate DCCP established hash table"); |
1012 | "established hash table\n"); | ||
1013 | goto out_free_bind_bucket_cachep; | 1037 | goto out_free_bind_bucket_cachep; |
1014 | } | 1038 | } |
1015 | 1039 | ||
@@ -1031,7 +1055,7 @@ static int __init dccp_init(void) | |||
1031 | } while (!dccp_hashinfo.bhash && --bhash_order >= 0); | 1055 | } while (!dccp_hashinfo.bhash && --bhash_order >= 0); |
1032 | 1056 | ||
1033 | if (!dccp_hashinfo.bhash) { | 1057 | if (!dccp_hashinfo.bhash) { |
1034 | printk(KERN_CRIT "Failed to allocate DCCP bind hash table\n"); | 1058 | DCCP_CRIT("Failed to allocate DCCP bind hash table"); |
1035 | goto out_free_dccp_ehash; | 1059 | goto out_free_dccp_ehash; |
1036 | } | 1060 | } |
1037 | 1061 | ||
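The proto.c hunks above introduce per-socket partial checksum coverage (DCCP_SOCKOPT_SEND_CSCOV / DCCP_SOCKOPT_RECV_CSCOV, values 0..15 per RFC 4340, sec. 9.2). A hedged userspace sketch of how an application might set them; it assumes SOCK_DCCP, SOL_DCCP and the option constants are exported by the toolchain's headers, which is not shown in this diff:

/* Userspace sketch, assuming <linux/dccp.h> provides the option constants
 * and the libc headers provide SOCK_DCCP/IPPROTO_DCCP; availability varies. */
#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/dccp.h>

#ifndef SOL_DCCP
#define SOL_DCCP 269		/* assumed value; normally from <linux/socket.h> */
#endif

int main(void)
{
	int val = 4;	/* any value in the 0..15 range accepted by the new options */
	int fd = socket(AF_INET, SOCK_DCCP, IPPROTO_DCCP);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	/* sender-side coverage (RFC 4340, sec. 9.2) */
	if (setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_SEND_CSCOV, &val, sizeof(val)) < 0)
		perror("setsockopt(DCCP_SOCKOPT_SEND_CSCOV)");
	/* receiver-side minimum coverage (RFC 4340, sec. 9.2.1) */
	if (setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_RECV_CSCOV, &val, sizeof(val)) < 0)
		perror("setsockopt(DCCP_SOCKOPT_RECV_CSCOV)");
	return 0;
}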
diff --git a/net/dccp/sysctl.c b/net/dccp/sysctl.c index 38bc157876f3..fdcfca3e9208 100644 --- a/net/dccp/sysctl.c +++ b/net/dccp/sysctl.c | |||
@@ -11,6 +11,7 @@ | |||
11 | 11 | ||
12 | #include <linux/mm.h> | 12 | #include <linux/mm.h> |
13 | #include <linux/sysctl.h> | 13 | #include <linux/sysctl.h> |
14 | #include "dccp.h" | ||
14 | #include "feat.h" | 15 | #include "feat.h" |
15 | 16 | ||
16 | #ifndef CONFIG_SYSCTL | 17 | #ifndef CONFIG_SYSCTL |
@@ -19,53 +20,76 @@ | |||
19 | 20 | ||
20 | static struct ctl_table dccp_default_table[] = { | 21 | static struct ctl_table dccp_default_table[] = { |
21 | { | 22 | { |
22 | .ctl_name = NET_DCCP_DEFAULT_SEQ_WINDOW, | ||
23 | .procname = "seq_window", | 23 | .procname = "seq_window", |
24 | .data = &dccp_feat_default_sequence_window, | 24 | .data = &sysctl_dccp_feat_sequence_window, |
25 | .maxlen = sizeof(dccp_feat_default_sequence_window), | 25 | .maxlen = sizeof(sysctl_dccp_feat_sequence_window), |
26 | .mode = 0644, | 26 | .mode = 0644, |
27 | .proc_handler = proc_dointvec, | 27 | .proc_handler = proc_dointvec, |
28 | }, | 28 | }, |
29 | { | 29 | { |
30 | .ctl_name = NET_DCCP_DEFAULT_RX_CCID, | ||
31 | .procname = "rx_ccid", | 30 | .procname = "rx_ccid", |
32 | .data = &dccp_feat_default_rx_ccid, | 31 | .data = &sysctl_dccp_feat_rx_ccid, |
33 | .maxlen = sizeof(dccp_feat_default_rx_ccid), | 32 | .maxlen = sizeof(sysctl_dccp_feat_rx_ccid), |
34 | .mode = 0644, | 33 | .mode = 0644, |
35 | .proc_handler = proc_dointvec, | 34 | .proc_handler = proc_dointvec, |
36 | }, | 35 | }, |
37 | { | 36 | { |
38 | .ctl_name = NET_DCCP_DEFAULT_TX_CCID, | ||
39 | .procname = "tx_ccid", | 37 | .procname = "tx_ccid", |
40 | .data = &dccp_feat_default_tx_ccid, | 38 | .data = &sysctl_dccp_feat_tx_ccid, |
41 | .maxlen = sizeof(dccp_feat_default_tx_ccid), | 39 | .maxlen = sizeof(sysctl_dccp_feat_tx_ccid), |
42 | .mode = 0644, | 40 | .mode = 0644, |
43 | .proc_handler = proc_dointvec, | 41 | .proc_handler = proc_dointvec, |
44 | }, | 42 | }, |
45 | { | 43 | { |
46 | .ctl_name = NET_DCCP_DEFAULT_ACK_RATIO, | ||
47 | .procname = "ack_ratio", | 44 | .procname = "ack_ratio", |
48 | .data = &dccp_feat_default_ack_ratio, | 45 | .data = &sysctl_dccp_feat_ack_ratio, |
49 | .maxlen = sizeof(dccp_feat_default_ack_ratio), | 46 | .maxlen = sizeof(sysctl_dccp_feat_ack_ratio), |
50 | .mode = 0644, | 47 | .mode = 0644, |
51 | .proc_handler = proc_dointvec, | 48 | .proc_handler = proc_dointvec, |
52 | }, | 49 | }, |
53 | { | 50 | { |
54 | .ctl_name = NET_DCCP_DEFAULT_SEND_ACKVEC, | ||
55 | .procname = "send_ackvec", | 51 | .procname = "send_ackvec", |
56 | .data = &dccp_feat_default_send_ack_vector, | 52 | .data = &sysctl_dccp_feat_send_ack_vector, |
57 | .maxlen = sizeof(dccp_feat_default_send_ack_vector), | 53 | .maxlen = sizeof(sysctl_dccp_feat_send_ack_vector), |
58 | .mode = 0644, | 54 | .mode = 0644, |
59 | .proc_handler = proc_dointvec, | 55 | .proc_handler = proc_dointvec, |
60 | }, | 56 | }, |
61 | { | 57 | { |
62 | .ctl_name = NET_DCCP_DEFAULT_SEND_NDP, | ||
63 | .procname = "send_ndp", | 58 | .procname = "send_ndp", |
64 | .data = &dccp_feat_default_send_ndp_count, | 59 | .data = &sysctl_dccp_feat_send_ndp_count, |
65 | .maxlen = sizeof(dccp_feat_default_send_ndp_count), | 60 | .maxlen = sizeof(sysctl_dccp_feat_send_ndp_count), |
66 | .mode = 0644, | 61 | .mode = 0644, |
67 | .proc_handler = proc_dointvec, | 62 | .proc_handler = proc_dointvec, |
68 | }, | 63 | }, |
64 | { | ||
65 | .procname = "request_retries", | ||
66 | .data = &sysctl_dccp_request_retries, | ||
67 | .maxlen = sizeof(sysctl_dccp_request_retries), | ||
68 | .mode = 0644, | ||
69 | .proc_handler = proc_dointvec, | ||
70 | }, | ||
71 | { | ||
72 | .procname = "retries1", | ||
73 | .data = &sysctl_dccp_retries1, | ||
74 | .maxlen = sizeof(sysctl_dccp_retries1), | ||
75 | .mode = 0644, | ||
76 | .proc_handler = proc_dointvec, | ||
77 | }, | ||
78 | { | ||
79 | .procname = "retries2", | ||
80 | .data = &sysctl_dccp_retries2, | ||
81 | .maxlen = sizeof(sysctl_dccp_retries2), | ||
82 | .mode = 0644, | ||
83 | .proc_handler = proc_dointvec, | ||
84 | }, | ||
85 | { | ||
86 | .procname = "tx_qlen", | ||
87 | .data = &sysctl_dccp_tx_qlen, | ||
88 | .maxlen = sizeof(sysctl_dccp_tx_qlen), | ||
89 | .mode = 0644, | ||
90 | .proc_handler = proc_dointvec, | ||
91 | }, | ||
92 | |||
69 | { .ctl_name = 0, } | 93 | { .ctl_name = 0, } |
70 | }; | 94 | }; |
71 | 95 | ||
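The sysctl.c hunk above adds request_retries, retries1, retries2 and tx_qlen to the DCCP default table. A small sketch that reads one of them back; the /proc/sys/net/dccp/default/ path is an assumption, since the table's registration point lies outside this hunk:

/* Sketch: read one of the new DCCP sysctls from procfs.  The mount path
 * below is an assumption and may differ on a given kernel. */
#include <stdio.h>

int main(void)
{
	const char *path = "/proc/sys/net/dccp/default/tx_qlen";	/* assumed path */
	FILE *f = fopen(path, "r");
	int qlen;

	if (f == NULL) {
		perror(path);
		return 1;
	}
	if (fscanf(f, "%d", &qlen) == 1)
		printf("tx_qlen = %d\n", qlen);	/* 0 means "no limit" per the proto.c comment */
	fclose(f);
	return 0;
}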
diff --git a/net/dccp/timer.c b/net/dccp/timer.c index 8447742f5615..e8f519e7f481 100644 --- a/net/dccp/timer.c +++ b/net/dccp/timer.c | |||
@@ -15,15 +15,10 @@ | |||
15 | 15 | ||
16 | #include "dccp.h" | 16 | #include "dccp.h" |
17 | 17 | ||
18 | static void dccp_write_timer(unsigned long data); | 18 | /* sysctl variables governing numbers of retransmission attempts */ |
19 | static void dccp_keepalive_timer(unsigned long data); | 19 | int sysctl_dccp_request_retries __read_mostly = TCP_SYN_RETRIES; |
20 | static void dccp_delack_timer(unsigned long data); | 20 | int sysctl_dccp_retries1 __read_mostly = TCP_RETR1; |
21 | 21 | int sysctl_dccp_retries2 __read_mostly = TCP_RETR2; | |
22 | void dccp_init_xmit_timers(struct sock *sk) | ||
23 | { | ||
24 | inet_csk_init_xmit_timers(sk, &dccp_write_timer, &dccp_delack_timer, | ||
25 | &dccp_keepalive_timer); | ||
26 | } | ||
27 | 22 | ||
28 | static void dccp_write_err(struct sock *sk) | 23 | static void dccp_write_err(struct sock *sk) |
29 | { | 24 | { |
@@ -44,11 +39,10 @@ static int dccp_write_timeout(struct sock *sk) | |||
44 | if (sk->sk_state == DCCP_REQUESTING || sk->sk_state == DCCP_PARTOPEN) { | 39 | if (sk->sk_state == DCCP_REQUESTING || sk->sk_state == DCCP_PARTOPEN) { |
45 | if (icsk->icsk_retransmits != 0) | 40 | if (icsk->icsk_retransmits != 0) |
46 | dst_negative_advice(&sk->sk_dst_cache); | 41 | dst_negative_advice(&sk->sk_dst_cache); |
47 | retry_until = icsk->icsk_syn_retries ? : | 42 | retry_until = icsk->icsk_syn_retries ? |
48 | /* FIXME! */ 3 /* FIXME! sysctl_tcp_syn_retries */; | 43 | : sysctl_dccp_request_retries; |
49 | } else { | 44 | } else { |
50 | if (icsk->icsk_retransmits >= | 45 | if (icsk->icsk_retransmits >= sysctl_dccp_retries1) { |
51 | /* FIXME! sysctl_tcp_retries1 */ 5 /* FIXME! */) { | ||
52 | /* NOTE. draft-ietf-tcpimpl-pmtud-01.txt requires pmtu | 46 | /* NOTE. draft-ietf-tcpimpl-pmtud-01.txt requires pmtu |
53 | black hole detection. :-( | 47 | black hole detection. :-( |
54 | 48 | ||
@@ -72,7 +66,7 @@ static int dccp_write_timeout(struct sock *sk) | |||
72 | dst_negative_advice(&sk->sk_dst_cache); | 66 | dst_negative_advice(&sk->sk_dst_cache); |
73 | } | 67 | } |
74 | 68 | ||
75 | retry_until = /* FIXME! */ 15 /* FIXME! sysctl_tcp_retries2 */; | 69 | retry_until = sysctl_dccp_retries2; |
76 | /* | 70 | /* |
77 | * FIXME: see tcp_write_timout and tcp_out_of_resources | 71 | * FIXME: see tcp_write_timout and tcp_out_of_resources |
78 | */ | 72 | */ |
@@ -86,53 +80,6 @@ static int dccp_write_timeout(struct sock *sk) | |||
86 | return 0; | 80 | return 0; |
87 | } | 81 | } |
88 | 82 | ||
89 | /* This is the same as tcp_delack_timer, sans prequeue & mem_reclaim stuff */ | ||
90 | static void dccp_delack_timer(unsigned long data) | ||
91 | { | ||
92 | struct sock *sk = (struct sock *)data; | ||
93 | struct inet_connection_sock *icsk = inet_csk(sk); | ||
94 | |||
95 | bh_lock_sock(sk); | ||
96 | if (sock_owned_by_user(sk)) { | ||
97 | /* Try again later. */ | ||
98 | icsk->icsk_ack.blocked = 1; | ||
99 | NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOCKED); | ||
100 | sk_reset_timer(sk, &icsk->icsk_delack_timer, | ||
101 | jiffies + TCP_DELACK_MIN); | ||
102 | goto out; | ||
103 | } | ||
104 | |||
105 | if (sk->sk_state == DCCP_CLOSED || | ||
106 | !(icsk->icsk_ack.pending & ICSK_ACK_TIMER)) | ||
107 | goto out; | ||
108 | if (time_after(icsk->icsk_ack.timeout, jiffies)) { | ||
109 | sk_reset_timer(sk, &icsk->icsk_delack_timer, | ||
110 | icsk->icsk_ack.timeout); | ||
111 | goto out; | ||
112 | } | ||
113 | |||
114 | icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER; | ||
115 | |||
116 | if (inet_csk_ack_scheduled(sk)) { | ||
117 | if (!icsk->icsk_ack.pingpong) { | ||
118 | /* Delayed ACK missed: inflate ATO. */ | ||
119 | icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1, | ||
120 | icsk->icsk_rto); | ||
121 | } else { | ||
122 | /* Delayed ACK missed: leave pingpong mode and | ||
123 | * deflate ATO. | ||
124 | */ | ||
125 | icsk->icsk_ack.pingpong = 0; | ||
126 | icsk->icsk_ack.ato = TCP_ATO_MIN; | ||
127 | } | ||
128 | dccp_send_ack(sk); | ||
129 | NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKS); | ||
130 | } | ||
131 | out: | ||
132 | bh_unlock_sock(sk); | ||
133 | sock_put(sk); | ||
134 | } | ||
135 | |||
136 | /* | 83 | /* |
137 | * The DCCP retransmit timer. | 84 | * The DCCP retransmit timer. |
138 | */ | 85 | */ |
@@ -142,7 +89,7 @@ static void dccp_retransmit_timer(struct sock *sk) | |||
142 | 89 | ||
143 | /* retransmit timer is used for feature negotiation throughout | 90 | /* retransmit timer is used for feature negotiation throughout |
144 | * connection. In this case, no packet is re-transmitted, but rather an | 91 | * connection. In this case, no packet is re-transmitted, but rather an |
145 | * ack is generated and pending changes are splaced into its options. | 92 | * ack is generated and pending changes are placed into its options. |
146 | */ | 93 | */ |
147 | if (sk->sk_send_head == NULL) { | 94 | if (sk->sk_send_head == NULL) { |
148 | dccp_pr_debug("feat negotiation retransmit timeout %p\n", sk); | 95 | dccp_pr_debug("feat negotiation retransmit timeout %p\n", sk); |
@@ -154,9 +101,11 @@ static void dccp_retransmit_timer(struct sock *sk) | |||
154 | /* | 101 | /* |
155 | * sk->sk_send_head has to have one skb with | 102 | * sk->sk_send_head has to have one skb with |
156 | * DCCP_SKB_CB(skb)->dccpd_type set to one of the retransmittable DCCP | 103 | * DCCP_SKB_CB(skb)->dccpd_type set to one of the retransmittable DCCP |
157 | * packet types (REQUEST, RESPONSE, the ACK in the 3way handshake | 104 | * packet types. The only packets eligible for retransmission are: |
158 | * (PARTOPEN timer), etc). | 105 | * -- Requests in client-REQUEST state (sec. 8.1.1) |
159 | */ | 106 | * -- Acks in client-PARTOPEN state (sec. 8.1.5) |
107 | * -- CloseReq in server-CLOSEREQ state (sec. 8.3) | ||
108 | * -- Close in node-CLOSING state (sec. 8.3) */ | ||
160 | BUG_TRAP(sk->sk_send_head != NULL); | 109 | BUG_TRAP(sk->sk_send_head != NULL); |
161 | 110 | ||
162 | /* | 111 | /* |
@@ -194,7 +143,7 @@ backoff: | |||
194 | icsk->icsk_rto = min(icsk->icsk_rto << 1, DCCP_RTO_MAX); | 143 | icsk->icsk_rto = min(icsk->icsk_rto << 1, DCCP_RTO_MAX); |
195 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, | 144 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, |
196 | DCCP_RTO_MAX); | 145 | DCCP_RTO_MAX); |
197 | if (icsk->icsk_retransmits > 3 /* FIXME: sysctl_dccp_retries1 */) | 146 | if (icsk->icsk_retransmits > sysctl_dccp_retries1) |
198 | __sk_dst_reset(sk); | 147 | __sk_dst_reset(sk); |
199 | out:; | 148 | out:; |
200 | } | 149 | } |
@@ -264,3 +213,56 @@ out: | |||
264 | bh_unlock_sock(sk); | 213 | bh_unlock_sock(sk); |
265 | sock_put(sk); | 214 | sock_put(sk); |
266 | } | 215 | } |
216 | |||
217 | /* This is the same as tcp_delack_timer, sans prequeue & mem_reclaim stuff */ | ||
218 | static void dccp_delack_timer(unsigned long data) | ||
219 | { | ||
220 | struct sock *sk = (struct sock *)data; | ||
221 | struct inet_connection_sock *icsk = inet_csk(sk); | ||
222 | |||
223 | bh_lock_sock(sk); | ||
224 | if (sock_owned_by_user(sk)) { | ||
225 | /* Try again later. */ | ||
226 | icsk->icsk_ack.blocked = 1; | ||
227 | NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOCKED); | ||
228 | sk_reset_timer(sk, &icsk->icsk_delack_timer, | ||
229 | jiffies + TCP_DELACK_MIN); | ||
230 | goto out; | ||
231 | } | ||
232 | |||
233 | if (sk->sk_state == DCCP_CLOSED || | ||
234 | !(icsk->icsk_ack.pending & ICSK_ACK_TIMER)) | ||
235 | goto out; | ||
236 | if (time_after(icsk->icsk_ack.timeout, jiffies)) { | ||
237 | sk_reset_timer(sk, &icsk->icsk_delack_timer, | ||
238 | icsk->icsk_ack.timeout); | ||
239 | goto out; | ||
240 | } | ||
241 | |||
242 | icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER; | ||
243 | |||
244 | if (inet_csk_ack_scheduled(sk)) { | ||
245 | if (!icsk->icsk_ack.pingpong) { | ||
246 | /* Delayed ACK missed: inflate ATO. */ | ||
247 | icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1, | ||
248 | icsk->icsk_rto); | ||
249 | } else { | ||
250 | /* Delayed ACK missed: leave pingpong mode and | ||
251 | * deflate ATO. | ||
252 | */ | ||
253 | icsk->icsk_ack.pingpong = 0; | ||
254 | icsk->icsk_ack.ato = TCP_ATO_MIN; | ||
255 | } | ||
256 | dccp_send_ack(sk); | ||
257 | NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKS); | ||
258 | } | ||
259 | out: | ||
260 | bh_unlock_sock(sk); | ||
261 | sock_put(sk); | ||
262 | } | ||
263 | |||
264 | void dccp_init_xmit_timers(struct sock *sk) | ||
265 | { | ||
266 | inet_csk_init_xmit_timers(sk, &dccp_write_timer, &dccp_delack_timer, | ||
267 | &dccp_keepalive_timer); | ||
268 | } | ||
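The timer.c hunks above replace the hard-coded retry limits with sysctl_dccp_request_retries/retries1/retries2 and keep the exponential backoff capped at DCCP_RTO_MAX. A standalone sketch of that backoff, using placeholder millisecond values rather than the kernel's constants:

/* Standalone sketch of the backoff in dccp_retransmit_timer(): the RTO
 * doubles on every expiry and is capped at a maximum, mirroring
 * icsk->icsk_rto = min(icsk->icsk_rto << 1, DCCP_RTO_MAX).  Initial and
 * maximum values are placeholders, not the kernel's constants. */
#include <stdio.h>

static unsigned int next_rto(unsigned int rto, unsigned int rto_max)
{
	unsigned int doubled = rto << 1;

	return doubled < rto_max ? doubled : rto_max;
}

int main(void)
{
	unsigned int rto = 1000, rto_max = 64000;	/* placeholder ms values */
	int retries2 = 15;	/* the old hard-coded limit now held by sysctl_dccp_retries2 */

	for (int attempt = 1; attempt <= retries2; attempt++) {
		printf("retransmit %2d: rto=%u ms\n", attempt, rto);
		rto = next_rto(rto, rto_max);
	}
	return 0;
}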
diff --git a/net/decnet/Kconfig b/net/decnet/Kconfig index 36e72cb145b0..7914fd619c5c 100644 --- a/net/decnet/Kconfig +++ b/net/decnet/Kconfig | |||
@@ -41,11 +41,3 @@ config DECNET_ROUTER | |||
41 | 41 | ||
42 | See <file:Documentation/networking/decnet.txt> for more information. | 42 | See <file:Documentation/networking/decnet.txt> for more information. |
43 | 43 | ||
44 | config DECNET_ROUTE_FWMARK | ||
45 | bool "DECnet: use FWMARK value as routing key (EXPERIMENTAL)" | ||
46 | depends on DECNET_ROUTER && NETFILTER | ||
47 | help | ||
48 | If you say Y here, you will be able to specify different routes for | ||
49 | packets with different FWMARK ("firewalling mark") values | ||
50 | (see ipchains(8), "-m" argument). | ||
51 | |||
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c index 01861feb608d..0b9d4c955154 100644 --- a/net/decnet/dn_dev.c +++ b/net/decnet/dn_dev.c | |||
@@ -38,7 +38,6 @@ | |||
38 | #include <linux/if_arp.h> | 38 | #include <linux/if_arp.h> |
39 | #include <linux/if_ether.h> | 39 | #include <linux/if_ether.h> |
40 | #include <linux/skbuff.h> | 40 | #include <linux/skbuff.h> |
41 | #include <linux/rtnetlink.h> | ||
42 | #include <linux/sysctl.h> | 41 | #include <linux/sysctl.h> |
43 | #include <linux/notifier.h> | 42 | #include <linux/notifier.h> |
44 | #include <asm/uaccess.h> | 43 | #include <asm/uaccess.h> |
@@ -47,6 +46,7 @@ | |||
47 | #include <net/dst.h> | 46 | #include <net/dst.h> |
48 | #include <net/flow.h> | 47 | #include <net/flow.h> |
49 | #include <net/fib_rules.h> | 48 | #include <net/fib_rules.h> |
49 | #include <net/netlink.h> | ||
50 | #include <net/dn.h> | 50 | #include <net/dn.h> |
51 | #include <net/dn_dev.h> | 51 | #include <net/dn_dev.h> |
52 | #include <net/dn_route.h> | 52 | #include <net/dn_route.h> |
@@ -73,7 +73,7 @@ static BLOCKING_NOTIFIER_HEAD(dnaddr_chain); | |||
73 | 73 | ||
74 | static struct dn_dev *dn_dev_create(struct net_device *dev, int *err); | 74 | static struct dn_dev *dn_dev_create(struct net_device *dev, int *err); |
75 | static void dn_dev_delete(struct net_device *dev); | 75 | static void dn_dev_delete(struct net_device *dev); |
76 | static void rtmsg_ifa(int event, struct dn_ifaddr *ifa); | 76 | static void dn_ifaddr_notify(int event, struct dn_ifaddr *ifa); |
77 | 77 | ||
78 | static int dn_eth_up(struct net_device *); | 78 | static int dn_eth_up(struct net_device *); |
79 | static void dn_eth_down(struct net_device *); | 79 | static void dn_eth_down(struct net_device *); |
@@ -255,12 +255,10 @@ static void dn_dev_sysctl_register(struct net_device *dev, struct dn_dev_parms * | |||
255 | struct dn_dev_sysctl_table *t; | 255 | struct dn_dev_sysctl_table *t; |
256 | int i; | 256 | int i; |
257 | 257 | ||
258 | t = kmalloc(sizeof(*t), GFP_KERNEL); | 258 | t = kmemdup(&dn_dev_sysctl, sizeof(*t), GFP_KERNEL); |
259 | if (t == NULL) | 259 | if (t == NULL) |
260 | return; | 260 | return; |
261 | 261 | ||
262 | memcpy(t, &dn_dev_sysctl, sizeof(*t)); | ||
263 | |||
264 | for(i = 0; i < ARRAY_SIZE(t->dn_dev_vars) - 1; i++) { | 262 | for(i = 0; i < ARRAY_SIZE(t->dn_dev_vars) - 1; i++) { |
265 | long offset = (long)t->dn_dev_vars[i].data; | 263 | long offset = (long)t->dn_dev_vars[i].data; |
266 | t->dn_dev_vars[i].data = ((char *)parms) + offset; | 264 | t->dn_dev_vars[i].data = ((char *)parms) + offset; |
@@ -442,7 +440,7 @@ static void dn_dev_del_ifa(struct dn_dev *dn_db, struct dn_ifaddr **ifap, int de | |||
442 | } | 440 | } |
443 | } | 441 | } |
444 | 442 | ||
445 | rtmsg_ifa(RTM_DELADDR, ifa1); | 443 | dn_ifaddr_notify(RTM_DELADDR, ifa1); |
446 | blocking_notifier_call_chain(&dnaddr_chain, NETDEV_DOWN, ifa1); | 444 | blocking_notifier_call_chain(&dnaddr_chain, NETDEV_DOWN, ifa1); |
447 | if (destroy) { | 445 | if (destroy) { |
448 | dn_dev_free_ifa(ifa1); | 446 | dn_dev_free_ifa(ifa1); |
@@ -477,7 +475,7 @@ static int dn_dev_insert_ifa(struct dn_dev *dn_db, struct dn_ifaddr *ifa) | |||
477 | ifa->ifa_next = dn_db->ifa_list; | 475 | ifa->ifa_next = dn_db->ifa_list; |
478 | dn_db->ifa_list = ifa; | 476 | dn_db->ifa_list = ifa; |
479 | 477 | ||
480 | rtmsg_ifa(RTM_NEWADDR, ifa); | 478 | dn_ifaddr_notify(RTM_NEWADDR, ifa); |
481 | blocking_notifier_call_chain(&dnaddr_chain, NETDEV_UP, ifa); | 479 | blocking_notifier_call_chain(&dnaddr_chain, NETDEV_UP, ifa); |
482 | 480 | ||
483 | return 0; | 481 | return 0; |
@@ -647,41 +645,62 @@ static struct dn_dev *dn_dev_by_index(int ifindex) | |||
647 | return dn_dev; | 645 | return dn_dev; |
648 | } | 646 | } |
649 | 647 | ||
650 | static int dn_dev_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) | 648 | static struct nla_policy dn_ifa_policy[IFA_MAX+1] __read_mostly = { |
649 | [IFA_ADDRESS] = { .type = NLA_U16 }, | ||
650 | [IFA_LOCAL] = { .type = NLA_U16 }, | ||
651 | [IFA_LABEL] = { .type = NLA_STRING, | ||
652 | .len = IFNAMSIZ - 1 }, | ||
653 | }; | ||
654 | |||
655 | static int dn_nl_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) | ||
651 | { | 656 | { |
652 | struct rtattr **rta = arg; | 657 | struct nlattr *tb[IFA_MAX+1]; |
653 | struct dn_dev *dn_db; | 658 | struct dn_dev *dn_db; |
654 | struct ifaddrmsg *ifm = NLMSG_DATA(nlh); | 659 | struct ifaddrmsg *ifm; |
655 | struct dn_ifaddr *ifa, **ifap; | 660 | struct dn_ifaddr *ifa, **ifap; |
661 | int err = -EADDRNOTAVAIL; | ||
662 | |||
663 | err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, dn_ifa_policy); | ||
664 | if (err < 0) | ||
665 | goto errout; | ||
656 | 666 | ||
667 | ifm = nlmsg_data(nlh); | ||
657 | if ((dn_db = dn_dev_by_index(ifm->ifa_index)) == NULL) | 668 | if ((dn_db = dn_dev_by_index(ifm->ifa_index)) == NULL) |
658 | return -EADDRNOTAVAIL; | 669 | goto errout; |
670 | |||
671 | for (ifap = &dn_db->ifa_list; (ifa = *ifap); ifap = &ifa->ifa_next) { | ||
672 | if (tb[IFA_LOCAL] && | ||
673 | nla_memcmp(tb[IFA_LOCAL], &ifa->ifa_local, 2)) | ||
674 | continue; | ||
659 | 675 | ||
660 | for(ifap = &dn_db->ifa_list; (ifa=*ifap) != NULL; ifap = &ifa->ifa_next) { | 676 | if (tb[IFA_LABEL] && nla_strcmp(tb[IFA_LABEL], ifa->ifa_label)) |
661 | void *tmp = rta[IFA_LOCAL-1]; | ||
662 | if ((tmp && memcmp(RTA_DATA(tmp), &ifa->ifa_local, 2)) || | ||
663 | (rta[IFA_LABEL-1] && rtattr_strcmp(rta[IFA_LABEL-1], ifa->ifa_label))) | ||
664 | continue; | 677 | continue; |
665 | 678 | ||
666 | dn_dev_del_ifa(dn_db, ifap, 1); | 679 | dn_dev_del_ifa(dn_db, ifap, 1); |
667 | return 0; | 680 | return 0; |
668 | } | 681 | } |
669 | 682 | ||
670 | return -EADDRNOTAVAIL; | 683 | errout: |
684 | return err; | ||
671 | } | 685 | } |
672 | 686 | ||
673 | static int dn_dev_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) | 687 | static int dn_nl_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) |
674 | { | 688 | { |
675 | struct rtattr **rta = arg; | 689 | struct nlattr *tb[IFA_MAX+1]; |
676 | struct net_device *dev; | 690 | struct net_device *dev; |
677 | struct dn_dev *dn_db; | 691 | struct dn_dev *dn_db; |
678 | struct ifaddrmsg *ifm = NLMSG_DATA(nlh); | 692 | struct ifaddrmsg *ifm; |
679 | struct dn_ifaddr *ifa; | 693 | struct dn_ifaddr *ifa; |
680 | int rv; | 694 | int err; |
681 | 695 | ||
682 | if (rta[IFA_LOCAL-1] == NULL) | 696 | err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, dn_ifa_policy); |
697 | if (err < 0) | ||
698 | return err; | ||
699 | |||
700 | if (tb[IFA_LOCAL] == NULL) | ||
683 | return -EINVAL; | 701 | return -EINVAL; |
684 | 702 | ||
703 | ifm = nlmsg_data(nlh); | ||
685 | if ((dev = __dev_get_by_index(ifm->ifa_index)) == NULL) | 704 | if ((dev = __dev_get_by_index(ifm->ifa_index)) == NULL) |
686 | return -ENODEV; | 705 | return -ENODEV; |
687 | 706 | ||
@@ -695,69 +714,77 @@ static int dn_dev_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *a | |||
695 | if ((ifa = dn_dev_alloc_ifa()) == NULL) | 714 | if ((ifa = dn_dev_alloc_ifa()) == NULL) |
696 | return -ENOBUFS; | 715 | return -ENOBUFS; |
697 | 716 | ||
698 | if (!rta[IFA_ADDRESS - 1]) | 717 | if (tb[IFA_ADDRESS] == NULL) |
699 | rta[IFA_ADDRESS - 1] = rta[IFA_LOCAL - 1]; | 718 | tb[IFA_ADDRESS] = tb[IFA_LOCAL]; |
700 | memcpy(&ifa->ifa_local, RTA_DATA(rta[IFA_LOCAL-1]), 2); | 719 | |
701 | memcpy(&ifa->ifa_address, RTA_DATA(rta[IFA_ADDRESS-1]), 2); | 720 | ifa->ifa_local = nla_get_le16(tb[IFA_LOCAL]); |
721 | ifa->ifa_address = nla_get_le16(tb[IFA_ADDRESS]); | ||
702 | ifa->ifa_flags = ifm->ifa_flags; | 722 | ifa->ifa_flags = ifm->ifa_flags; |
703 | ifa->ifa_scope = ifm->ifa_scope; | 723 | ifa->ifa_scope = ifm->ifa_scope; |
704 | ifa->ifa_dev = dn_db; | 724 | ifa->ifa_dev = dn_db; |
705 | if (rta[IFA_LABEL-1]) | 725 | |
706 | rtattr_strlcpy(ifa->ifa_label, rta[IFA_LABEL-1], IFNAMSIZ); | 726 | if (tb[IFA_LABEL]) |
727 | nla_strlcpy(ifa->ifa_label, tb[IFA_LABEL], IFNAMSIZ); | ||
707 | else | 728 | else |
708 | memcpy(ifa->ifa_label, dev->name, IFNAMSIZ); | 729 | memcpy(ifa->ifa_label, dev->name, IFNAMSIZ); |
709 | 730 | ||
710 | rv = dn_dev_insert_ifa(dn_db, ifa); | 731 | err = dn_dev_insert_ifa(dn_db, ifa); |
711 | if (rv) | 732 | if (err) |
712 | dn_dev_free_ifa(ifa); | 733 | dn_dev_free_ifa(ifa); |
713 | return rv; | 734 | |
735 | return err; | ||
714 | } | 736 | } |
715 | 737 | ||
716 | static int dn_dev_fill_ifaddr(struct sk_buff *skb, struct dn_ifaddr *ifa, | 738 | static inline size_t dn_ifaddr_nlmsg_size(void) |
717 | u32 pid, u32 seq, int event, unsigned int flags) | 739 | { |
740 | return NLMSG_ALIGN(sizeof(struct ifaddrmsg)) | ||
741 | + nla_total_size(IFNAMSIZ) /* IFA_LABEL */ | ||
742 | + nla_total_size(2) /* IFA_ADDRESS */ | ||
743 | + nla_total_size(2); /* IFA_LOCAL */ | ||
744 | } | ||
745 | |||
746 | static int dn_nl_fill_ifaddr(struct sk_buff *skb, struct dn_ifaddr *ifa, | ||
747 | u32 pid, u32 seq, int event, unsigned int flags) | ||
718 | { | 748 | { |
719 | struct ifaddrmsg *ifm; | 749 | struct ifaddrmsg *ifm; |
720 | struct nlmsghdr *nlh; | 750 | struct nlmsghdr *nlh; |
721 | unsigned char *b = skb->tail; | ||
722 | 751 | ||
723 | nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*ifm), flags); | 752 | nlh = nlmsg_put(skb, pid, seq, event, sizeof(*ifm), flags); |
724 | ifm = NLMSG_DATA(nlh); | 753 | if (nlh == NULL) |
754 | return -ENOBUFS; | ||
725 | 755 | ||
756 | ifm = nlmsg_data(nlh); | ||
726 | ifm->ifa_family = AF_DECnet; | 757 | ifm->ifa_family = AF_DECnet; |
727 | ifm->ifa_prefixlen = 16; | 758 | ifm->ifa_prefixlen = 16; |
728 | ifm->ifa_flags = ifa->ifa_flags | IFA_F_PERMANENT; | 759 | ifm->ifa_flags = ifa->ifa_flags | IFA_F_PERMANENT; |
729 | ifm->ifa_scope = ifa->ifa_scope; | 760 | ifm->ifa_scope = ifa->ifa_scope; |
730 | ifm->ifa_index = ifa->ifa_dev->dev->ifindex; | 761 | ifm->ifa_index = ifa->ifa_dev->dev->ifindex; |
762 | |||
731 | if (ifa->ifa_address) | 763 | if (ifa->ifa_address) |
732 | RTA_PUT(skb, IFA_ADDRESS, 2, &ifa->ifa_address); | 764 | NLA_PUT_LE16(skb, IFA_ADDRESS, ifa->ifa_address); |
733 | if (ifa->ifa_local) | 765 | if (ifa->ifa_local) |
734 | RTA_PUT(skb, IFA_LOCAL, 2, &ifa->ifa_local); | 766 | NLA_PUT_LE16(skb, IFA_LOCAL, ifa->ifa_local); |
735 | if (ifa->ifa_label[0]) | 767 | if (ifa->ifa_label[0]) |
736 | RTA_PUT(skb, IFA_LABEL, IFNAMSIZ, &ifa->ifa_label); | 768 | NLA_PUT_STRING(skb, IFA_LABEL, ifa->ifa_label); |
737 | nlh->nlmsg_len = skb->tail - b; | 769 | |
738 | return skb->len; | 770 | return nlmsg_end(skb, nlh); |
739 | 771 | ||
740 | nlmsg_failure: | 772 | nla_put_failure: |
741 | rtattr_failure: | 773 | return nlmsg_cancel(skb, nlh); |
742 | skb_trim(skb, b - skb->data); | ||
743 | return -1; | ||
744 | } | 774 | } |
745 | 775 | ||
746 | static void rtmsg_ifa(int event, struct dn_ifaddr *ifa) | 776 | static void dn_ifaddr_notify(int event, struct dn_ifaddr *ifa) |
747 | { | 777 | { |
748 | struct sk_buff *skb; | 778 | struct sk_buff *skb; |
749 | int payload = sizeof(struct ifaddrmsg) + 128; | ||
750 | int err = -ENOBUFS; | 779 | int err = -ENOBUFS; |
751 | 780 | ||
752 | skb = alloc_skb(nlmsg_total_size(payload), GFP_KERNEL); | 781 | skb = alloc_skb(dn_ifaddr_nlmsg_size(), GFP_KERNEL); |
753 | if (skb == NULL) | 782 | if (skb == NULL) |
754 | goto errout; | 783 | goto errout; |
755 | 784 | ||
756 | err = dn_dev_fill_ifaddr(skb, ifa, 0, 0, event, 0); | 785 | err = dn_nl_fill_ifaddr(skb, ifa, 0, 0, event, 0); |
757 | if (err < 0) { | 786 | /* failure implies BUG in dn_ifaddr_nlmsg_size() */ |
758 | kfree_skb(skb); | 787 | BUG_ON(err < 0); |
759 | goto errout; | ||
760 | } | ||
761 | 788 | ||
762 | err = rtnl_notify(skb, 0, RTNLGRP_DECnet_IFADDR, NULL, GFP_KERNEL); | 789 | err = rtnl_notify(skb, 0, RTNLGRP_DECnet_IFADDR, NULL, GFP_KERNEL); |
763 | errout: | 790 | errout: |
@@ -765,39 +792,43 @@ errout: | |||
765 | rtnl_set_sk_err(RTNLGRP_DECnet_IFADDR, err); | 792 | rtnl_set_sk_err(RTNLGRP_DECnet_IFADDR, err); |
766 | } | 793 | } |
767 | 794 | ||
768 | static int dn_dev_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb) | 795 | static int dn_nl_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb) |
769 | { | 796 | { |
770 | int idx, dn_idx; | 797 | int idx, dn_idx = 0, skip_ndevs, skip_naddr; |
771 | int s_idx, s_dn_idx; | ||
772 | struct net_device *dev; | 798 | struct net_device *dev; |
773 | struct dn_dev *dn_db; | 799 | struct dn_dev *dn_db; |
774 | struct dn_ifaddr *ifa; | 800 | struct dn_ifaddr *ifa; |
775 | 801 | ||
776 | s_idx = cb->args[0]; | 802 | skip_ndevs = cb->args[0]; |
777 | s_dn_idx = dn_idx = cb->args[1]; | 803 | skip_naddr = cb->args[1]; |
804 | |||
778 | read_lock(&dev_base_lock); | 805 | read_lock(&dev_base_lock); |
779 | for(dev = dev_base, idx = 0; dev; dev = dev->next, idx++) { | 806 | for (dev = dev_base, idx = 0; dev; dev = dev->next, idx++) { |
780 | if (idx < s_idx) | 807 | if (idx < skip_ndevs) |
781 | continue; | 808 | continue; |
782 | if (idx > s_idx) | 809 | else if (idx > skip_ndevs) { |
783 | s_dn_idx = 0; | 810 | /* Only skip over addresses for first dev dumped |
811 | * in this iteration (idx == skip_ndevs) */ | ||
812 | skip_naddr = 0; | ||
813 | } | ||
814 | |||
784 | if ((dn_db = dev->dn_ptr) == NULL) | 815 | if ((dn_db = dev->dn_ptr) == NULL) |
785 | continue; | 816 | continue; |
786 | 817 | ||
787 | for(ifa = dn_db->ifa_list, dn_idx = 0; ifa; ifa = ifa->ifa_next, dn_idx++) { | 818 | for (ifa = dn_db->ifa_list, dn_idx = 0; ifa; |
788 | if (dn_idx < s_dn_idx) | 819 | ifa = ifa->ifa_next, dn_idx++) { |
820 | if (dn_idx < skip_naddr) | ||
789 | continue; | 821 | continue; |
790 | 822 | ||
791 | if (dn_dev_fill_ifaddr(skb, ifa, | 823 | if (dn_nl_fill_ifaddr(skb, ifa, NETLINK_CB(cb->skb).pid, |
792 | NETLINK_CB(cb->skb).pid, | 824 | cb->nlh->nlmsg_seq, RTM_NEWADDR, |
793 | cb->nlh->nlmsg_seq, | 825 | NLM_F_MULTI) < 0) |
794 | RTM_NEWADDR, | ||
795 | NLM_F_MULTI) <= 0) | ||
796 | goto done; | 826 | goto done; |
797 | } | 827 | } |
798 | } | 828 | } |
799 | done: | 829 | done: |
800 | read_unlock(&dev_base_lock); | 830 | read_unlock(&dev_base_lock); |
831 | |||
801 | cb->args[0] = idx; | 832 | cb->args[0] = idx; |
802 | cb->args[1] = dn_idx; | 833 | cb->args[1] = dn_idx; |
803 | 834 | ||
@@ -1414,9 +1445,9 @@ static struct file_operations dn_dev_seq_fops = { | |||
1414 | 1445 | ||
1415 | static struct rtnetlink_link dnet_rtnetlink_table[RTM_NR_MSGTYPES] = | 1446 | static struct rtnetlink_link dnet_rtnetlink_table[RTM_NR_MSGTYPES] = |
1416 | { | 1447 | { |
1417 | [RTM_NEWADDR - RTM_BASE] = { .doit = dn_dev_rtm_newaddr, }, | 1448 | [RTM_NEWADDR - RTM_BASE] = { .doit = dn_nl_newaddr, }, |
1418 | [RTM_DELADDR - RTM_BASE] = { .doit = dn_dev_rtm_deladdr, }, | 1449 | [RTM_DELADDR - RTM_BASE] = { .doit = dn_nl_deladdr, }, |
1419 | [RTM_GETADDR - RTM_BASE] = { .dumpit = dn_dev_dump_ifaddr, }, | 1450 | [RTM_GETADDR - RTM_BASE] = { .dumpit = dn_nl_dump_ifaddr, }, |
1420 | #ifdef CONFIG_DECNET_ROUTER | 1451 | #ifdef CONFIG_DECNET_ROUTER |
1421 | [RTM_NEWROUTE - RTM_BASE] = { .doit = dn_fib_rtm_newroute, }, | 1452 | [RTM_NEWROUTE - RTM_BASE] = { .doit = dn_fib_rtm_newroute, }, |
1422 | [RTM_DELROUTE - RTM_BASE] = { .doit = dn_fib_rtm_delroute, }, | 1453 | [RTM_DELROUTE - RTM_BASE] = { .doit = dn_fib_rtm_delroute, }, |
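The dn_dev.c hunks above move the DECnet address fill path from the NLMSG_NEW()/RTA_PUT() macros to the nlmsg_put()/NLA_PUT_LE16() helpers and finish the message with nlmsg_end() or nlmsg_cancel(). A minimal sketch of the resulting pattern; fill_addr_sketch() and its addr argument are illustrative, while the helpers and attribute names are the ones the hunks themselves use:

#include <net/netlink.h>
#include <linux/rtnetlink.h>

/* Sketch only: one address message built with the nlmsg_*/nla_* helpers. */
static int fill_addr_sketch(struct sk_buff *skb, __le16 addr,
                            u32 pid, u32 seq, int event, unsigned int flags)
{
        struct ifaddrmsg *ifm;
        struct nlmsghdr *nlh;

        nlh = nlmsg_put(skb, pid, seq, event, sizeof(*ifm), flags);
        if (nlh == NULL)
                return -ENOBUFS;

        ifm = nlmsg_data(nlh);
        ifm->ifa_family = AF_DECnet;

        NLA_PUT_LE16(skb, IFA_ADDRESS, addr);   /* jumps to nla_put_failure on overflow */

        return nlmsg_end(skb, nlh);             /* final message length */

nla_put_failure:
        return nlmsg_cancel(skb, nlh);          /* trims the partial message */
}

A failed attribute put ends up as a negative return value, which is what both the dump loop and the BUG_ON() in the notify path above key off.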
diff --git a/net/decnet/dn_neigh.c b/net/decnet/dn_neigh.c index ff0ebe99137d..7322bb36e825 100644 --- a/net/decnet/dn_neigh.c +++ b/net/decnet/dn_neigh.c | |||
@@ -591,7 +591,6 @@ static int dn_neigh_seq_open(struct inode *inode, struct file *file) | |||
591 | 591 | ||
592 | seq = file->private_data; | 592 | seq = file->private_data; |
593 | seq->private = s; | 593 | seq->private = s; |
594 | memset(s, 0, sizeof(*s)); | ||
595 | out: | 594 | out: |
596 | return rc; | 595 | return rc; |
597 | out_kfree: | 596 | out_kfree: |
diff --git a/net/decnet/dn_nsp_in.c b/net/decnet/dn_nsp_in.c index 7683d4f754d2..39a6cf7fb566 100644 --- a/net/decnet/dn_nsp_in.c +++ b/net/decnet/dn_nsp_in.c | |||
@@ -804,7 +804,7 @@ got_it: | |||
804 | goto free_out; | 804 | goto free_out; |
805 | } | 805 | } |
806 | 806 | ||
807 | return sk_receive_skb(sk, skb); | 807 | return sk_receive_skb(sk, skb, 0); |
808 | } | 808 | } |
809 | 809 | ||
810 | return dn_nsp_no_socket(skb, reason); | 810 | return dn_nsp_no_socket(skb, reason); |
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c index 23489f7232d2..9881933167bd 100644 --- a/net/decnet/dn_route.c +++ b/net/decnet/dn_route.c | |||
@@ -269,9 +269,7 @@ static inline int compare_keys(struct flowi *fl1, struct flowi *fl2) | |||
269 | { | 269 | { |
270 | return ((fl1->nl_u.dn_u.daddr ^ fl2->nl_u.dn_u.daddr) | | 270 | return ((fl1->nl_u.dn_u.daddr ^ fl2->nl_u.dn_u.daddr) | |
271 | (fl1->nl_u.dn_u.saddr ^ fl2->nl_u.dn_u.saddr) | | 271 | (fl1->nl_u.dn_u.saddr ^ fl2->nl_u.dn_u.saddr) | |
272 | #ifdef CONFIG_DECNET_ROUTE_FWMARK | 272 | (fl1->mark ^ fl2->mark) | |
273 | (fl1->nl_u.dn_u.fwmark ^ fl2->nl_u.dn_u.fwmark) | | ||
274 | #endif | ||
275 | (fl1->nl_u.dn_u.scope ^ fl2->nl_u.dn_u.scope) | | 273 | (fl1->nl_u.dn_u.scope ^ fl2->nl_u.dn_u.scope) | |
276 | (fl1->oif ^ fl2->oif) | | 274 | (fl1->oif ^ fl2->oif) | |
277 | (fl1->iif ^ fl2->iif)) == 0; | 275 | (fl1->iif ^ fl2->iif)) == 0; |
@@ -882,10 +880,8 @@ static int dn_route_output_slow(struct dst_entry **pprt, const struct flowi *old | |||
882 | { .daddr = oldflp->fld_dst, | 880 | { .daddr = oldflp->fld_dst, |
883 | .saddr = oldflp->fld_src, | 881 | .saddr = oldflp->fld_src, |
884 | .scope = RT_SCOPE_UNIVERSE, | 882 | .scope = RT_SCOPE_UNIVERSE, |
885 | #ifdef CONFIG_DECNET_ROUTE_FWMARK | ||
886 | .fwmark = oldflp->fld_fwmark | ||
887 | #endif | ||
888 | } }, | 883 | } }, |
884 | .mark = oldflp->mark, | ||
889 | .iif = loopback_dev.ifindex, | 885 | .iif = loopback_dev.ifindex, |
890 | .oif = oldflp->oif }; | 886 | .oif = oldflp->oif }; |
891 | struct dn_route *rt = NULL; | 887 | struct dn_route *rt = NULL; |
@@ -903,7 +899,7 @@ static int dn_route_output_slow(struct dst_entry **pprt, const struct flowi *old | |||
903 | "dn_route_output_slow: dst=%04x src=%04x mark=%d" | 899 | "dn_route_output_slow: dst=%04x src=%04x mark=%d" |
904 | " iif=%d oif=%d\n", dn_ntohs(oldflp->fld_dst), | 900 | " iif=%d oif=%d\n", dn_ntohs(oldflp->fld_dst), |
905 | dn_ntohs(oldflp->fld_src), | 901 | dn_ntohs(oldflp->fld_src), |
906 | oldflp->fld_fwmark, loopback_dev.ifindex, oldflp->oif); | 902 | oldflp->mark, loopback_dev.ifindex, oldflp->oif); |
907 | 903 | ||
908 | /* If we have an output interface, verify its a DECnet device */ | 904 | /* If we have an output interface, verify its a DECnet device */ |
909 | if (oldflp->oif) { | 905 | if (oldflp->oif) { |
@@ -1108,9 +1104,7 @@ make_route: | |||
1108 | rt->fl.fld_dst = oldflp->fld_dst; | 1104 | rt->fl.fld_dst = oldflp->fld_dst; |
1109 | rt->fl.oif = oldflp->oif; | 1105 | rt->fl.oif = oldflp->oif; |
1110 | rt->fl.iif = 0; | 1106 | rt->fl.iif = 0; |
1111 | #ifdef CONFIG_DECNET_ROUTE_FWMARK | 1107 | rt->fl.mark = oldflp->mark; |
1112 | rt->fl.fld_fwmark = oldflp->fld_fwmark; | ||
1113 | #endif | ||
1114 | 1108 | ||
1115 | rt->rt_saddr = fl.fld_src; | 1109 | rt->rt_saddr = fl.fld_src; |
1116 | rt->rt_daddr = fl.fld_dst; | 1110 | rt->rt_daddr = fl.fld_dst; |
@@ -1178,9 +1172,7 @@ static int __dn_route_output_key(struct dst_entry **pprt, const struct flowi *fl | |||
1178 | rt = rcu_dereference(rt->u.rt_next)) { | 1172 | rt = rcu_dereference(rt->u.rt_next)) { |
1179 | if ((flp->fld_dst == rt->fl.fld_dst) && | 1173 | if ((flp->fld_dst == rt->fl.fld_dst) && |
1180 | (flp->fld_src == rt->fl.fld_src) && | 1174 | (flp->fld_src == rt->fl.fld_src) && |
1181 | #ifdef CONFIG_DECNET_ROUTE_FWMARK | 1175 | (flp->mark == rt->fl.mark) && |
1182 | (flp->fld_fwmark == rt->fl.fld_fwmark) && | ||
1183 | #endif | ||
1184 | (rt->fl.iif == 0) && | 1176 | (rt->fl.iif == 0) && |
1185 | (rt->fl.oif == flp->oif)) { | 1177 | (rt->fl.oif == flp->oif)) { |
1186 | rt->u.dst.lastuse = jiffies; | 1178 | rt->u.dst.lastuse = jiffies; |
@@ -1235,10 +1227,8 @@ static int dn_route_input_slow(struct sk_buff *skb) | |||
1235 | { .daddr = cb->dst, | 1227 | { .daddr = cb->dst, |
1236 | .saddr = cb->src, | 1228 | .saddr = cb->src, |
1237 | .scope = RT_SCOPE_UNIVERSE, | 1229 | .scope = RT_SCOPE_UNIVERSE, |
1238 | #ifdef CONFIG_DECNET_ROUTE_FWMARK | ||
1239 | .fwmark = skb->nfmark | ||
1240 | #endif | ||
1241 | } }, | 1230 | } }, |
1231 | .mark = skb->mark, | ||
1242 | .iif = skb->dev->ifindex }; | 1232 | .iif = skb->dev->ifindex }; |
1243 | struct dn_fib_res res = { .fi = NULL, .type = RTN_UNREACHABLE }; | 1233 | struct dn_fib_res res = { .fi = NULL, .type = RTN_UNREACHABLE }; |
1244 | int err = -EINVAL; | 1234 | int err = -EINVAL; |
@@ -1385,7 +1375,7 @@ make_route: | |||
1385 | rt->fl.fld_dst = cb->dst; | 1375 | rt->fl.fld_dst = cb->dst; |
1386 | rt->fl.oif = 0; | 1376 | rt->fl.oif = 0; |
1387 | rt->fl.iif = in_dev->ifindex; | 1377 | rt->fl.iif = in_dev->ifindex; |
1388 | rt->fl.fld_fwmark = fl.fld_fwmark; | 1378 | rt->fl.mark = fl.mark; |
1389 | 1379 | ||
1390 | rt->u.dst.flags = DST_HOST; | 1380 | rt->u.dst.flags = DST_HOST; |
1391 | rt->u.dst.neighbour = neigh; | 1381 | rt->u.dst.neighbour = neigh; |
@@ -1457,9 +1447,7 @@ int dn_route_input(struct sk_buff *skb) | |||
1457 | if ((rt->fl.fld_src == cb->src) && | 1447 | if ((rt->fl.fld_src == cb->src) && |
1458 | (rt->fl.fld_dst == cb->dst) && | 1448 | (rt->fl.fld_dst == cb->dst) && |
1459 | (rt->fl.oif == 0) && | 1449 | (rt->fl.oif == 0) && |
1460 | #ifdef CONFIG_DECNET_ROUTE_FWMARK | 1450 | (rt->fl.mark == skb->mark) && |
1461 | (rt->fl.fld_fwmark == skb->nfmark) && | ||
1462 | #endif | ||
1463 | (rt->fl.iif == cb->iif)) { | 1451 | (rt->fl.iif == cb->iif)) { |
1464 | rt->u.dst.lastuse = jiffies; | 1452 | rt->u.dst.lastuse = jiffies; |
1465 | dst_hold(&rt->u.dst); | 1453 | dst_hold(&rt->u.dst); |
@@ -1481,7 +1469,7 @@ static int dn_rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, | |||
1481 | struct rtmsg *r; | 1469 | struct rtmsg *r; |
1482 | struct nlmsghdr *nlh; | 1470 | struct nlmsghdr *nlh; |
1483 | unsigned char *b = skb->tail; | 1471 | unsigned char *b = skb->tail; |
1484 | struct rta_cacheinfo ci; | 1472 | long expires; |
1485 | 1473 | ||
1486 | nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*r), flags); | 1474 | nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*r), flags); |
1487 | r = NLMSG_DATA(nlh); | 1475 | r = NLMSG_DATA(nlh); |
@@ -1514,16 +1502,10 @@ static int dn_rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, | |||
1514 | RTA_PUT(skb, RTA_GATEWAY, 2, &rt->rt_gateway); | 1502 | RTA_PUT(skb, RTA_GATEWAY, 2, &rt->rt_gateway); |
1515 | if (rtnetlink_put_metrics(skb, rt->u.dst.metrics) < 0) | 1503 | if (rtnetlink_put_metrics(skb, rt->u.dst.metrics) < 0) |
1516 | goto rtattr_failure; | 1504 | goto rtattr_failure; |
1517 | ci.rta_lastuse = jiffies_to_clock_t(jiffies - rt->u.dst.lastuse); | 1505 | expires = rt->u.dst.expires ? rt->u.dst.expires - jiffies : 0; |
1518 | ci.rta_used = rt->u.dst.__use; | 1506 | if (rtnl_put_cacheinfo(skb, &rt->u.dst, 0, 0, 0, expires, |
1519 | ci.rta_clntref = atomic_read(&rt->u.dst.__refcnt); | 1507 | rt->u.dst.error) < 0) |
1520 | if (rt->u.dst.expires) | 1508 | goto rtattr_failure; |
1521 | ci.rta_expires = jiffies_to_clock_t(rt->u.dst.expires - jiffies); | ||
1522 | else | ||
1523 | ci.rta_expires = 0; | ||
1524 | ci.rta_error = rt->u.dst.error; | ||
1525 | ci.rta_id = ci.rta_ts = ci.rta_tsage = 0; | ||
1526 | RTA_PUT(skb, RTA_CACHEINFO, sizeof(ci), &ci); | ||
1527 | if (rt->fl.iif) | 1509 | if (rt->fl.iif) |
1528 | RTA_PUT(skb, RTA_IIF, sizeof(int), &rt->fl.iif); | 1510 | RTA_PUT(skb, RTA_IIF, sizeof(int), &rt->fl.iif); |
1529 | 1511 | ||
@@ -1604,8 +1586,6 @@ int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void *arg) | |||
1604 | if (rtm->rtm_flags & RTM_F_NOTIFY) | 1586 | if (rtm->rtm_flags & RTM_F_NOTIFY) |
1605 | rt->rt_flags |= RTCF_NOTIFY; | 1587 | rt->rt_flags |= RTCF_NOTIFY; |
1606 | 1588 | ||
1607 | NETLINK_CB(skb).dst_pid = NETLINK_CB(in_skb).pid; | ||
1608 | |||
1609 | err = dn_rt_fill_info(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq, RTM_NEWROUTE, 0, 0); | 1589 | err = dn_rt_fill_info(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq, RTM_NEWROUTE, 0, 0); |
1610 | 1590 | ||
1611 | if (err == 0) | 1591 | if (err == 0) |
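The dn_route.c hunks above retire the DECnet-private fld_fwmark (and its CONFIG_DECNET_ROUTE_FWMARK guards) in favour of the generic mark member of struct flowi, and replace the hand-filled struct rta_cacheinfo with the shared rtnl_put_cacheinfo() helper. A minimal sketch of both changes, with illustrative local names:

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <net/flow.h>
#include <net/dst.h>

/* Sketch only: the lookup key now carries the generic mark unconditionally. */
static void build_key_sketch(const struct sk_buff *skb, struct flowi *fl)
{
        memset(fl, 0, sizeof(*fl));
        fl->mark = skb->mark;           /* previously skb->nfmark behind an #ifdef */
        fl->iif  = skb->dev->ifindex;
}

/* Sketch only: cache info goes through the shared helper. */
static int put_cacheinfo_sketch(struct sk_buff *skb, struct dst_entry *dst)
{
        long expires = dst->expires ? dst->expires - jiffies : 0;

        return rtnl_put_cacheinfo(skb, dst, 0, 0, 0, expires, dst->error);
}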
diff --git a/net/decnet/dn_rules.c b/net/decnet/dn_rules.c index 590e0a72495c..e32d0c3d5a96 100644 --- a/net/decnet/dn_rules.c +++ b/net/decnet/dn_rules.c | |||
@@ -45,10 +45,6 @@ struct dn_fib_rule | |||
45 | __le16 dstmask; | 45 | __le16 dstmask; |
46 | __le16 srcmap; | 46 | __le16 srcmap; |
47 | u8 flags; | 47 | u8 flags; |
48 | #ifdef CONFIG_DECNET_ROUTE_FWMARK | ||
49 | u32 fwmark; | ||
50 | u32 fwmask; | ||
51 | #endif | ||
52 | }; | 48 | }; |
53 | 49 | ||
54 | static struct dn_fib_rule default_rule = { | 50 | static struct dn_fib_rule default_rule = { |
@@ -112,13 +108,9 @@ errout: | |||
112 | } | 108 | } |
113 | 109 | ||
114 | static struct nla_policy dn_fib_rule_policy[FRA_MAX+1] __read_mostly = { | 110 | static struct nla_policy dn_fib_rule_policy[FRA_MAX+1] __read_mostly = { |
115 | [FRA_IFNAME] = { .type = NLA_STRING, .len = IFNAMSIZ - 1 }, | 111 | FRA_GENERIC_POLICY, |
116 | [FRA_PRIORITY] = { .type = NLA_U32 }, | ||
117 | [FRA_SRC] = { .type = NLA_U16 }, | 112 | [FRA_SRC] = { .type = NLA_U16 }, |
118 | [FRA_DST] = { .type = NLA_U16 }, | 113 | [FRA_DST] = { .type = NLA_U16 }, |
119 | [FRA_FWMARK] = { .type = NLA_U32 }, | ||
120 | [FRA_FWMASK] = { .type = NLA_U32 }, | ||
121 | [FRA_TABLE] = { .type = NLA_U32 }, | ||
122 | }; | 114 | }; |
123 | 115 | ||
124 | static int dn_fib_rule_match(struct fib_rule *rule, struct flowi *fl, int flags) | 116 | static int dn_fib_rule_match(struct fib_rule *rule, struct flowi *fl, int flags) |
@@ -131,11 +123,6 @@ static int dn_fib_rule_match(struct fib_rule *rule, struct flowi *fl, int flags) | |||
131 | ((daddr ^ r->dst) & r->dstmask)) | 123 | ((daddr ^ r->dst) & r->dstmask)) |
132 | return 0; | 124 | return 0; |
133 | 125 | ||
134 | #ifdef CONFIG_DECNET_ROUTE_FWMARK | ||
135 | if ((r->fwmark ^ fl->fld_fwmark) & r->fwmask) | ||
136 | return 0; | ||
137 | #endif | ||
138 | |||
139 | return 1; | 126 | return 1; |
140 | } | 127 | } |
141 | 128 | ||
@@ -169,20 +156,6 @@ static int dn_fib_rule_configure(struct fib_rule *rule, struct sk_buff *skb, | |||
169 | if (tb[FRA_DST]) | 156 | if (tb[FRA_DST]) |
170 | r->dst = nla_get_u16(tb[FRA_DST]); | 157 | r->dst = nla_get_u16(tb[FRA_DST]); |
171 | 158 | ||
172 | #ifdef CONFIG_DECNET_ROUTE_FWMARK | ||
173 | if (tb[FRA_FWMARK]) { | ||
174 | r->fwmark = nla_get_u32(tb[FRA_FWMARK]); | ||
175 | if (r->fwmark) | ||
176 | /* compatibility: if the mark value is non-zero all bits | ||
177 | * are compared unless a mask is explicitly specified. | ||
178 | */ | ||
179 | r->fwmask = 0xFFFFFFFF; | ||
180 | } | ||
181 | |||
182 | if (tb[FRA_FWMASK]) | ||
183 | r->fwmask = nla_get_u32(tb[FRA_FWMASK]); | ||
184 | #endif | ||
185 | |||
186 | r->src_len = frh->src_len; | 159 | r->src_len = frh->src_len; |
187 | r->srcmask = dnet_make_mask(r->src_len); | 160 | r->srcmask = dnet_make_mask(r->src_len); |
188 | r->dst_len = frh->dst_len; | 161 | r->dst_len = frh->dst_len; |
@@ -203,14 +176,6 @@ static int dn_fib_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh, | |||
203 | if (frh->dst_len && (r->dst_len != frh->dst_len)) | 176 | if (frh->dst_len && (r->dst_len != frh->dst_len)) |
204 | return 0; | 177 | return 0; |
205 | 178 | ||
206 | #ifdef CONFIG_DECNET_ROUTE_FWMARK | ||
207 | if (tb[FRA_FWMARK] && (r->fwmark != nla_get_u32(tb[FRA_FWMARK]))) | ||
208 | return 0; | ||
209 | |||
210 | if (tb[FRA_FWMASK] && (r->fwmask != nla_get_u32(tb[FRA_FWMASK]))) | ||
211 | return 0; | ||
212 | #endif | ||
213 | |||
214 | if (tb[FRA_SRC] && (r->src != nla_get_u16(tb[FRA_SRC]))) | 179 | if (tb[FRA_SRC] && (r->src != nla_get_u16(tb[FRA_SRC]))) |
215 | return 0; | 180 | return 0; |
216 | 181 | ||
@@ -248,12 +213,6 @@ static int dn_fib_rule_fill(struct fib_rule *rule, struct sk_buff *skb, | |||
248 | frh->src_len = r->src_len; | 213 | frh->src_len = r->src_len; |
249 | frh->tos = 0; | 214 | frh->tos = 0; |
250 | 215 | ||
251 | #ifdef CONFIG_DECNET_ROUTE_FWMARK | ||
252 | if (r->fwmark) | ||
253 | NLA_PUT_U32(skb, FRA_FWMARK, r->fwmark); | ||
254 | if (r->fwmask || r->fwmark) | ||
255 | NLA_PUT_U32(skb, FRA_FWMASK, r->fwmask); | ||
256 | #endif | ||
257 | if (r->dst_len) | 216 | if (r->dst_len) |
258 | NLA_PUT_U16(skb, FRA_DST, r->dst); | 217 | NLA_PUT_U16(skb, FRA_DST, r->dst); |
259 | if (r->src_len) | 218 | if (r->src_len) |
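With the FRA_FWMARK/FRA_FWMASK handling and the #ifdef'd mark match gone, the dn_rules.c policy above shrinks to FRA_GENERIC_POLICY plus the DECnet-specific source and destination attributes, presumably because the generic fib_rules core now performs the mark matching. A sketch of how such a table pairs with the validated attribute array in the ->configure path (names other than the FRA_* constants are illustrative):

#include <net/fib_rules.h>
#include <net/netlink.h>

static struct nla_policy rule_policy_sketch[FRA_MAX+1] __read_mostly = {
        FRA_GENERIC_POLICY,                     /* shared entries supplied by the fib_rules core */
        [FRA_SRC] = { .type = NLA_U16 },        /* protocol-specific additions */
        [FRA_DST] = { .type = NLA_U16 },
};

/* By the time ->configure runs, tb[] has been validated against the policy. */
static void read_rule_attrs_sketch(struct nlattr **tb, u16 *src, u16 *dst)
{
        if (tb[FRA_SRC])
                *src = nla_get_u16(tb[FRA_SRC]);
        if (tb[FRA_DST])
                *dst = nla_get_u16(tb[FRA_DST]);
}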
diff --git a/net/decnet/dn_table.c b/net/decnet/dn_table.c index 317904bb5896..bdbc3f431668 100644 --- a/net/decnet/dn_table.c +++ b/net/decnet/dn_table.c | |||
@@ -263,6 +263,32 @@ static int dn_fib_nh_match(struct rtmsg *r, struct nlmsghdr *nlh, struct dn_kern | |||
263 | return 0; | 263 | return 0; |
264 | } | 264 | } |
265 | 265 | ||
266 | static inline size_t dn_fib_nlmsg_size(struct dn_fib_info *fi) | ||
267 | { | ||
268 | size_t payload = NLMSG_ALIGN(sizeof(struct rtmsg)) | ||
269 | + nla_total_size(4) /* RTA_TABLE */ | ||
270 | + nla_total_size(2) /* RTA_DST */ | ||
271 | + nla_total_size(4); /* RTA_PRIORITY */ | ||
272 | |||
273 | /* space for nested metrics */ | ||
274 | payload += nla_total_size((RTAX_MAX * nla_total_size(4))); | ||
275 | |||
276 | if (fi->fib_nhs) { | ||
277 | /* Also handles the special case fib_nhs == 1 */ | ||
278 | |||
279 | /* each nexthop is packed in an attribute */ | ||
280 | size_t nhsize = nla_total_size(sizeof(struct rtnexthop)); | ||
281 | |||
282 | /* may contain a gateway attribute */ | ||
283 | nhsize += nla_total_size(4); | ||
284 | |||
285 | /* all nexthops are packed in a nested attribute */ | ||
286 | payload += nla_total_size(fi->fib_nhs * nhsize); | ||
287 | } | ||
288 | |||
289 | return payload; | ||
290 | } | ||
291 | |||
266 | static int dn_fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event, | 292 | static int dn_fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event, |
267 | u32 tb_id, u8 type, u8 scope, void *dst, int dst_len, | 293 | u32 tb_id, u8 type, u8 scope, void *dst, int dst_len, |
268 | struct dn_fib_info *fi, unsigned int flags) | 294 | struct dn_fib_info *fi, unsigned int flags) |
@@ -335,17 +361,15 @@ static void dn_rtmsg_fib(int event, struct dn_fib_node *f, int z, u32 tb_id, | |||
335 | u32 pid = req ? req->pid : 0; | 361 | u32 pid = req ? req->pid : 0; |
336 | int err = -ENOBUFS; | 362 | int err = -ENOBUFS; |
337 | 363 | ||
338 | skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); | 364 | skb = nlmsg_new(dn_fib_nlmsg_size(DN_FIB_INFO(f)), GFP_KERNEL); |
339 | if (skb == NULL) | 365 | if (skb == NULL) |
340 | goto errout; | 366 | goto errout; |
341 | 367 | ||
342 | err = dn_fib_dump_info(skb, pid, nlh->nlmsg_seq, event, tb_id, | 368 | err = dn_fib_dump_info(skb, pid, nlh->nlmsg_seq, event, tb_id, |
343 | f->fn_type, f->fn_scope, &f->fn_key, z, | 369 | f->fn_type, f->fn_scope, &f->fn_key, z, |
344 | DN_FIB_INFO(f), 0); | 370 | DN_FIB_INFO(f), 0); |
345 | if (err < 0) { | 371 | /* failure implies BUG in dn_fib_nlmsg_size() */ |
346 | kfree_skb(skb); | 372 | BUG_ON(err < 0); |
347 | goto errout; | ||
348 | } | ||
349 | 373 | ||
350 | err = rtnl_notify(skb, pid, RTNLGRP_DECnet_ROUTE, nlh, GFP_KERNEL); | 374 | err = rtnl_notify(skb, pid, RTNLGRP_DECnet_ROUTE, nlh, GFP_KERNEL); |
351 | errout: | 375 | errout: |
@@ -807,10 +831,11 @@ struct dn_fib_table *dn_fib_get_table(u32 n, int create) | |||
807 | printk(KERN_DEBUG "DECnet: BUG! Attempt to create routing table from interrupt\n"); | 831 | printk(KERN_DEBUG "DECnet: BUG! Attempt to create routing table from interrupt\n"); |
808 | return NULL; | 832 | return NULL; |
809 | } | 833 | } |
810 | if ((t = kmalloc(sizeof(struct dn_fib_table) + sizeof(struct dn_hash), GFP_KERNEL)) == NULL) | ||
811 | return NULL; | ||
812 | 834 | ||
813 | memset(t, 0, sizeof(struct dn_fib_table)); | 835 | t = kzalloc(sizeof(struct dn_fib_table) + sizeof(struct dn_hash), |
836 | GFP_KERNEL); | ||
837 | if (t == NULL) | ||
838 | return NULL; | ||
814 | 839 | ||
815 | t->n = n; | 840 | t->n = n; |
816 | t->insert = dn_fib_table_insert; | 841 | t->insert = dn_fib_table_insert; |
@@ -818,7 +843,6 @@ struct dn_fib_table *dn_fib_get_table(u32 n, int create) | |||
818 | t->lookup = dn_fib_table_lookup; | 843 | t->lookup = dn_fib_table_lookup; |
819 | t->flush = dn_fib_table_flush; | 844 | t->flush = dn_fib_table_flush; |
820 | t->dump = dn_fib_table_dump; | 845 | t->dump = dn_fib_table_dump; |
821 | memset(t->data, 0, sizeof(struct dn_hash)); | ||
822 | hlist_add_head_rcu(&t->hlist, &dn_fib_table_hash[h]); | 846 | hlist_add_head_rcu(&t->hlist, &dn_fib_table_hash[h]); |
823 | 847 | ||
824 | return t; | 848 | return t; |
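dn_fib_nlmsg_size() above lets the notifier allocate exactly the space a route message needs, which is why dn_rtmsg_fib() can now treat a negative fill result as a bug rather than a recoverable error; the table allocation likewise collapses kmalloc() plus two memset() calls into a single kzalloc(). A compressed sketch of the notify side (the fill call is elided and every *_sketch name is invented):

#include <linux/kernel.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <net/netlink.h>

static inline size_t msg_size_sketch(void)
{
        return NLMSG_ALIGN(sizeof(struct rtmsg))
               + nla_total_size(4)              /* RTA_TABLE */
               + nla_total_size(2)              /* RTA_DST */
               + nla_total_size(4);             /* RTA_PRIORITY */
}

static void notify_sketch(u32 pid, struct nlmsghdr *req_nlh)
{
        struct sk_buff *skb;
        int err = -ENOBUFS;

        skb = nlmsg_new(msg_size_sketch(), GFP_KERNEL);
        if (skb == NULL)
                goto errout;

        err = 0;                /* placeholder for dn_fib_dump_info(skb, ...) */
        BUG_ON(err < 0);        /* a failure here would mean msg_size_sketch() lied */

        err = rtnl_notify(skb, pid, RTNLGRP_DECnet_ROUTE, req_nlh, GFP_KERNEL);
errout:
        if (err < 0)
                rtnl_set_sk_err(RTNLGRP_DECnet_ROUTE, err);
}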
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c index 4bd78c8cfb26..2d31bf3f05c5 100644 --- a/net/ethernet/eth.c +++ b/net/ethernet/eth.c | |||
@@ -60,7 +60,6 @@ | |||
60 | #include <net/ip.h> | 60 | #include <net/ip.h> |
61 | #include <asm/uaccess.h> | 61 | #include <asm/uaccess.h> |
62 | #include <asm/system.h> | 62 | #include <asm/system.h> |
63 | #include <asm/checksum.h> | ||
64 | 63 | ||
65 | __setup("ether=", netdev_boot_setup); | 64 | __setup("ether=", netdev_boot_setup); |
66 | 65 | ||
diff --git a/net/ieee80211/ieee80211_module.c b/net/ieee80211/ieee80211_module.c index 13b1e5fff7e4..b1c6d1f717d9 100644 --- a/net/ieee80211/ieee80211_module.c +++ b/net/ieee80211/ieee80211_module.c | |||
@@ -67,7 +67,7 @@ static int ieee80211_networks_allocate(struct ieee80211_device *ieee) | |||
67 | return 0; | 67 | return 0; |
68 | 68 | ||
69 | ieee->networks = | 69 | ieee->networks = |
70 | kmalloc(MAX_NETWORK_COUNT * sizeof(struct ieee80211_network), | 70 | kzalloc(MAX_NETWORK_COUNT * sizeof(struct ieee80211_network), |
71 | GFP_KERNEL); | 71 | GFP_KERNEL); |
72 | if (!ieee->networks) { | 72 | if (!ieee->networks) { |
73 | printk(KERN_WARNING "%s: Out of memory allocating beacons\n", | 73 | printk(KERN_WARNING "%s: Out of memory allocating beacons\n", |
@@ -75,9 +75,6 @@ static int ieee80211_networks_allocate(struct ieee80211_device *ieee) | |||
75 | return -ENOMEM; | 75 | return -ENOMEM; |
76 | } | 76 | } |
77 | 77 | ||
78 | memset(ieee->networks, 0, | ||
79 | MAX_NETWORK_COUNT * sizeof(struct ieee80211_network)); | ||
80 | |||
81 | return 0; | 78 | return 0; |
82 | } | 79 | } |
83 | 80 | ||
@@ -118,6 +115,21 @@ static void ieee80211_networks_initialize(struct ieee80211_device *ieee) | |||
118 | &ieee->network_free_list); | 115 | &ieee->network_free_list); |
119 | } | 116 | } |
120 | 117 | ||
118 | static int ieee80211_change_mtu(struct net_device *dev, int new_mtu) | ||
119 | { | ||
120 | if ((new_mtu < 68) || (new_mtu > IEEE80211_DATA_LEN)) | ||
121 | return -EINVAL; | ||
122 | dev->mtu = new_mtu; | ||
123 | return 0; | ||
124 | } | ||
125 | |||
126 | static struct net_device_stats *ieee80211_generic_get_stats( | ||
127 | struct net_device *dev) | ||
128 | { | ||
129 | struct ieee80211_device *ieee = netdev_priv(dev); | ||
130 | return &ieee->stats; | ||
131 | } | ||
132 | |||
121 | struct net_device *alloc_ieee80211(int sizeof_priv) | 133 | struct net_device *alloc_ieee80211(int sizeof_priv) |
122 | { | 134 | { |
123 | struct ieee80211_device *ieee; | 135 | struct ieee80211_device *ieee; |
@@ -133,6 +145,11 @@ struct net_device *alloc_ieee80211(int sizeof_priv) | |||
133 | } | 145 | } |
134 | ieee = netdev_priv(dev); | 146 | ieee = netdev_priv(dev); |
135 | dev->hard_start_xmit = ieee80211_xmit; | 147 | dev->hard_start_xmit = ieee80211_xmit; |
148 | dev->change_mtu = ieee80211_change_mtu; | ||
149 | |||
150 | /* Drivers are free to override this if the generic implementation | ||
151 | * does not meet their needs. */ | ||
152 | dev->get_stats = ieee80211_generic_get_stats; | ||
136 | 153 | ||
137 | ieee->dev = dev; | 154 | ieee->dev = dev; |
138 | 155 | ||
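alloc_ieee80211() now installs a generic change_mtu handler (bounded by 68 and IEEE80211_DATA_LEN) and a generic get_stats that returns the stack's own counters; as the comment in the hunk notes, drivers may still install their own handlers afterwards. A hypothetical driver-side sketch, with invented mydrv_* names:

#include <linux/netdevice.h>
#include <net/ieee80211.h>

static struct net_device_stats *mydrv_get_stats(struct net_device *dev)
{
        struct ieee80211_device *ieee = netdev_priv(dev);

        /* a real driver might merge hardware counters in here */
        return &ieee->stats;
}

static struct net_device *mydrv_alloc(int priv_size)
{
        struct net_device *dev = alloc_ieee80211(priv_size);

        if (dev)
                dev->get_stats = mydrv_get_stats;       /* override the generic handler */
        return dev;
}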
diff --git a/net/ieee80211/ieee80211_rx.c b/net/ieee80211/ieee80211_rx.c index 2759312a4204..d97e5412e31b 100644 --- a/net/ieee80211/ieee80211_rx.c +++ b/net/ieee80211/ieee80211_rx.c | |||
@@ -415,17 +415,16 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, | |||
415 | ieee->host_mc_decrypt : ieee->host_decrypt; | 415 | ieee->host_mc_decrypt : ieee->host_decrypt; |
416 | 416 | ||
417 | if (can_be_decrypted) { | 417 | if (can_be_decrypted) { |
418 | int idx = 0; | ||
419 | if (skb->len >= hdrlen + 3) { | 418 | if (skb->len >= hdrlen + 3) { |
420 | /* Top two-bits of byte 3 are the key index */ | 419 | /* Top two-bits of byte 3 are the key index */ |
421 | idx = skb->data[hdrlen + 3] >> 6; | 420 | keyidx = skb->data[hdrlen + 3] >> 6; |
422 | } | 421 | } |
423 | 422 | ||
424 | /* ieee->crypt[] is WEP_KEY (4) in length. Given that idx | 423 | /* ieee->crypt[] is WEP_KEY (4) in length. Given that keyidx |
425 | * is only allowed 2-bits of storage, no value of idx can | 424 | * is only allowed 2-bits of storage, no value of keyidx can |
426 | * be provided via above code that would result in idx | 425 | * be provided via above code that would result in keyidx |
427 | * being out of range */ | 426 | * being out of range */ |
428 | crypt = ieee->crypt[idx]; | 427 | crypt = ieee->crypt[keyidx]; |
429 | 428 | ||
430 | #ifdef NOT_YET | 429 | #ifdef NOT_YET |
431 | sta = NULL; | 430 | sta = NULL; |
@@ -479,6 +478,11 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, | |||
479 | goto rx_exit; | 478 | goto rx_exit; |
480 | } | 479 | } |
481 | #endif | 480 | #endif |
481 | /* drop duplicate 802.11 retransmissions (IEEE 802.11 Chap. 9.29) */ | ||
482 | if (sc == ieee->prev_seq_ctl) | ||
483 | goto rx_dropped; | ||
484 | else | ||
485 | ieee->prev_seq_ctl = sc; | ||
482 | 486 | ||
483 | /* Data frame - extract src/dst addresses */ | 487 | /* Data frame - extract src/dst addresses */ |
484 | if (skb->len < IEEE80211_3ADDR_LEN) | 488 | if (skb->len < IEEE80211_3ADDR_LEN) |
@@ -655,6 +659,51 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, | |||
655 | goto rx_dropped; | 659 | goto rx_dropped; |
656 | } | 660 | } |
657 | 661 | ||
662 | /* If the frame was decrypted in hardware, we may need to strip off | ||
663 | * any security data (IV, ICV, etc) that was left behind */ | ||
664 | if (!can_be_decrypted && (fc & IEEE80211_FCTL_PROTECTED) && | ||
665 | ieee->host_strip_iv_icv) { | ||
666 | int trimlen = 0; | ||
667 | |||
668 | /* Top two-bits of byte 3 are the key index */ | ||
669 | if (skb->len >= hdrlen + 3) | ||
670 | keyidx = skb->data[hdrlen + 3] >> 6; | ||
671 | |||
672 | /* To strip off any security data which appears before the | ||
673 | * payload, we simply increase hdrlen (as the header gets | ||
674 | * chopped off immediately below). For the security data which | ||
675 | * appears after the payload, we use skb_trim. */ | ||
676 | |||
677 | switch (ieee->sec.encode_alg[keyidx]) { | ||
678 | case SEC_ALG_WEP: | ||
679 | /* 4 byte IV */ | ||
680 | hdrlen += 4; | ||
681 | /* 4 byte ICV */ | ||
682 | trimlen = 4; | ||
683 | break; | ||
684 | case SEC_ALG_TKIP: | ||
685 | /* 4 byte IV, 4 byte ExtIV */ | ||
686 | hdrlen += 8; | ||
687 | /* 8 byte MIC, 4 byte ICV */ | ||
688 | trimlen = 12; | ||
689 | break; | ||
690 | case SEC_ALG_CCMP: | ||
691 | /* 8 byte CCMP header */ | ||
692 | hdrlen += 8; | ||
693 | /* 8 byte MIC */ | ||
694 | trimlen = 8; | ||
695 | break; | ||
696 | } | ||
697 | |||
698 | if (skb->len < trimlen) | ||
699 | goto rx_dropped; | ||
700 | |||
701 | __skb_trim(skb, skb->len - trimlen); | ||
702 | |||
703 | if (skb->len < hdrlen) | ||
704 | goto rx_dropped; | ||
705 | } | ||
706 | |||
658 | /* skb: hdr + (possible reassembled) full plaintext payload */ | 707 | /* skb: hdr + (possible reassembled) full plaintext payload */ |
659 | 708 | ||
660 | payload = skb->data + hdrlen; | 709 | payload = skb->data + hdrlen; |
@@ -1255,12 +1304,11 @@ static int ieee80211_parse_info_param(struct ieee80211_info_element | |||
1255 | case MFIE_TYPE_IBSS_DFS: | 1304 | case MFIE_TYPE_IBSS_DFS: |
1256 | if (network->ibss_dfs) | 1305 | if (network->ibss_dfs) |
1257 | break; | 1306 | break; |
1258 | network->ibss_dfs = | 1307 | network->ibss_dfs = kmemdup(info_element->data, |
1259 | kmalloc(info_element->len, GFP_ATOMIC); | 1308 | info_element->len, |
1309 | GFP_ATOMIC); | ||
1260 | if (!network->ibss_dfs) | 1310 | if (!network->ibss_dfs) |
1261 | return 1; | 1311 | return 1; |
1262 | memcpy(network->ibss_dfs, info_element->data, | ||
1263 | info_element->len); | ||
1264 | network->flags |= NETWORK_HAS_IBSS_DFS; | 1312 | network->flags |= NETWORK_HAS_IBSS_DFS; |
1265 | break; | 1313 | break; |
1266 | 1314 | ||
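Several hunks in this file, and in the softmac and CIPSO changes elsewhere in this commit, replace a kmalloc()/memcpy() pair with kmemdup(), which allocates and copies in one call and reports failure the same way kmalloc() does. A one-function sketch; dup_ie_sketch is an invented name:

#include <linux/slab.h>
#include <linux/string.h>

static u8 *dup_ie_sketch(const u8 *data, size_t len)
{
        /* was: p = kmalloc(len, GFP_ATOMIC); if (p) memcpy(p, data, len); */
        return kmemdup(data, len, GFP_ATOMIC);  /* NULL on allocation failure */
}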
diff --git a/net/ieee80211/ieee80211_tx.c b/net/ieee80211/ieee80211_tx.c index ae254497ba3d..854fc13cd78d 100644 --- a/net/ieee80211/ieee80211_tx.c +++ b/net/ieee80211/ieee80211_tx.c | |||
@@ -390,7 +390,7 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) | |||
390 | * this stack is providing the full 802.11 header, one will | 390 | * this stack is providing the full 802.11 header, one will |
391 | * eventually be affixed to this fragment -- so we must account | 391 | * eventually be affixed to this fragment -- so we must account |
392 | * for it when determining the amount of payload space. */ | 392 | * for it when determining the amount of payload space. */ |
393 | bytes_per_frag = frag_size - IEEE80211_3ADDR_LEN; | 393 | bytes_per_frag = frag_size - hdr_len; |
394 | if (ieee->config & | 394 | if (ieee->config & |
395 | (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS)) | 395 | (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS)) |
396 | bytes_per_frag -= IEEE80211_FCS_LEN; | 396 | bytes_per_frag -= IEEE80211_FCS_LEN; |
@@ -412,7 +412,7 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) | |||
412 | } else { | 412 | } else { |
413 | nr_frags = 1; | 413 | nr_frags = 1; |
414 | bytes_per_frag = bytes_last_frag = bytes; | 414 | bytes_per_frag = bytes_last_frag = bytes; |
415 | frag_size = bytes + IEEE80211_3ADDR_LEN; | 415 | frag_size = bytes + hdr_len; |
416 | } | 416 | } |
417 | 417 | ||
418 | rts_required = (frag_size > ieee->rts | 418 | rts_required = (frag_size > ieee->rts |
diff --git a/net/ieee80211/softmac/ieee80211softmac_auth.c b/net/ieee80211/softmac/ieee80211softmac_auth.c index 2ae1833b657a..6012705aa4f8 100644 --- a/net/ieee80211/softmac/ieee80211softmac_auth.c +++ b/net/ieee80211/softmac/ieee80211softmac_auth.c | |||
@@ -161,7 +161,7 @@ ieee80211softmac_auth_resp(struct net_device *dev, struct ieee80211_auth *auth) | |||
161 | /* Make sure that we've got an auth queue item for this request */ | 161 | /* Make sure that we've got an auth queue item for this request */ |
162 | if(aq == NULL) | 162 | if(aq == NULL) |
163 | { | 163 | { |
164 | printkl(KERN_DEBUG PFX "Authentication response received from "MAC_FMT" but no queue item exists.\n", MAC_ARG(auth->header.addr2)); | 164 | dprintkl(KERN_DEBUG PFX "Authentication response received from "MAC_FMT" but no queue item exists.\n", MAC_ARG(auth->header.addr2)); |
165 | /* Error #? */ | 165 | /* Error #? */ |
166 | return -1; | 166 | return -1; |
167 | } | 167 | } |
@@ -169,7 +169,7 @@ ieee80211softmac_auth_resp(struct net_device *dev, struct ieee80211_auth *auth) | |||
169 | /* Check for out of order authentication */ | 169 | /* Check for out of order authentication */ |
170 | if(!net->authenticating) | 170 | if(!net->authenticating) |
171 | { | 171 | { |
172 | printkl(KERN_DEBUG PFX "Authentication response received from "MAC_FMT" but did not request authentication.\n",MAC_ARG(auth->header.addr2)); | 172 | dprintkl(KERN_DEBUG PFX "Authentication response received from "MAC_FMT" but did not request authentication.\n",MAC_ARG(auth->header.addr2)); |
173 | return -1; | 173 | return -1; |
174 | } | 174 | } |
175 | 175 | ||
@@ -219,10 +219,16 @@ ieee80211softmac_auth_resp(struct net_device *dev, struct ieee80211_auth *auth) | |||
219 | net->challenge_len = *data++; | 219 | net->challenge_len = *data++; |
220 | if (net->challenge_len > WLAN_AUTH_CHALLENGE_LEN) | 220 | if (net->challenge_len > WLAN_AUTH_CHALLENGE_LEN) |
221 | net->challenge_len = WLAN_AUTH_CHALLENGE_LEN; | 221 | net->challenge_len = WLAN_AUTH_CHALLENGE_LEN; |
222 | if (net->challenge != NULL) | 222 | kfree(net->challenge); |
223 | kfree(net->challenge); | 223 | net->challenge = kmemdup(data, net->challenge_len, |
224 | net->challenge = kmalloc(net->challenge_len, GFP_ATOMIC); | 224 | GFP_ATOMIC); |
225 | memcpy(net->challenge, data, net->challenge_len); | 225 | if (net->challenge == NULL) { |
226 | printkl(KERN_NOTICE PFX "Shared Key " | ||
227 | "Authentication failed due to " | ||
228 | "memory shortage.\n"); | ||
229 | spin_unlock_irqrestore(&mac->lock, flags); | ||
230 | break; | ||
231 | } | ||
226 | aq->state = IEEE80211SOFTMAC_AUTH_SHARED_RESPONSE; | 232 | aq->state = IEEE80211SOFTMAC_AUTH_SHARED_RESPONSE; |
227 | 233 | ||
228 | /* We reuse the work struct from the auth request here. | 234 | /* We reuse the work struct from the auth request here. |
@@ -345,7 +351,7 @@ ieee80211softmac_deauth_req(struct ieee80211softmac_device *mac, | |||
345 | /* Make sure the network is authenticated */ | 351 | /* Make sure the network is authenticated */ |
346 | if (!net->authenticated) | 352 | if (!net->authenticated) |
347 | { | 353 | { |
348 | printkl(KERN_DEBUG PFX "Can't send deauthentication packet, network is not authenticated.\n"); | 354 | dprintkl(KERN_DEBUG PFX "Can't send deauthentication packet, network is not authenticated.\n"); |
349 | /* Error okay? */ | 355 | /* Error okay? */ |
350 | return -EPERM; | 356 | return -EPERM; |
351 | } | 357 | } |
@@ -379,7 +385,7 @@ ieee80211softmac_deauth_resp(struct net_device *dev, struct ieee80211_deauth *de | |||
379 | net = ieee80211softmac_get_network_by_bssid(mac, deauth->header.addr2); | 385 | net = ieee80211softmac_get_network_by_bssid(mac, deauth->header.addr2); |
380 | 386 | ||
381 | if (net == NULL) { | 387 | if (net == NULL) { |
382 | printkl(KERN_DEBUG PFX "Received deauthentication packet from "MAC_FMT", but that network is unknown.\n", | 388 | dprintkl(KERN_DEBUG PFX "Received deauthentication packet from "MAC_FMT", but that network is unknown.\n", |
383 | MAC_ARG(deauth->header.addr2)); | 389 | MAC_ARG(deauth->header.addr2)); |
384 | return 0; | 390 | return 0; |
385 | } | 391 | } |
@@ -387,7 +393,7 @@ ieee80211softmac_deauth_resp(struct net_device *dev, struct ieee80211_deauth *de | |||
387 | /* Make sure the network is authenticated */ | 393 | /* Make sure the network is authenticated */ |
388 | if(!net->authenticated) | 394 | if(!net->authenticated) |
389 | { | 395 | { |
390 | printkl(KERN_DEBUG PFX "Can't perform deauthentication, network is not authenticated.\n"); | 396 | dprintkl(KERN_DEBUG PFX "Can't perform deauthentication, network is not authenticated.\n"); |
391 | /* Error okay? */ | 397 | /* Error okay? */ |
392 | return -EPERM; | 398 | return -EPERM; |
393 | } | 399 | } |
diff --git a/net/ieee80211/softmac/ieee80211softmac_scan.c b/net/ieee80211/softmac/ieee80211softmac_scan.c index a8326076581a..0c85d6c24cdb 100644 --- a/net/ieee80211/softmac/ieee80211softmac_scan.c +++ b/net/ieee80211/softmac/ieee80211softmac_scan.c | |||
@@ -47,7 +47,6 @@ ieee80211softmac_start_scan(struct ieee80211softmac_device *sm) | |||
47 | sm->scanning = 1; | 47 | sm->scanning = 1; |
48 | spin_unlock_irqrestore(&sm->lock, flags); | 48 | spin_unlock_irqrestore(&sm->lock, flags); |
49 | 49 | ||
50 | netif_tx_disable(sm->ieee->dev); | ||
51 | ret = sm->start_scan(sm->dev); | 50 | ret = sm->start_scan(sm->dev); |
52 | if (ret) { | 51 | if (ret) { |
53 | spin_lock_irqsave(&sm->lock, flags); | 52 | spin_lock_irqsave(&sm->lock, flags); |
@@ -137,7 +136,8 @@ void ieee80211softmac_scan(struct work_struct *work) | |||
137 | si->started = 0; | 136 | si->started = 0; |
138 | spin_unlock_irqrestore(&sm->lock, flags); | 137 | spin_unlock_irqrestore(&sm->lock, flags); |
139 | 138 | ||
140 | dprintk(PFX "Scanning finished\n"); | 139 | dprintk(PFX "Scanning finished: scanned %d channels starting with channel %d\n", |
140 | sm->scaninfo->number_channels, sm->scaninfo->channels[0].channel); | ||
141 | ieee80211softmac_scan_finished(sm); | 141 | ieee80211softmac_scan_finished(sm); |
142 | complete_all(&sm->scaninfo->finished); | 142 | complete_all(&sm->scaninfo->finished); |
143 | } | 143 | } |
@@ -186,8 +186,6 @@ int ieee80211softmac_start_scan_implementation(struct net_device *dev) | |||
186 | sm->scaninfo->channels = sm->ieee->geo.bg; | 186 | sm->scaninfo->channels = sm->ieee->geo.bg; |
187 | sm->scaninfo->number_channels = sm->ieee->geo.bg_channels; | 187 | sm->scaninfo->number_channels = sm->ieee->geo.bg_channels; |
188 | } | 188 | } |
189 | dprintk(PFX "Start scanning with channel: %d\n", sm->scaninfo->channels[0].channel); | ||
190 | dprintk(PFX "Scanning %d channels\n", sm->scaninfo->number_channels); | ||
191 | sm->scaninfo->current_channel_idx = 0; | 189 | sm->scaninfo->current_channel_idx = 0; |
192 | sm->scaninfo->started = 1; | 190 | sm->scaninfo->started = 1; |
193 | sm->scaninfo->stop = 0; | 191 | sm->scaninfo->stop = 0; |
@@ -251,7 +249,6 @@ void ieee80211softmac_scan_finished(struct ieee80211softmac_device *sm) | |||
251 | if (net) | 249 | if (net) |
252 | sm->set_channel(sm->dev, net->channel); | 250 | sm->set_channel(sm->dev, net->channel); |
253 | } | 251 | } |
254 | netif_wake_queue(sm->ieee->dev); | ||
255 | ieee80211softmac_call_events(sm, IEEE80211SOFTMAC_EVENT_SCAN_FINISHED, NULL); | 252 | ieee80211softmac_call_events(sm, IEEE80211SOFTMAC_EVENT_SCAN_FINISHED, NULL); |
256 | } | 253 | } |
257 | EXPORT_SYMBOL_GPL(ieee80211softmac_scan_finished); | 254 | EXPORT_SYMBOL_GPL(ieee80211softmac_scan_finished); |
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig index 5572071af735..503e7059e312 100644 --- a/net/ipv4/Kconfig +++ b/net/ipv4/Kconfig | |||
@@ -104,13 +104,6 @@ config IP_MULTIPLE_TABLES | |||
104 | 104 | ||
105 | If unsure, say N. | 105 | If unsure, say N. |
106 | 106 | ||
107 | config IP_ROUTE_FWMARK | ||
108 | bool "IP: use netfilter MARK value as routing key" | ||
109 | depends on IP_MULTIPLE_TABLES && NETFILTER | ||
110 | help | ||
111 | If you say Y here, you will be able to specify different routes for | ||
112 | packets with different mark values (see iptables(8), MARK target). | ||
113 | |||
114 | config IP_ROUTE_MULTIPATH | 107 | config IP_ROUTE_MULTIPATH |
115 | bool "IP: equal cost multipath" | 108 | bool "IP: equal cost multipath" |
116 | depends on IP_ADVANCED_ROUTER | 109 | depends on IP_ADVANCED_ROUTER |
@@ -625,5 +618,17 @@ config DEFAULT_TCP_CONG | |||
625 | default "reno" if DEFAULT_RENO | 618 | default "reno" if DEFAULT_RENO |
626 | default "cubic" | 619 | default "cubic" |
627 | 620 | ||
621 | config TCP_MD5SIG | ||
622 | bool "TCP: MD5 Signature Option support (RFC2385) (EXPERIMENTAL)" | ||
623 | depends on EXPERIMENTAL | ||
624 | select CRYPTO | ||
625 | select CRYPTO_MD5 | ||
626 | ---help--- | ||
627 | RFC2385 specifies a method of giving MD5 protection to TCP sessions. | ||

628 | Its main (only?) use is to protect BGP sessions between core routers | ||
629 | on the Internet. | ||
630 | |||
631 | If unsure, say N. | ||
632 | |||
628 | source "net/ipv4/ipvs/Kconfig" | 633 | source "net/ipv4/ipvs/Kconfig" |
629 | 634 | ||
diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile index 15645c51520c..7a068626feea 100644 --- a/net/ipv4/Makefile +++ b/net/ipv4/Makefile | |||
@@ -8,7 +8,8 @@ obj-y := route.o inetpeer.o protocol.o \ | |||
8 | inet_timewait_sock.o inet_connection_sock.o \ | 8 | inet_timewait_sock.o inet_connection_sock.o \ |
9 | tcp.o tcp_input.o tcp_output.o tcp_timer.o tcp_ipv4.o \ | 9 | tcp.o tcp_input.o tcp_output.o tcp_timer.o tcp_ipv4.o \ |
10 | tcp_minisocks.o tcp_cong.o \ | 10 | tcp_minisocks.o tcp_cong.o \ |
11 | datagram.o raw.o udp.o arp.o icmp.o devinet.o af_inet.o igmp.o \ | 11 | datagram.o raw.o udp.o udplite.o \ |
12 | arp.o icmp.o devinet.o af_inet.o igmp.o \ | ||
12 | sysctl_net_ipv4.o fib_frontend.o fib_semantics.o | 13 | sysctl_net_ipv4.o fib_frontend.o fib_semantics.o |
13 | 14 | ||
14 | obj-$(CONFIG_IP_FIB_HASH) += fib_hash.o | 15 | obj-$(CONFIG_IP_FIB_HASH) += fib_hash.o |
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index edcf0932ac6d..1144900d37f6 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c | |||
@@ -104,6 +104,7 @@ | |||
104 | #include <net/inet_connection_sock.h> | 104 | #include <net/inet_connection_sock.h> |
105 | #include <net/tcp.h> | 105 | #include <net/tcp.h> |
106 | #include <net/udp.h> | 106 | #include <net/udp.h> |
107 | #include <net/udplite.h> | ||
107 | #include <linux/skbuff.h> | 108 | #include <linux/skbuff.h> |
108 | #include <net/sock.h> | 109 | #include <net/sock.h> |
109 | #include <net/raw.h> | 110 | #include <net/raw.h> |
@@ -204,7 +205,7 @@ int inet_listen(struct socket *sock, int backlog) | |||
204 | * we can only allow the backlog to be adjusted. | 205 | * we can only allow the backlog to be adjusted. |
205 | */ | 206 | */ |
206 | if (old_state != TCP_LISTEN) { | 207 | if (old_state != TCP_LISTEN) { |
207 | err = inet_csk_listen_start(sk, TCP_SYNQ_HSIZE); | 208 | err = inet_csk_listen_start(sk, backlog); |
208 | if (err) | 209 | if (err) |
209 | goto out; | 210 | goto out; |
210 | } | 211 | } |
@@ -643,7 +644,7 @@ int inet_getname(struct socket *sock, struct sockaddr *uaddr, | |||
643 | sin->sin_port = inet->dport; | 644 | sin->sin_port = inet->dport; |
644 | sin->sin_addr.s_addr = inet->daddr; | 645 | sin->sin_addr.s_addr = inet->daddr; |
645 | } else { | 646 | } else { |
646 | __u32 addr = inet->rcv_saddr; | 647 | __be32 addr = inet->rcv_saddr; |
647 | if (!addr) | 648 | if (!addr) |
648 | addr = inet->saddr; | 649 | addr = inet->saddr; |
649 | sin->sin_port = inet->sport; | 650 | sin->sin_port = inet->sport; |
@@ -994,8 +995,8 @@ static int inet_sk_reselect_saddr(struct sock *sk) | |||
994 | struct inet_sock *inet = inet_sk(sk); | 995 | struct inet_sock *inet = inet_sk(sk); |
995 | int err; | 996 | int err; |
996 | struct rtable *rt; | 997 | struct rtable *rt; |
997 | __u32 old_saddr = inet->saddr; | 998 | __be32 old_saddr = inet->saddr; |
998 | __u32 new_saddr; | 999 | __be32 new_saddr; |
999 | __be32 daddr = inet->daddr; | 1000 | __be32 daddr = inet->daddr; |
1000 | 1001 | ||
1001 | if (inet->opt && inet->opt->srr) | 1002 | if (inet->opt && inet->opt->srr) |
@@ -1223,10 +1224,13 @@ static int __init init_ipv4_mibs(void) | |||
1223 | tcp_statistics[1] = alloc_percpu(struct tcp_mib); | 1224 | tcp_statistics[1] = alloc_percpu(struct tcp_mib); |
1224 | udp_statistics[0] = alloc_percpu(struct udp_mib); | 1225 | udp_statistics[0] = alloc_percpu(struct udp_mib); |
1225 | udp_statistics[1] = alloc_percpu(struct udp_mib); | 1226 | udp_statistics[1] = alloc_percpu(struct udp_mib); |
1227 | udplite_statistics[0] = alloc_percpu(struct udp_mib); | ||
1228 | udplite_statistics[1] = alloc_percpu(struct udp_mib); | ||
1226 | if (! | 1229 | if (! |
1227 | (net_statistics[0] && net_statistics[1] && ip_statistics[0] | 1230 | (net_statistics[0] && net_statistics[1] && ip_statistics[0] |
1228 | && ip_statistics[1] && tcp_statistics[0] && tcp_statistics[1] | 1231 | && ip_statistics[1] && tcp_statistics[0] && tcp_statistics[1] |
1229 | && udp_statistics[0] && udp_statistics[1])) | 1232 | && udp_statistics[0] && udp_statistics[1] |
1233 | && udplite_statistics[0] && udplite_statistics[1] ) ) | ||
1230 | return -ENOMEM; | 1234 | return -ENOMEM; |
1231 | 1235 | ||
1232 | (void) tcp_mib_init(); | 1236 | (void) tcp_mib_init(); |
@@ -1313,6 +1317,8 @@ static int __init inet_init(void) | |||
1313 | /* Setup TCP slab cache for open requests. */ | 1317 | /* Setup TCP slab cache for open requests. */ |
1314 | tcp_init(); | 1318 | tcp_init(); |
1315 | 1319 | ||
1320 | /* Add UDP-Lite (RFC 3828) */ | ||
1321 | udplite4_register(); | ||
1316 | 1322 | ||
1317 | /* | 1323 | /* |
1318 | * Set the ICMP layer up | 1324 | * Set the ICMP layer up |
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c index 99542977e47e..67a5509e26fc 100644 --- a/net/ipv4/ah4.c +++ b/net/ipv4/ah4.c | |||
@@ -14,7 +14,7 @@ | |||
14 | * into IP header for icv calculation. Options are already checked | 14 | * into IP header for icv calculation. Options are already checked |
15 | * for validity, so paranoia is not required. */ | 15 | * for validity, so paranoia is not required. */ |
16 | 16 | ||
17 | static int ip_clear_mutable_options(struct iphdr *iph, u32 *daddr) | 17 | static int ip_clear_mutable_options(struct iphdr *iph, __be32 *daddr) |
18 | { | 18 | { |
19 | unsigned char * optptr = (unsigned char*)(iph+1); | 19 | unsigned char * optptr = (unsigned char*)(iph+1); |
20 | int l = iph->ihl*4 - sizeof(struct iphdr); | 20 | int l = iph->ihl*4 - sizeof(struct iphdr); |
@@ -162,7 +162,7 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb) | |||
162 | iph->frag_off = 0; | 162 | iph->frag_off = 0; |
163 | iph->check = 0; | 163 | iph->check = 0; |
164 | if (ihl > sizeof(*iph)) { | 164 | if (ihl > sizeof(*iph)) { |
165 | u32 dummy; | 165 | __be32 dummy; |
166 | if (ip_clear_mutable_options(iph, &dummy)) | 166 | if (ip_clear_mutable_options(iph, &dummy)) |
167 | goto out; | 167 | goto out; |
168 | } | 168 | } |
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index cfb5d3de9c84..3981e8be9ab8 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c | |||
@@ -203,7 +203,7 @@ struct neigh_table arp_tbl = { | |||
203 | .gc_thresh3 = 1024, | 203 | .gc_thresh3 = 1024, |
204 | }; | 204 | }; |
205 | 205 | ||
206 | int arp_mc_map(u32 addr, u8 *haddr, struct net_device *dev, int dir) | 206 | int arp_mc_map(__be32 addr, u8 *haddr, struct net_device *dev, int dir) |
207 | { | 207 | { |
208 | switch (dev->type) { | 208 | switch (dev->type) { |
209 | case ARPHRD_ETHER: | 209 | case ARPHRD_ETHER: |
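The __u32/u32 to __be32 conversions in af_inet.c, ah4.c and arp.c above do not change the generated code; they annotate values that stay in network byte order so that sparse endianness checking can flag a missing htonl()/ntohl(). A trivial sketch, with to_wire_sketch as an invented name:

#include <linux/types.h>
#include <asm/byteorder.h>

static __be32 to_wire_sketch(u32 host_value)
{
        return htonl(host_value);       /* explicit conversion satisfies the checker */
}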
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c index 6460233407c7..60aafb4a8adf 100644 --- a/net/ipv4/cipso_ipv4.c +++ b/net/ipv4/cipso_ipv4.c | |||
@@ -319,6 +319,7 @@ static int cipso_v4_cache_check(const unsigned char *key, | |||
319 | entry->activity += 1; | 319 | entry->activity += 1; |
320 | atomic_inc(&entry->lsm_data->refcount); | 320 | atomic_inc(&entry->lsm_data->refcount); |
321 | secattr->cache = entry->lsm_data; | 321 | secattr->cache = entry->lsm_data; |
322 | secattr->flags |= NETLBL_SECATTR_CACHE; | ||
322 | if (prev_entry == NULL) { | 323 | if (prev_entry == NULL) { |
323 | spin_unlock_bh(&cipso_v4_cache[bkt].lock); | 324 | spin_unlock_bh(&cipso_v4_cache[bkt].lock); |
324 | return 0; | 325 | return 0; |
@@ -377,12 +378,11 @@ int cipso_v4_cache_add(const struct sk_buff *skb, | |||
377 | entry = kzalloc(sizeof(*entry), GFP_ATOMIC); | 378 | entry = kzalloc(sizeof(*entry), GFP_ATOMIC); |
378 | if (entry == NULL) | 379 | if (entry == NULL) |
379 | return -ENOMEM; | 380 | return -ENOMEM; |
380 | entry->key = kmalloc(cipso_ptr_len, GFP_ATOMIC); | 381 | entry->key = kmemdup(cipso_ptr, cipso_ptr_len, GFP_ATOMIC); |
381 | if (entry->key == NULL) { | 382 | if (entry->key == NULL) { |
382 | ret_val = -ENOMEM; | 383 | ret_val = -ENOMEM; |
383 | goto cache_add_failure; | 384 | goto cache_add_failure; |
384 | } | 385 | } |
385 | memcpy(entry->key, cipso_ptr, cipso_ptr_len); | ||
386 | entry->key_len = cipso_ptr_len; | 386 | entry->key_len = cipso_ptr_len; |
387 | entry->hash = cipso_v4_map_cache_hash(cipso_ptr, cipso_ptr_len); | 387 | entry->hash = cipso_v4_map_cache_hash(cipso_ptr, cipso_ptr_len); |
388 | atomic_inc(&secattr->cache->refcount); | 388 | atomic_inc(&secattr->cache->refcount); |
@@ -447,8 +447,30 @@ static struct cipso_v4_doi *cipso_v4_doi_search(u32 doi) | |||
447 | */ | 447 | */ |
448 | int cipso_v4_doi_add(struct cipso_v4_doi *doi_def) | 448 | int cipso_v4_doi_add(struct cipso_v4_doi *doi_def) |
449 | { | 449 | { |
450 | u32 iter; | ||
451 | |||
450 | if (doi_def == NULL || doi_def->doi == CIPSO_V4_DOI_UNKNOWN) | 452 | if (doi_def == NULL || doi_def->doi == CIPSO_V4_DOI_UNKNOWN) |
451 | return -EINVAL; | 453 | return -EINVAL; |
454 | for (iter = 0; iter < CIPSO_V4_TAG_MAXCNT; iter++) { | ||
455 | switch (doi_def->tags[iter]) { | ||
456 | case CIPSO_V4_TAG_RBITMAP: | ||
457 | break; | ||
458 | case CIPSO_V4_TAG_RANGE: | ||
459 | if (doi_def->type != CIPSO_V4_MAP_PASS) | ||
460 | return -EINVAL; | ||
461 | break; | ||
462 | case CIPSO_V4_TAG_INVALID: | ||
463 | if (iter == 0) | ||
464 | return -EINVAL; | ||
465 | break; | ||
466 | case CIPSO_V4_TAG_ENUM: | ||
467 | if (doi_def->type != CIPSO_V4_MAP_PASS) | ||
468 | return -EINVAL; | ||
469 | break; | ||
470 | default: | ||
471 | return -EINVAL; | ||
472 | } | ||
473 | } | ||
452 | 474 | ||
453 | doi_def->valid = 1; | 475 | doi_def->valid = 1; |
454 | INIT_RCU_HEAD(&doi_def->rcu); | 476 | INIT_RCU_HEAD(&doi_def->rcu); |
@@ -805,8 +827,7 @@ static int cipso_v4_map_cat_rbm_valid(const struct cipso_v4_doi *doi_def, | |||
805 | /** | 827 | /** |
806 | * cipso_v4_map_cat_rbm_hton - Perform a category mapping from host to network | 828 | * cipso_v4_map_cat_rbm_hton - Perform a category mapping from host to network |
807 | * @doi_def: the DOI definition | 829 | * @doi_def: the DOI definition |
808 | * @host_cat: the category bitmap in host format | 830 | * @secattr: the security attributes |
809 | * @host_cat_len: the length of the host's category bitmap in bytes | ||
810 | * @net_cat: the zero'd out category bitmap in network/CIPSO format | 831 | * @net_cat: the zero'd out category bitmap in network/CIPSO format |
811 | * @net_cat_len: the length of the CIPSO bitmap in bytes | 832 | * @net_cat_len: the length of the CIPSO bitmap in bytes |
812 | * | 833 | * |
@@ -817,59 +838,51 @@ static int cipso_v4_map_cat_rbm_valid(const struct cipso_v4_doi *doi_def, | |||
817 | * | 838 | * |
818 | */ | 839 | */ |
819 | static int cipso_v4_map_cat_rbm_hton(const struct cipso_v4_doi *doi_def, | 840 | static int cipso_v4_map_cat_rbm_hton(const struct cipso_v4_doi *doi_def, |
820 | const unsigned char *host_cat, | 841 | const struct netlbl_lsm_secattr *secattr, |
821 | u32 host_cat_len, | ||
822 | unsigned char *net_cat, | 842 | unsigned char *net_cat, |
823 | u32 net_cat_len) | 843 | u32 net_cat_len) |
824 | { | 844 | { |
825 | int host_spot = -1; | 845 | int host_spot = -1; |
826 | u32 net_spot; | 846 | u32 net_spot = CIPSO_V4_INV_CAT; |
827 | u32 net_spot_max = 0; | 847 | u32 net_spot_max = 0; |
828 | u32 host_clen_bits = host_cat_len * 8; | ||
829 | u32 net_clen_bits = net_cat_len * 8; | 848 | u32 net_clen_bits = net_cat_len * 8; |
830 | u32 host_cat_size; | 849 | u32 host_cat_size = 0; |
831 | u32 *host_cat_array; | 850 | u32 *host_cat_array = NULL; |
832 | 851 | ||
833 | switch (doi_def->type) { | 852 | if (doi_def->type == CIPSO_V4_MAP_STD) { |
834 | case CIPSO_V4_MAP_PASS: | ||
835 | net_spot_max = host_cat_len; | ||
836 | while (net_spot_max > 0 && host_cat[net_spot_max - 1] == 0) | ||
837 | net_spot_max--; | ||
838 | if (net_spot_max > net_cat_len) | ||
839 | return -EINVAL; | ||
840 | memcpy(net_cat, host_cat, net_spot_max); | ||
841 | return net_spot_max; | ||
842 | case CIPSO_V4_MAP_STD: | ||
843 | host_cat_size = doi_def->map.std->cat.local_size; | 853 | host_cat_size = doi_def->map.std->cat.local_size; |
844 | host_cat_array = doi_def->map.std->cat.local; | 854 | host_cat_array = doi_def->map.std->cat.local; |
845 | for (;;) { | 855 | } |
846 | host_spot = cipso_v4_bitmap_walk(host_cat, | 856 | |
847 | host_clen_bits, | 857 | for (;;) { |
848 | host_spot + 1, | 858 | host_spot = netlbl_secattr_catmap_walk(secattr->mls_cat, |
849 | 1); | 859 | host_spot + 1); |
850 | if (host_spot < 0) | 860 | if (host_spot < 0) |
851 | break; | 861 | break; |
862 | |||
863 | switch (doi_def->type) { | ||
864 | case CIPSO_V4_MAP_PASS: | ||
865 | net_spot = host_spot; | ||
866 | break; | ||
867 | case CIPSO_V4_MAP_STD: | ||
852 | if (host_spot >= host_cat_size) | 868 | if (host_spot >= host_cat_size) |
853 | return -EPERM; | 869 | return -EPERM; |
854 | |||
855 | net_spot = host_cat_array[host_spot]; | 870 | net_spot = host_cat_array[host_spot]; |
856 | if (net_spot >= net_clen_bits) | 871 | if (net_spot >= CIPSO_V4_INV_CAT) |
857 | return -ENOSPC; | 872 | return -EPERM; |
858 | cipso_v4_bitmap_setbit(net_cat, net_spot, 1); | 873 | break; |
859 | |||
860 | if (net_spot > net_spot_max) | ||
861 | net_spot_max = net_spot; | ||
862 | } | 874 | } |
875 | if (net_spot >= net_clen_bits) | ||
876 | return -ENOSPC; | ||
877 | cipso_v4_bitmap_setbit(net_cat, net_spot, 1); | ||
863 | 878 | ||
864 | if (host_spot == -2) | 879 | if (net_spot > net_spot_max) |
865 | return -EFAULT; | 880 | net_spot_max = net_spot; |
866 | |||
867 | if (++net_spot_max % 8) | ||
868 | return net_spot_max / 8 + 1; | ||
869 | return net_spot_max / 8; | ||
870 | } | 881 | } |
871 | 882 | ||
872 | return -EINVAL; | 883 | if (++net_spot_max % 8) |
884 | return net_spot_max / 8 + 1; | ||
885 | return net_spot_max / 8; | ||
873 | } | 886 | } |
874 | 887 | ||
875 | /** | 888 | /** |
@@ -877,102 +890,333 @@ static int cipso_v4_map_cat_rbm_hton(const struct cipso_v4_doi *doi_def, | |||
877 | * @doi_def: the DOI definition | 890 | * @doi_def: the DOI definition |
878 | * @net_cat: the category bitmap in network/CIPSO format | 891 | * @net_cat: the category bitmap in network/CIPSO format |
879 | * @net_cat_len: the length of the CIPSO bitmap in bytes | 892 | * @net_cat_len: the length of the CIPSO bitmap in bytes |
880 | * @host_cat: the zero'd out category bitmap in host format | 893 | * @secattr: the security attributes |
881 | * @host_cat_len: the length of the host's category bitmap in bytes | ||
882 | * | 894 | * |
883 | * Description: | 895 | * Description: |
884 | * Perform a label mapping to translate a CIPSO bitmap to the correct local | 896 | * Perform a label mapping to translate a CIPSO bitmap to the correct local |
885 | * MLS category bitmap using the given DOI definition. Returns the minimum | 897 | * MLS category bitmap using the given DOI definition. Returns zero on |
886 | * size in bytes of the host bitmap on success, negative values otherwise. | 898 | * success, negative values on failure. |
887 | * | 899 | * |
888 | */ | 900 | */ |
889 | static int cipso_v4_map_cat_rbm_ntoh(const struct cipso_v4_doi *doi_def, | 901 | static int cipso_v4_map_cat_rbm_ntoh(const struct cipso_v4_doi *doi_def, |
890 | const unsigned char *net_cat, | 902 | const unsigned char *net_cat, |
891 | u32 net_cat_len, | 903 | u32 net_cat_len, |
892 | unsigned char *host_cat, | 904 | struct netlbl_lsm_secattr *secattr) |
893 | u32 host_cat_len) | ||
894 | { | 905 | { |
895 | u32 host_spot; | 906 | int ret_val; |
896 | u32 host_spot_max = 0; | ||
897 | int net_spot = -1; | 907 | int net_spot = -1; |
908 | u32 host_spot = CIPSO_V4_INV_CAT; | ||
898 | u32 net_clen_bits = net_cat_len * 8; | 909 | u32 net_clen_bits = net_cat_len * 8; |
899 | u32 host_clen_bits = host_cat_len * 8; | 910 | u32 net_cat_size = 0; |
900 | u32 net_cat_size; | 911 | u32 *net_cat_array = NULL; |
901 | u32 *net_cat_array; | ||
902 | 912 | ||
903 | switch (doi_def->type) { | 913 | if (doi_def->type == CIPSO_V4_MAP_STD) { |
904 | case CIPSO_V4_MAP_PASS: | ||
905 | if (net_cat_len > host_cat_len) | ||
906 | return -EINVAL; | ||
907 | memcpy(host_cat, net_cat, net_cat_len); | ||
908 | return net_cat_len; | ||
909 | case CIPSO_V4_MAP_STD: | ||
910 | net_cat_size = doi_def->map.std->cat.cipso_size; | 914 | net_cat_size = doi_def->map.std->cat.cipso_size; |
911 | net_cat_array = doi_def->map.std->cat.cipso; | 915 | net_cat_array = doi_def->map.std->cat.cipso; |
912 | for (;;) { | 916 | } |
913 | net_spot = cipso_v4_bitmap_walk(net_cat, | ||
914 | net_clen_bits, | ||
915 | net_spot + 1, | ||
916 | 1); | ||
917 | if (net_spot < 0) | ||
918 | break; | ||
919 | if (net_spot >= net_cat_size || | ||
920 | net_cat_array[net_spot] >= CIPSO_V4_INV_CAT) | ||
921 | return -EPERM; | ||
922 | 917 | ||
923 | host_spot = net_cat_array[net_spot]; | 918 | for (;;) { |
924 | if (host_spot >= host_clen_bits) | 919 | net_spot = cipso_v4_bitmap_walk(net_cat, |
925 | return -ENOSPC; | 920 | net_clen_bits, |
926 | cipso_v4_bitmap_setbit(host_cat, host_spot, 1); | 921 | net_spot + 1, |
922 | 1); | ||
923 | if (net_spot < 0) { | ||
924 | if (net_spot == -2) | ||
925 | return -EFAULT; | ||
926 | return 0; | ||
927 | } | ||
927 | 928 | ||
928 | if (host_spot > host_spot_max) | 929 | switch (doi_def->type) { |
929 | host_spot_max = host_spot; | 930 | case CIPSO_V4_MAP_PASS: |
931 | host_spot = net_spot; | ||
932 | break; | ||
933 | case CIPSO_V4_MAP_STD: | ||
934 | if (net_spot >= net_cat_size) | ||
935 | return -EPERM; | ||
936 | host_spot = net_cat_array[net_spot]; | ||
937 | if (host_spot >= CIPSO_V4_INV_CAT) | ||
938 | return -EPERM; | ||
939 | break; | ||
930 | } | 940 | } |
941 | ret_val = netlbl_secattr_catmap_setbit(secattr->mls_cat, | ||
942 | host_spot, | ||
943 | GFP_ATOMIC); | ||
944 | if (ret_val != 0) | ||
945 | return ret_val; | ||
946 | } | ||
947 | |||
948 | return -EINVAL; | ||
949 | } | ||
950 | |||
951 | /** | ||
952 | * cipso_v4_map_cat_enum_valid - Checks to see if the categories are valid | ||
953 | * @doi_def: the DOI definition | ||
954 | * @enumcat: category list | ||
955 | * @enumcat_len: length of the category list in bytes | ||
956 | * | ||
957 | * Description: | ||
958 | * Checks the given categories against the given DOI definition and returns a | ||
959 | * negative value if any of the categories do not have a valid mapping and a | ||
960 | * zero value if all of the categories are valid. | ||
961 | * | ||
962 | */ | ||
963 | static int cipso_v4_map_cat_enum_valid(const struct cipso_v4_doi *doi_def, | ||
964 | const unsigned char *enumcat, | ||
965 | u32 enumcat_len) | ||
966 | { | ||
967 | u16 cat; | ||
968 | int cat_prev = -1; | ||
969 | u32 iter; | ||
970 | |||
971 | if (doi_def->type != CIPSO_V4_MAP_PASS || enumcat_len & 0x01) | ||
972 | return -EFAULT; | ||
973 | |||
974 | for (iter = 0; iter < enumcat_len; iter += 2) { | ||
975 | cat = ntohs(*((__be16 *)&enumcat[iter])); | ||
976 | if (cat <= cat_prev) | ||
977 | return -EFAULT; | ||
978 | cat_prev = cat; | ||
979 | } | ||
980 | |||
981 | return 0; | ||
982 | } | ||
983 | |||
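A self-contained sketch of the same check, for reference: the enumerated tag's category list is a run of 16-bit network-order values whose byte length must be even and whose values must be strictly increasing. The helper name below is made up for the example.

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <arpa/inet.h>

static int enum_list_valid(const unsigned char *buf, size_t len)
{
        int prev = -1;
        size_t i;

        if (len & 0x01)                 /* categories are 2 bytes each */
                return -1;
        for (i = 0; i < len; i += 2) {
                uint16_t cat;

                memcpy(&cat, &buf[i], sizeof(cat));
                cat = ntohs(cat);
                if ((int)cat <= prev)   /* must be strictly increasing */
                        return -1;
                prev = cat;
        }
        return 0;
}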
984 | /** | ||
985 | * cipso_v4_map_cat_enum_hton - Perform a category mapping from host to network | ||
986 | * @doi_def: the DOI definition | ||
987 | * @secattr: the security attributes | ||
988 | * @net_cat: the zero'd out category list in network/CIPSO format | ||
989 | * @net_cat_len: the length of the CIPSO category list in bytes | ||
990 | * | ||
991 | * Description: | ||
992 | * Perform a label mapping to translate a local MLS category bitmap to the | ||
993 | * correct CIPSO category list using the given DOI definition. Returns the | ||
994 | * size in bytes of the network category bitmap on success, negative values | ||
995 | * otherwise. | ||
996 | * | ||
997 | */ | ||
998 | static int cipso_v4_map_cat_enum_hton(const struct cipso_v4_doi *doi_def, | ||
999 | const struct netlbl_lsm_secattr *secattr, | ||
1000 | unsigned char *net_cat, | ||
1001 | u32 net_cat_len) | ||
1002 | { | ||
1003 | int cat = -1; | ||
1004 | u32 cat_iter = 0; | ||
1005 | |||
1006 | for (;;) { | ||
1007 | cat = netlbl_secattr_catmap_walk(secattr->mls_cat, cat + 1); | ||
1008 | if (cat < 0) | ||
1009 | break; | ||
1010 | if ((cat_iter + 2) > net_cat_len) | ||
1011 | return -ENOSPC; | ||
1012 | |||
1013 | *((__be16 *)&net_cat[cat_iter]) = htons(cat); | ||
1014 | cat_iter += 2; | ||
1015 | } | ||
1016 | |||
1017 | return cat_iter; | ||
1018 | } | ||
1019 | |||
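Conversely, encoding is just a matter of writing each local category as one 16-bit network-order value; since at most 30 bytes of tag body fit inside a 40-byte IPv4 option, a single enumerated tag can carry no more than 15 categories. A hypothetical flat-array version of the walk above:

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <arpa/inet.h>

/* Pack local category numbers into an enumerated-tag body.  Returns
 * the number of bytes written, or -1 if they do not fit (the kernel
 * returns -ENOSPC in that case). */
static int enum_list_pack(const uint16_t *cats, size_t n_cats,
                          unsigned char *buf, size_t buf_len)
{
        size_t i, off = 0;

        for (i = 0; i < n_cats; i++) {
                uint16_t be = htons(cats[i]);

                if (off + 2 > buf_len)
                        return -1;
                memcpy(&buf[off], &be, 2);
                off += 2;
        }
        return (int)off;
}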
1020 | /** | ||
1021 | * cipso_v4_map_cat_enum_ntoh - Perform a category mapping from network to host | ||
1022 | * @doi_def: the DOI definition | ||
1023 | * @net_cat: the category list in network/CIPSO format | ||
1024 | * @net_cat_len: the length of the CIPSO bitmap in bytes | ||
1025 | * @secattr: the security attributes | ||
1026 | * | ||
1027 | * Description: | ||
1028 | * Perform a label mapping to translate a CIPSO category list to the correct | ||
1029 | * local MLS category bitmap using the given DOI definition. Returns zero on | ||
1030 | * success, negative values on failure. | ||
1031 | * | ||
1032 | */ | ||
1033 | static int cipso_v4_map_cat_enum_ntoh(const struct cipso_v4_doi *doi_def, | ||
1034 | const unsigned char *net_cat, | ||
1035 | u32 net_cat_len, | ||
1036 | struct netlbl_lsm_secattr *secattr) | ||
1037 | { | ||
1038 | int ret_val; | ||
1039 | u32 iter; | ||
1040 | |||
1041 | for (iter = 0; iter < net_cat_len; iter += 2) { | ||
1042 | ret_val = netlbl_secattr_catmap_setbit(secattr->mls_cat, | ||
1043 | ntohs(*((__be16 *)&net_cat[iter])), | ||
1044 | GFP_ATOMIC); | ||
1045 | if (ret_val != 0) | ||
1046 | return ret_val; | ||
1047 | } | ||
1048 | |||
1049 | return 0; | ||
1050 | } | ||
1051 | |||
1052 | /** | ||
1053 | * cipso_v4_map_cat_rng_valid - Checks to see if the categories are valid | ||
1054 | * @doi_def: the DOI definition | ||
1055 | * @rngcat: category list | ||
1056 | * @rngcat_len: length of the category list in bytes | ||
1057 | * | ||
1058 | * Description: | ||
1059 | * Checks the given categories against the given DOI definition and returns a | ||
1060 | * negative value if any of the categories do not have a valid mapping and a | ||
1061 | * zero value if all of the categories are valid. | ||
1062 | * | ||
1063 | */ | ||
1064 | static int cipso_v4_map_cat_rng_valid(const struct cipso_v4_doi *doi_def, | ||
1065 | const unsigned char *rngcat, | ||
1066 | u32 rngcat_len) | ||
1067 | { | ||
1068 | u16 cat_high; | ||
1069 | u16 cat_low; | ||
1070 | u32 cat_prev = CIPSO_V4_MAX_REM_CATS + 1; | ||
1071 | u32 iter; | ||
931 | 1072 | ||
932 | if (net_spot == -2) | 1073 | if (doi_def->type != CIPSO_V4_MAP_PASS || rngcat_len & 0x01) |
1074 | return -EFAULT; | ||
1075 | |||
1076 | for (iter = 0; iter < rngcat_len; iter += 4) { | ||
1077 | cat_high = ntohs(*((__be16 *)&rngcat[iter])); | ||
1078 | if ((iter + 4) <= rngcat_len) | ||
1079 | cat_low = ntohs(*((__be16 *)&rngcat[iter + 2])); | ||
1080 | else | ||
1081 | cat_low = 0; | ||
1082 | |||
1083 | if (cat_high > cat_prev) | ||
933 | return -EFAULT; | 1084 | return -EFAULT; |
934 | 1085 | ||
935 | if (++host_spot_max % 8) | 1086 | cat_prev = cat_low; |
936 | return host_spot_max / 8 + 1; | ||
937 | return host_spot_max / 8; | ||
938 | } | 1087 | } |
939 | 1088 | ||
940 | return -EINVAL; | 1089 | return 0; |
1090 | } | ||
1091 | |||
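Putting the range checks in concrete terms (this is a reading of what the validator accepts, not text quoted from the draft): the ranged tag body is a list of 16-bit network-order (high, low) category bounds, highest range first and non-overlapping, and the final pair may drop its low half, which is then taken to be zero. For example, local categories 0-3 plus 10-12 would be carried as:

/*
 *   offset 0:  0x00 0x0c    high = 12
 *   offset 2:  0x00 0x0a    low  = 10
 *   offset 4:  0x00 0x03    high = 3   (low omitted, so 0 is implied)
 *
 * i.e. a 6-byte body, which cipso_v4_map_cat_rng_valid() accepts and
 * cipso_v4_map_cat_rng_ntoh() expands back into the local bitmap.
 */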
1092 | /** | ||
1093 | * cipso_v4_map_cat_rng_hton - Perform a category mapping from host to network | ||
1094 | * @doi_def: the DOI definition | ||
1095 | * @secattr: the security attributes | ||
1096 | * @net_cat: the zero'd out category list in network/CIPSO format | ||
1097 | * @net_cat_len: the length of the CIPSO category list in bytes | ||
1098 | * | ||
1099 | * Description: | ||
1100 | * Perform a label mapping to translate a local MLS category bitmap to the | ||
1101 | * correct CIPSO category list using the given DOI definition. Returns the | ||
1102 | * size in bytes of the network category bitmap on success, negative values | ||
1103 | * otherwise. | ||
1104 | * | ||
1105 | */ | ||
1106 | static int cipso_v4_map_cat_rng_hton(const struct cipso_v4_doi *doi_def, | ||
1107 | const struct netlbl_lsm_secattr *secattr, | ||
1108 | unsigned char *net_cat, | ||
1109 | u32 net_cat_len) | ||
1110 | { | ||
1111 | /* The constant '16' is not random, it is the maximum number of | ||
1112 | * high/low category range pairs as permitted by the CIPSO draft based | ||
1113 | * on a maximum IPv4 header length of 60 bytes - the BUG_ON() assertion | ||
1114 | * does a sanity check to make sure we don't overflow the array. */ | ||
1115 | int iter = -1; | ||
1116 | u16 array[16]; | ||
1117 | u32 array_cnt = 0; | ||
1118 | u32 cat_size = 0; | ||
1119 | |||
1120 | BUG_ON(net_cat_len > 30); | ||
1121 | |||
1122 | for (;;) { | ||
1123 | iter = netlbl_secattr_catmap_walk(secattr->mls_cat, iter + 1); | ||
1124 | if (iter < 0) | ||
1125 | break; | ||
1126 | cat_size += (iter == 0 ? 0 : sizeof(u16)); | ||
1127 | if (cat_size > net_cat_len) | ||
1128 | return -ENOSPC; | ||
1129 | array[array_cnt++] = iter; | ||
1130 | |||
1131 | iter = netlbl_secattr_catmap_walk_rng(secattr->mls_cat, iter); | ||
1132 | if (iter < 0) | ||
1133 | return -EFAULT; | ||
1134 | cat_size += sizeof(u16); | ||
1135 | if (cat_size > net_cat_len) | ||
1136 | return -ENOSPC; | ||
1137 | array[array_cnt++] = iter; | ||
1138 | } | ||
1139 | |||
1140 | for (iter = 0; array_cnt > 0;) { | ||
1141 | *((__be16 *)&net_cat[iter]) = htons(array[--array_cnt]); | ||
1142 | iter += 2; | ||
1143 | array_cnt--; | ||
1144 | if (array[array_cnt] != 0) { | ||
1145 | *((__be16 *)&net_cat[iter]) = htons(array[array_cnt]); | ||
1146 | iter += 2; | ||
1147 | } | ||
1148 | } | ||
1149 | |||
1150 | return cat_size; | ||
1151 | } | ||
1152 | |||
1153 | /** | ||
1154 | * cipso_v4_map_cat_rng_ntoh - Perform a category mapping from network to host | ||
1155 | * @doi_def: the DOI definition | ||
1156 | * @net_cat: the category list in network/CIPSO format | ||
1157 | * @net_cat_len: the length of the CIPSO bitmap in bytes | ||
1158 | * @secattr: the security attributes | ||
1159 | * | ||
1160 | * Description: | ||
1161 | * Perform a label mapping to translate a CIPSO category list to the correct | ||
1162 | * local MLS category bitmap using the given DOI definition. Returns zero on | ||
1163 | * success, negative values on failure. | ||
1164 | * | ||
1165 | */ | ||
1166 | static int cipso_v4_map_cat_rng_ntoh(const struct cipso_v4_doi *doi_def, | ||
1167 | const unsigned char *net_cat, | ||
1168 | u32 net_cat_len, | ||
1169 | struct netlbl_lsm_secattr *secattr) | ||
1170 | { | ||
1171 | int ret_val; | ||
1172 | u32 net_iter; | ||
1173 | u16 cat_low; | ||
1174 | u16 cat_high; | ||
1175 | |||
1176 | for(net_iter = 0; net_iter < net_cat_len; net_iter += 4) { | ||
1177 | cat_high = ntohs(*((__be16 *)&net_cat[net_iter])); | ||
1178 | if ((net_iter + 4) <= net_cat_len) | ||
1179 | cat_low = ntohs(*((__be16 *)&net_cat[net_iter + 2])); | ||
1180 | else | ||
1181 | cat_low = 0; | ||
1182 | |||
1183 | ret_val = netlbl_secattr_catmap_setrng(secattr->mls_cat, | ||
1184 | cat_low, | ||
1185 | cat_high, | ||
1186 | GFP_ATOMIC); | ||
1187 | if (ret_val != 0) | ||
1188 | return ret_val; | ||
1189 | } | ||
1190 | |||
1191 | return 0; | ||
941 | } | 1192 | } |
942 | 1193 | ||
943 | /* | 1194 | /* |
944 | * Protocol Handling Functions | 1195 | * Protocol Handling Functions |
945 | */ | 1196 | */ |
946 | 1197 | ||
1198 | #define CIPSO_V4_OPT_LEN_MAX 40 | ||
947 | #define CIPSO_V4_HDR_LEN 6 | 1199 | #define CIPSO_V4_HDR_LEN 6 |
948 | 1200 | ||
949 | /** | 1201 | /** |
950 | * cipso_v4_gentag_hdr - Generate a CIPSO option header | 1202 | * cipso_v4_gentag_hdr - Generate a CIPSO option header |
951 | * @doi_def: the DOI definition | 1203 | * @doi_def: the DOI definition |
952 | * @len: the total tag length in bytes | 1204 | * @len: the total tag length in bytes, not including this header |
953 | * @buf: the CIPSO option buffer | 1205 | * @buf: the CIPSO option buffer |
954 | * | 1206 | * |
955 | * Description: | 1207 | * Description: |
956 | * Write a CIPSO header into the beginning of @buf. Return zero on success, | 1208 | * Write a CIPSO header into the beginning of @buf. |
957 | * negative values on failure. | ||
958 | * | 1209 | * |
959 | */ | 1210 | */ |
960 | static int cipso_v4_gentag_hdr(const struct cipso_v4_doi *doi_def, | 1211 | static void cipso_v4_gentag_hdr(const struct cipso_v4_doi *doi_def, |
961 | u32 len, | 1212 | unsigned char *buf, |
962 | unsigned char *buf) | 1213 | u32 len) |
963 | { | 1214 | { |
964 | if (CIPSO_V4_HDR_LEN + len > 40) | ||
965 | return -ENOSPC; | ||
966 | |||
967 | buf[0] = IPOPT_CIPSO; | 1215 | buf[0] = IPOPT_CIPSO; |
968 | buf[1] = CIPSO_V4_HDR_LEN + len; | 1216 | buf[1] = CIPSO_V4_HDR_LEN + len; |
969 | *(u32 *)&buf[2] = htonl(doi_def->doi); | 1217 | *(__be32 *)&buf[2] = htonl(doi_def->doi); |
970 | |||
971 | return 0; | ||
972 | } | 1218 | } |
973 | 1219 | ||
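As a concrete example of the header this writes (IPOPT_CIPSO is option number 134, and CIPSO_V4_HDR_LEN is the 6 bytes shown), a DOI of 3 followed by a 14-byte tag would produce:

/*
 *   buf[0]    = 134            IPOPT_CIPSO
 *   buf[1]    = 6 + 14 = 20    total option length
 *   buf[2..5] = 00 00 00 03    DOI, network byte order
 *   buf[6..]  = tag data
 */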
974 | #define CIPSO_V4_TAG1_CAT_LEN 30 | ||
975 | |||
976 | /** | 1220 | /** |
977 | * cipso_v4_gentag_rbm - Generate a CIPSO restricted bitmap tag (type #1) | 1221 | * cipso_v4_gentag_rbm - Generate a CIPSO restricted bitmap tag (type #1) |
978 | * @doi_def: the DOI definition | 1222 | * @doi_def: the DOI definition |
@@ -983,83 +1227,249 @@ static int cipso_v4_gentag_hdr(const struct cipso_v4_doi *doi_def, | |||
983 | * Description: | 1227 | * Description: |
984 | * Generate a CIPSO option using the restricted bitmap tag, tag type #1. The | 1228 | * Generate a CIPSO option using the restricted bitmap tag, tag type #1. The |
985 | * actual buffer length may be larger than the indicated size due to | 1229 | * actual buffer length may be larger than the indicated size due to |
986 | * translation between host and network category bitmaps. Returns zero on | 1230 | * translation between host and network category bitmaps. Returns the size of |
987 | * success, negative values on failure. | 1231 | * the tag on success, negative values on failure. |
988 | * | 1232 | * |
989 | */ | 1233 | */ |
990 | static int cipso_v4_gentag_rbm(const struct cipso_v4_doi *doi_def, | 1234 | static int cipso_v4_gentag_rbm(const struct cipso_v4_doi *doi_def, |
991 | const struct netlbl_lsm_secattr *secattr, | 1235 | const struct netlbl_lsm_secattr *secattr, |
992 | unsigned char **buffer, | 1236 | unsigned char *buffer, |
993 | u32 *buffer_len) | 1237 | u32 buffer_len) |
994 | { | 1238 | { |
995 | int ret_val = -EPERM; | 1239 | int ret_val; |
996 | unsigned char *buf = NULL; | 1240 | u32 tag_len; |
997 | u32 buf_len; | ||
998 | u32 level; | 1241 | u32 level; |
999 | 1242 | ||
1000 | if (secattr->mls_cat) { | 1243 | if ((secattr->flags & NETLBL_SECATTR_MLS_LVL) == 0) |
1001 | buf = kzalloc(CIPSO_V4_HDR_LEN + 4 + CIPSO_V4_TAG1_CAT_LEN, | 1244 | return -EPERM; |
1002 | GFP_ATOMIC); | 1245 | |
1003 | if (buf == NULL) | 1246 | ret_val = cipso_v4_map_lvl_hton(doi_def, secattr->mls_lvl, &level); |
1004 | return -ENOMEM; | 1247 | if (ret_val != 0) |
1248 | return ret_val; | ||
1005 | 1249 | ||
1250 | if (secattr->flags & NETLBL_SECATTR_MLS_CAT) { | ||
1006 | ret_val = cipso_v4_map_cat_rbm_hton(doi_def, | 1251 | ret_val = cipso_v4_map_cat_rbm_hton(doi_def, |
1007 | secattr->mls_cat, | 1252 | secattr, |
1008 | secattr->mls_cat_len, | 1253 | &buffer[4], |
1009 | &buf[CIPSO_V4_HDR_LEN + 4], | 1254 | buffer_len - 4); |
1010 | CIPSO_V4_TAG1_CAT_LEN); | ||
1011 | if (ret_val < 0) | 1255 | if (ret_val < 0) |
1012 | goto gentag_failure; | 1256 | return ret_val; |
1013 | 1257 | ||
1014 | /* This will send packets using the "optimized" format when | 1258 | /* This will send packets using the "optimized" format when |
1015 | * possible as specified in section 3.4.2.6 of the | 1259 | * possible as specified in section 3.4.2.6 of the |
1016 | * CIPSO draft. */ | 1260 | * CIPSO draft. */ |
1017 | if (cipso_v4_rbm_optfmt && (ret_val > 0 && ret_val < 10)) | 1261 | if (cipso_v4_rbm_optfmt && ret_val > 0 && ret_val <= 10) |
1018 | ret_val = 10; | 1262 | tag_len = 14; |
1263 | else | ||
1264 | tag_len = 4 + ret_val; | ||
1265 | } else | ||
1266 | tag_len = 4; | ||
1267 | |||
1268 | buffer[0] = 0x01; | ||
1269 | buffer[1] = tag_len; | ||
1270 | buffer[3] = level; | ||
1271 | |||
1272 | return tag_len; | ||
1273 | } | ||
1019 | 1274 | ||
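For reference, the tag type #1 body filled in above lays out as follows; byte 2 is the alignment octet, which stays zero because the caller memset()s the buffer, and the "optimized" branch pads the category bitmap out to 10 bytes so the whole option comes to a fixed 20 bytes (that reading of section 3.4.2.6 is inferred from the code, not quoted from the draft).

/*
 *   buffer[0]   = 0x01       tag type (restricted bitmap)
 *   buffer[1]   = tag_len    4 + category bitmap bytes (14 when padded)
 *   buffer[2]   = 0x00       alignment octet
 *   buffer[3]   = level      mapped sensitivity level
 *   buffer[4..] = category bitmap, category 0 in the MSB of buffer[4]
 */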
1020 | buf_len = 4 + ret_val; | 1275 | /** |
1021 | } else { | 1276 | * cipso_v4_parsetag_rbm - Parse a CIPSO restricted bitmap tag |
1022 | buf = kzalloc(CIPSO_V4_HDR_LEN + 4, GFP_ATOMIC); | 1277 | * @doi_def: the DOI definition |
1023 | if (buf == NULL) | 1278 | * @tag: the CIPSO tag |
1279 | * @secattr: the security attributes | ||
1280 | * | ||
1281 | * Description: | ||
1282 | * Parse a CIPSO restricted bitmap tag (tag type #1) and return the security | ||
1283 | * attributes in @secattr. Return zero on success, negative values on | ||
1284 | * failure. | ||
1285 | * | ||
1286 | */ | ||
1287 | static int cipso_v4_parsetag_rbm(const struct cipso_v4_doi *doi_def, | ||
1288 | const unsigned char *tag, | ||
1289 | struct netlbl_lsm_secattr *secattr) | ||
1290 | { | ||
1291 | int ret_val; | ||
1292 | u8 tag_len = tag[1]; | ||
1293 | u32 level; | ||
1294 | |||
1295 | ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level); | ||
1296 | if (ret_val != 0) | ||
1297 | return ret_val; | ||
1298 | secattr->mls_lvl = level; | ||
1299 | secattr->flags |= NETLBL_SECATTR_MLS_LVL; | ||
1300 | |||
1301 | if (tag_len > 4) { | ||
1302 | secattr->mls_cat = netlbl_secattr_catmap_alloc(GFP_ATOMIC); | ||
1303 | if (secattr->mls_cat == NULL) | ||
1024 | return -ENOMEM; | 1304 | return -ENOMEM; |
1025 | buf_len = 4; | 1305 | |
1306 | ret_val = cipso_v4_map_cat_rbm_ntoh(doi_def, | ||
1307 | &tag[4], | ||
1308 | tag_len - 4, | ||
1309 | secattr); | ||
1310 | if (ret_val != 0) { | ||
1311 | netlbl_secattr_catmap_free(secattr->mls_cat); | ||
1312 | return ret_val; | ||
1313 | } | ||
1314 | |||
1315 | secattr->flags |= NETLBL_SECATTR_MLS_CAT; | ||
1026 | } | 1316 | } |
1027 | 1317 | ||
1318 | return 0; | ||
1319 | } | ||
1320 | |||
1321 | /** | ||
1322 | * cipso_v4_gentag_enum - Generate a CIPSO enumerated tag (type #2) | ||
1323 | * @doi_def: the DOI definition | ||
1324 | * @secattr: the security attributes | ||
1325 | * @buffer: the option buffer | ||
1326 | * @buffer_len: length of buffer in bytes | ||
1327 | * | ||
1328 | * Description: | ||
1329 | * Generate a CIPSO option using the enumerated tag, tag type #2. Returns the | ||
1330 | * size of the tag on success, negative values on failure. | ||
1331 | * | ||
1332 | */ | ||
1333 | static int cipso_v4_gentag_enum(const struct cipso_v4_doi *doi_def, | ||
1334 | const struct netlbl_lsm_secattr *secattr, | ||
1335 | unsigned char *buffer, | ||
1336 | u32 buffer_len) | ||
1337 | { | ||
1338 | int ret_val; | ||
1339 | u32 tag_len; | ||
1340 | u32 level; | ||
1341 | |||
1342 | if (!(secattr->flags & NETLBL_SECATTR_MLS_LVL)) | ||
1343 | return -EPERM; | ||
1344 | |||
1028 | ret_val = cipso_v4_map_lvl_hton(doi_def, secattr->mls_lvl, &level); | 1345 | ret_val = cipso_v4_map_lvl_hton(doi_def, secattr->mls_lvl, &level); |
1029 | if (ret_val != 0) | 1346 | if (ret_val != 0) |
1030 | goto gentag_failure; | 1347 | return ret_val; |
1348 | |||
1349 | if (secattr->flags & NETLBL_SECATTR_MLS_CAT) { | ||
1350 | ret_val = cipso_v4_map_cat_enum_hton(doi_def, | ||
1351 | secattr, | ||
1352 | &buffer[4], | ||
1353 | buffer_len - 4); | ||
1354 | if (ret_val < 0) | ||
1355 | return ret_val; | ||
1356 | |||
1357 | tag_len = 4 + ret_val; | ||
1358 | } else | ||
1359 | tag_len = 4; | ||
1360 | |||
1361 | buffer[0] = 0x02; | ||
1362 | buffer[1] = tag_len; | ||
1363 | buffer[3] = level; | ||
1031 | 1364 | ||
1032 | ret_val = cipso_v4_gentag_hdr(doi_def, buf_len, buf); | 1365 | return tag_len; |
1366 | } | ||
1367 | |||
1368 | /** | ||
1369 | * cipso_v4_parsetag_enum - Parse a CIPSO enumerated tag | ||
1370 | * @doi_def: the DOI definition | ||
1371 | * @tag: the CIPSO tag | ||
1372 | * @secattr: the security attributes | ||
1373 | * | ||
1374 | * Description: | ||
1375 | * Parse a CIPSO enumerated tag (tag type #2) and return the security | ||
1376 | * attributes in @secattr. Return zero on success, negatives values on | ||
1377 | * failure. | ||
1378 | * | ||
1379 | */ | ||
1380 | static int cipso_v4_parsetag_enum(const struct cipso_v4_doi *doi_def, | ||
1381 | const unsigned char *tag, | ||
1382 | struct netlbl_lsm_secattr *secattr) | ||
1383 | { | ||
1384 | int ret_val; | ||
1385 | u8 tag_len = tag[1]; | ||
1386 | u32 level; | ||
1387 | |||
1388 | ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level); | ||
1033 | if (ret_val != 0) | 1389 | if (ret_val != 0) |
1034 | goto gentag_failure; | 1390 | return ret_val; |
1391 | secattr->mls_lvl = level; | ||
1392 | secattr->flags |= NETLBL_SECATTR_MLS_LVL; | ||
1393 | |||
1394 | if (tag_len > 4) { | ||
1395 | secattr->mls_cat = netlbl_secattr_catmap_alloc(GFP_ATOMIC); | ||
1396 | if (secattr->mls_cat == NULL) | ||
1397 | return -ENOMEM; | ||
1035 | 1398 | ||
1036 | buf[CIPSO_V4_HDR_LEN] = 0x01; | 1399 | ret_val = cipso_v4_map_cat_enum_ntoh(doi_def, |
1037 | buf[CIPSO_V4_HDR_LEN + 1] = buf_len; | 1400 | &tag[4], |
1038 | buf[CIPSO_V4_HDR_LEN + 3] = level; | 1401 | tag_len - 4, |
1402 | secattr); | ||
1403 | if (ret_val != 0) { | ||
1404 | netlbl_secattr_catmap_free(secattr->mls_cat); | ||
1405 | return ret_val; | ||
1406 | } | ||
1039 | 1407 | ||
1040 | *buffer = buf; | 1408 | secattr->flags |= NETLBL_SECATTR_MLS_CAT; |
1041 | *buffer_len = CIPSO_V4_HDR_LEN + buf_len; | 1409 | } |
1042 | 1410 | ||
1043 | return 0; | 1411 | return 0; |
1412 | } | ||
1044 | 1413 | ||
1045 | gentag_failure: | 1414 | /** |
1046 | kfree(buf); | 1415 | * cipso_v4_gentag_rng - Generate a CIPSO ranged tag (type #5) |
1047 | return ret_val; | 1416 | * @doi_def: the DOI definition |
1417 | * @secattr: the security attributes | ||
1418 | * @buffer: the option buffer | ||
1419 | * @buffer_len: length of buffer in bytes | ||
1420 | * | ||
1421 | * Description: | ||
1422 | * Generate a CIPSO option using the ranged tag, tag type #5. Returns the | ||
1423 | * size of the tag on success, negative values on failure. | ||
1424 | * | ||
1425 | */ | ||
1426 | static int cipso_v4_gentag_rng(const struct cipso_v4_doi *doi_def, | ||
1427 | const struct netlbl_lsm_secattr *secattr, | ||
1428 | unsigned char *buffer, | ||
1429 | u32 buffer_len) | ||
1430 | { | ||
1431 | int ret_val; | ||
1432 | u32 tag_len; | ||
1433 | u32 level; | ||
1434 | |||
1435 | if (!(secattr->flags & NETLBL_SECATTR_MLS_LVL)) | ||
1436 | return -EPERM; | ||
1437 | |||
1438 | ret_val = cipso_v4_map_lvl_hton(doi_def, secattr->mls_lvl, &level); | ||
1439 | if (ret_val != 0) | ||
1440 | return ret_val; | ||
1441 | |||
1442 | if (secattr->flags & NETLBL_SECATTR_MLS_CAT) { | ||
1443 | ret_val = cipso_v4_map_cat_rng_hton(doi_def, | ||
1444 | secattr, | ||
1445 | &buffer[4], | ||
1446 | buffer_len - 4); | ||
1447 | if (ret_val < 0) | ||
1448 | return ret_val; | ||
1449 | |||
1450 | tag_len = 4 + ret_val; | ||
1451 | } else | ||
1452 | tag_len = 4; | ||
1453 | |||
1454 | buffer[0] = 0x05; | ||
1455 | buffer[1] = tag_len; | ||
1456 | buffer[3] = level; | ||
1457 | |||
1458 | return tag_len; | ||
1048 | } | 1459 | } |
1049 | 1460 | ||
1050 | /** | 1461 | /** |
1051 | * cipso_v4_parsetag_rbm - Parse a CIPSO restricted bitmap tag | 1462 | * cipso_v4_parsetag_rng - Parse a CIPSO ranged tag |
1052 | * @doi_def: the DOI definition | 1463 | * @doi_def: the DOI definition |
1053 | * @tag: the CIPSO tag | 1464 | * @tag: the CIPSO tag |
1054 | * @secattr: the security attributes | 1465 | * @secattr: the security attributes |
1055 | * | 1466 | * |
1056 | * Description: | 1467 | * Description: |
1057 | * Parse a CIPSO restricted bitmap tag (tag type #1) and return the security | 1468 | * Parse a CIPSO ranged tag (tag type #5) and return the security attributes |
1058 | * attributes in @secattr. Return zero on success, negative values on | 1469 | * in @secattr. Return zero on success, negative values on failure. |
1059 | * failure. | ||
1060 | * | 1470 | * |
1061 | */ | 1471 | */ |
1062 | static int cipso_v4_parsetag_rbm(const struct cipso_v4_doi *doi_def, | 1472 | static int cipso_v4_parsetag_rng(const struct cipso_v4_doi *doi_def, |
1063 | const unsigned char *tag, | 1473 | const unsigned char *tag, |
1064 | struct netlbl_lsm_secattr *secattr) | 1474 | struct netlbl_lsm_secattr *secattr) |
1065 | { | 1475 | { |
@@ -1071,32 +1481,23 @@ static int cipso_v4_parsetag_rbm(const struct cipso_v4_doi *doi_def, | |||
1071 | if (ret_val != 0) | 1481 | if (ret_val != 0) |
1072 | return ret_val; | 1482 | return ret_val; |
1073 | secattr->mls_lvl = level; | 1483 | secattr->mls_lvl = level; |
1074 | secattr->mls_lvl_vld = 1; | 1484 | secattr->flags |= NETLBL_SECATTR_MLS_LVL; |
1075 | 1485 | ||
1076 | if (tag_len > 4) { | 1486 | if (tag_len > 4) { |
1077 | switch (doi_def->type) { | 1487 | secattr->mls_cat = netlbl_secattr_catmap_alloc(GFP_ATOMIC); |
1078 | case CIPSO_V4_MAP_PASS: | ||
1079 | secattr->mls_cat_len = tag_len - 4; | ||
1080 | break; | ||
1081 | case CIPSO_V4_MAP_STD: | ||
1082 | secattr->mls_cat_len = | ||
1083 | doi_def->map.std->cat.local_size; | ||
1084 | break; | ||
1085 | } | ||
1086 | secattr->mls_cat = kzalloc(secattr->mls_cat_len, GFP_ATOMIC); | ||
1087 | if (secattr->mls_cat == NULL) | 1488 | if (secattr->mls_cat == NULL) |
1088 | return -ENOMEM; | 1489 | return -ENOMEM; |
1089 | 1490 | ||
1090 | ret_val = cipso_v4_map_cat_rbm_ntoh(doi_def, | 1491 | ret_val = cipso_v4_map_cat_rng_ntoh(doi_def, |
1091 | &tag[4], | 1492 | &tag[4], |
1092 | tag_len - 4, | 1493 | tag_len - 4, |
1093 | secattr->mls_cat, | 1494 | secattr); |
1094 | secattr->mls_cat_len); | 1495 | if (ret_val != 0) { |
1095 | if (ret_val < 0) { | 1496 | netlbl_secattr_catmap_free(secattr->mls_cat); |
1096 | kfree(secattr->mls_cat); | ||
1097 | return ret_val; | 1497 | return ret_val; |
1098 | } | 1498 | } |
1099 | secattr->mls_cat_len = ret_val; | 1499 | |
1500 | secattr->flags |= NETLBL_SECATTR_MLS_CAT; | ||
1100 | } | 1501 | } |
1101 | 1502 | ||
1102 | return 0; | 1503 | return 0; |
@@ -1140,7 +1541,7 @@ int cipso_v4_validate(unsigned char **option) | |||
1140 | } | 1541 | } |
1141 | 1542 | ||
1142 | rcu_read_lock(); | 1543 | rcu_read_lock(); |
1143 | doi_def = cipso_v4_doi_getdef(ntohl(*((u32 *)&opt[2]))); | 1544 | doi_def = cipso_v4_doi_search(ntohl(*((__be32 *)&opt[2]))); |
1144 | if (doi_def == NULL) { | 1545 | if (doi_def == NULL) { |
1145 | err_offset = 2; | 1546 | err_offset = 2; |
1146 | goto validate_return_locked; | 1547 | goto validate_return_locked; |
@@ -1191,6 +1592,44 @@ int cipso_v4_validate(unsigned char **option) | |||
1191 | } | 1592 | } |
1192 | } | 1593 | } |
1193 | break; | 1594 | break; |
1595 | case CIPSO_V4_TAG_ENUM: | ||
1596 | if (tag_len < 4) { | ||
1597 | err_offset = opt_iter + 1; | ||
1598 | goto validate_return_locked; | ||
1599 | } | ||
1600 | |||
1601 | if (cipso_v4_map_lvl_valid(doi_def, | ||
1602 | tag[3]) < 0) { | ||
1603 | err_offset = opt_iter + 3; | ||
1604 | goto validate_return_locked; | ||
1605 | } | ||
1606 | if (tag_len > 4 && | ||
1607 | cipso_v4_map_cat_enum_valid(doi_def, | ||
1608 | &tag[4], | ||
1609 | tag_len - 4) < 0) { | ||
1610 | err_offset = opt_iter + 4; | ||
1611 | goto validate_return_locked; | ||
1612 | } | ||
1613 | break; | ||
1614 | case CIPSO_V4_TAG_RANGE: | ||
1615 | if (tag_len < 4) { | ||
1616 | err_offset = opt_iter + 1; | ||
1617 | goto validate_return_locked; | ||
1618 | } | ||
1619 | |||
1620 | if (cipso_v4_map_lvl_valid(doi_def, | ||
1621 | tag[3]) < 0) { | ||
1622 | err_offset = opt_iter + 3; | ||
1623 | goto validate_return_locked; | ||
1624 | } | ||
1625 | if (tag_len > 4 && | ||
1626 | cipso_v4_map_cat_rng_valid(doi_def, | ||
1627 | &tag[4], | ||
1628 | tag_len - 4) < 0) { | ||
1629 | err_offset = opt_iter + 4; | ||
1630 | goto validate_return_locked; | ||
1631 | } | ||
1632 | break; | ||
1194 | default: | 1633 | default: |
1195 | err_offset = opt_iter; | 1634 | err_offset = opt_iter; |
1196 | goto validate_return_locked; | 1635 | goto validate_return_locked; |
@@ -1265,7 +1704,7 @@ int cipso_v4_socket_setattr(const struct socket *sock, | |||
1265 | { | 1704 | { |
1266 | int ret_val = -EPERM; | 1705 | int ret_val = -EPERM; |
1267 | u32 iter; | 1706 | u32 iter; |
1268 | unsigned char *buf = NULL; | 1707 | unsigned char *buf; |
1269 | u32 buf_len = 0; | 1708 | u32 buf_len = 0; |
1270 | u32 opt_len; | 1709 | u32 opt_len; |
1271 | struct ip_options *opt = NULL; | 1710 | struct ip_options *opt = NULL; |
@@ -1281,17 +1720,40 @@ int cipso_v4_socket_setattr(const struct socket *sock, | |||
1281 | if (sk == NULL) | 1720 | if (sk == NULL) |
1282 | return 0; | 1721 | return 0; |
1283 | 1722 | ||
1723 | /* We allocate the maximum CIPSO option size here so we are probably | ||
1724 | * being a little wasteful, but it makes our life _much_ easier later | ||
1725 | * on and after all we are only talking about 40 bytes. */ | ||
1726 | buf_len = CIPSO_V4_OPT_LEN_MAX; | ||
1727 | buf = kmalloc(buf_len, GFP_ATOMIC); | ||
1728 | if (buf == NULL) { | ||
1729 | ret_val = -ENOMEM; | ||
1730 | goto socket_setattr_failure; | ||
1731 | } | ||
1732 | |||
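As a sanity check on the "40 bytes" above: an IPv4 header is at most 60 bytes (an IHL of 15 words) and the fixed header takes 20 of them, so 40 bytes is the entire option space and therefore the largest CIPSO option that can ever be built, which is exactly CIPSO_V4_OPT_LEN_MAX.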
1284 | /* XXX - This code assumes only one tag per CIPSO option which isn't | 1733 | /* XXX - This code assumes only one tag per CIPSO option which isn't |
1285 | * really a good assumption to make but since we only support the MAC | 1734 | * really a good assumption to make but since we only support the MAC |
1286 | * tags right now it is a safe assumption. */ | 1735 | * tags right now it is a safe assumption. */ |
1287 | iter = 0; | 1736 | iter = 0; |
1288 | do { | 1737 | do { |
1738 | memset(buf, 0, buf_len); | ||
1289 | switch (doi_def->tags[iter]) { | 1739 | switch (doi_def->tags[iter]) { |
1290 | case CIPSO_V4_TAG_RBITMAP: | 1740 | case CIPSO_V4_TAG_RBITMAP: |
1291 | ret_val = cipso_v4_gentag_rbm(doi_def, | 1741 | ret_val = cipso_v4_gentag_rbm(doi_def, |
1292 | secattr, | 1742 | secattr, |
1293 | &buf, | 1743 | &buf[CIPSO_V4_HDR_LEN], |
1294 | &buf_len); | 1744 | buf_len - CIPSO_V4_HDR_LEN); |
1745 | break; | ||
1746 | case CIPSO_V4_TAG_ENUM: | ||
1747 | ret_val = cipso_v4_gentag_enum(doi_def, | ||
1748 | secattr, | ||
1749 | &buf[CIPSO_V4_HDR_LEN], | ||
1750 | buf_len - CIPSO_V4_HDR_LEN); | ||
1751 | break; | ||
1752 | case CIPSO_V4_TAG_RANGE: | ||
1753 | ret_val = cipso_v4_gentag_rng(doi_def, | ||
1754 | secattr, | ||
1755 | &buf[CIPSO_V4_HDR_LEN], | ||
1756 | buf_len - CIPSO_V4_HDR_LEN); | ||
1295 | break; | 1757 | break; |
1296 | default: | 1758 | default: |
1297 | ret_val = -EPERM; | 1759 | ret_val = -EPERM; |
@@ -1299,11 +1761,13 @@ int cipso_v4_socket_setattr(const struct socket *sock, | |||
1299 | } | 1761 | } |
1300 | 1762 | ||
1301 | iter++; | 1763 | iter++; |
1302 | } while (ret_val != 0 && | 1764 | } while (ret_val < 0 && |
1303 | iter < CIPSO_V4_TAG_MAXCNT && | 1765 | iter < CIPSO_V4_TAG_MAXCNT && |
1304 | doi_def->tags[iter] != CIPSO_V4_TAG_INVALID); | 1766 | doi_def->tags[iter] != CIPSO_V4_TAG_INVALID); |
1305 | if (ret_val != 0) | 1767 | if (ret_val < 0) |
1306 | goto socket_setattr_failure; | 1768 | goto socket_setattr_failure; |
1769 | cipso_v4_gentag_hdr(doi_def, buf, ret_val); | ||
1770 | buf_len = CIPSO_V4_HDR_LEN + ret_val; | ||
1307 | 1771 | ||
1308 | /* We can't use ip_options_get() directly because it makes a call to | 1772 | /* We can't use ip_options_get() directly because it makes a call to |
1309 | * ip_options_get_alloc() which allocates memory with GFP_KERNEL and | 1773 | * ip_options_get_alloc() which allocates memory with GFP_KERNEL and |
@@ -1370,19 +1834,33 @@ int cipso_v4_sock_getattr(struct sock *sk, struct netlbl_lsm_secattr *secattr) | |||
1370 | if (ret_val == 0) | 1834 | if (ret_val == 0) |
1371 | return ret_val; | 1835 | return ret_val; |
1372 | 1836 | ||
1373 | doi = ntohl(*(u32 *)&cipso_ptr[2]); | 1837 | doi = ntohl(*(__be32 *)&cipso_ptr[2]); |
1374 | rcu_read_lock(); | 1838 | rcu_read_lock(); |
1375 | doi_def = cipso_v4_doi_getdef(doi); | 1839 | doi_def = cipso_v4_doi_search(doi); |
1376 | if (doi_def == NULL) { | 1840 | if (doi_def == NULL) { |
1377 | rcu_read_unlock(); | 1841 | rcu_read_unlock(); |
1378 | return -ENOMSG; | 1842 | return -ENOMSG; |
1379 | } | 1843 | } |
1844 | |||
1845 | /* XXX - This code assumes only one tag per CIPSO option which isn't | ||
1846 | * really a good assumption to make but since we only support the MAC | ||
1847 | * tags right now it is a safe assumption. */ | ||
1380 | switch (cipso_ptr[6]) { | 1848 | switch (cipso_ptr[6]) { |
1381 | case CIPSO_V4_TAG_RBITMAP: | 1849 | case CIPSO_V4_TAG_RBITMAP: |
1382 | ret_val = cipso_v4_parsetag_rbm(doi_def, | 1850 | ret_val = cipso_v4_parsetag_rbm(doi_def, |
1383 | &cipso_ptr[6], | 1851 | &cipso_ptr[6], |
1384 | secattr); | 1852 | secattr); |
1385 | break; | 1853 | break; |
1854 | case CIPSO_V4_TAG_ENUM: | ||
1855 | ret_val = cipso_v4_parsetag_enum(doi_def, | ||
1856 | &cipso_ptr[6], | ||
1857 | secattr); | ||
1858 | break; | ||
1859 | case CIPSO_V4_TAG_RANGE: | ||
1860 | ret_val = cipso_v4_parsetag_rng(doi_def, | ||
1861 | &cipso_ptr[6], | ||
1862 | secattr); | ||
1863 | break; | ||
1386 | } | 1864 | } |
1387 | rcu_read_unlock(); | 1865 | rcu_read_unlock(); |
1388 | 1866 | ||
@@ -1430,23 +1908,30 @@ int cipso_v4_skbuff_getattr(const struct sk_buff *skb, | |||
1430 | u32 doi; | 1908 | u32 doi; |
1431 | struct cipso_v4_doi *doi_def; | 1909 | struct cipso_v4_doi *doi_def; |
1432 | 1910 | ||
1433 | if (!CIPSO_V4_OPTEXIST(skb)) | ||
1434 | return -ENOMSG; | ||
1435 | cipso_ptr = CIPSO_V4_OPTPTR(skb); | 1911 | cipso_ptr = CIPSO_V4_OPTPTR(skb); |
1436 | if (cipso_v4_cache_check(cipso_ptr, cipso_ptr[1], secattr) == 0) | 1912 | if (cipso_v4_cache_check(cipso_ptr, cipso_ptr[1], secattr) == 0) |
1437 | return 0; | 1913 | return 0; |
1438 | 1914 | ||
1439 | doi = ntohl(*(u32 *)&cipso_ptr[2]); | 1915 | doi = ntohl(*(__be32 *)&cipso_ptr[2]); |
1440 | rcu_read_lock(); | 1916 | rcu_read_lock(); |
1441 | doi_def = cipso_v4_doi_getdef(doi); | 1917 | doi_def = cipso_v4_doi_search(doi); |
1442 | if (doi_def == NULL) | 1918 | if (doi_def == NULL) |
1443 | goto skbuff_getattr_return; | 1919 | goto skbuff_getattr_return; |
1920 | |||
1921 | /* XXX - This code assumes only one tag per CIPSO option which isn't | ||
1922 | * really a good assumption to make but since we only support the MAC | ||
1923 | * tags right now it is a safe assumption. */ | ||
1444 | switch (cipso_ptr[6]) { | 1924 | switch (cipso_ptr[6]) { |
1445 | case CIPSO_V4_TAG_RBITMAP: | 1925 | case CIPSO_V4_TAG_RBITMAP: |
1446 | ret_val = cipso_v4_parsetag_rbm(doi_def, | 1926 | ret_val = cipso_v4_parsetag_rbm(doi_def, |
1447 | &cipso_ptr[6], | 1927 | &cipso_ptr[6], |
1448 | secattr); | 1928 | secattr); |
1449 | break; | 1929 | break; |
1930 | case CIPSO_V4_TAG_ENUM: | ||
1931 | ret_val = cipso_v4_parsetag_enum(doi_def, | ||
1932 | &cipso_ptr[6], | ||
1933 | secattr); | ||
1934 | break; | ||
1450 | } | 1935 | } |
1451 | 1936 | ||
1452 | skbuff_getattr_return: | 1937 | skbuff_getattr_return: |
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index 7602c79a389b..2fd899160f85 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c | |||
@@ -577,20 +577,20 @@ static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg | |||
577 | * Determine a default network mask, based on the IP address. | 577 | * Determine a default network mask, based on the IP address. |
578 | */ | 578 | */ |
579 | 579 | ||
580 | static __inline__ int inet_abc_len(u32 addr) | 580 | static __inline__ int inet_abc_len(__be32 addr) |
581 | { | 581 | { |
582 | int rc = -1; /* Something else, probably a multicast. */ | 582 | int rc = -1; /* Something else, probably a multicast. */ |
583 | 583 | ||
584 | if (ZERONET(addr)) | 584 | if (ZERONET(addr)) |
585 | rc = 0; | 585 | rc = 0; |
586 | else { | 586 | else { |
587 | addr = ntohl(addr); | 587 | __u32 haddr = ntohl(addr); |
588 | 588 | ||
589 | if (IN_CLASSA(addr)) | 589 | if (IN_CLASSA(haddr)) |
590 | rc = 8; | 590 | rc = 8; |
591 | else if (IN_CLASSB(addr)) | 591 | else if (IN_CLASSB(haddr)) |
592 | rc = 16; | 592 | rc = 16; |
593 | else if (IN_CLASSC(addr)) | 593 | else if (IN_CLASSC(haddr)) |
594 | rc = 24; | 594 | rc = 24; |
595 | } | 595 | } |
596 | 596 | ||
@@ -1120,6 +1120,16 @@ static struct notifier_block ip_netdev_notifier = { | |||
1120 | .notifier_call =inetdev_event, | 1120 | .notifier_call =inetdev_event, |
1121 | }; | 1121 | }; |
1122 | 1122 | ||
1123 | static inline size_t inet_nlmsg_size(void) | ||
1124 | { | ||
1125 | return NLMSG_ALIGN(sizeof(struct ifaddrmsg)) | ||
1126 | + nla_total_size(4) /* IFA_ADDRESS */ | ||
1127 | + nla_total_size(4) /* IFA_LOCAL */ | ||
1128 | + nla_total_size(4) /* IFA_BROADCAST */ | ||
1129 | + nla_total_size(4) /* IFA_ANYCAST */ | ||
1130 | + nla_total_size(IFNAMSIZ); /* IFA_LABEL */ | ||
1131 | } | ||
1132 | |||
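This estimate is what lets the notification path below swap its error handling for a BUG_ON(): the message can never outgrow the allocation. Roughly, assuming the usual 4-byte netlink attribute header and alignment and an IFNAMSIZ of 16:

/*
 *   NLMSG_ALIGN(sizeof(struct ifaddrmsg))  =  8 bytes
 *   4 * nla_total_size(4)                  = 32 bytes  (ADDRESS, LOCAL,
 *                                                       BROADCAST, ANYCAST)
 *   nla_total_size(IFNAMSIZ)               = 20 bytes  (LABEL)
 *                                            --------
 *                                             60 bytes per ifaddr message
 */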
1123 | static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa, | 1133 | static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa, |
1124 | u32 pid, u32 seq, int event, unsigned int flags) | 1134 | u32 pid, u32 seq, int event, unsigned int flags) |
1125 | { | 1135 | { |
@@ -1208,15 +1218,13 @@ static void rtmsg_ifa(int event, struct in_ifaddr* ifa, struct nlmsghdr *nlh, | |||
1208 | u32 seq = nlh ? nlh->nlmsg_seq : 0; | 1218 | u32 seq = nlh ? nlh->nlmsg_seq : 0; |
1209 | int err = -ENOBUFS; | 1219 | int err = -ENOBUFS; |
1210 | 1220 | ||
1211 | skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); | 1221 | skb = nlmsg_new(inet_nlmsg_size(), GFP_KERNEL); |
1212 | if (skb == NULL) | 1222 | if (skb == NULL) |
1213 | goto errout; | 1223 | goto errout; |
1214 | 1224 | ||
1215 | err = inet_fill_ifaddr(skb, ifa, pid, seq, event, 0); | 1225 | err = inet_fill_ifaddr(skb, ifa, pid, seq, event, 0); |
1216 | if (err < 0) { | 1226 | /* failure implies BUG in inet_nlmsg_size() */ |
1217 | kfree_skb(skb); | 1227 | BUG_ON(err < 0); |
1218 | goto errout; | ||
1219 | } | ||
1220 | 1228 | ||
1221 | err = rtnl_notify(skb, pid, RTNLGRP_IPV4_IFADDR, nlh, GFP_KERNEL); | 1229 | err = rtnl_notify(skb, pid, RTNLGRP_IPV4_IFADDR, nlh, GFP_KERNEL); |
1222 | errout: | 1230 | errout: |
@@ -1556,12 +1564,12 @@ static void devinet_sysctl_register(struct in_device *in_dev, | |||
1556 | { | 1564 | { |
1557 | int i; | 1565 | int i; |
1558 | struct net_device *dev = in_dev ? in_dev->dev : NULL; | 1566 | struct net_device *dev = in_dev ? in_dev->dev : NULL; |
1559 | struct devinet_sysctl_table *t = kmalloc(sizeof(*t), GFP_KERNEL); | 1567 | struct devinet_sysctl_table *t = kmemdup(&devinet_sysctl, sizeof(*t), |
1568 | GFP_KERNEL); | ||
1560 | char *dev_name = NULL; | 1569 | char *dev_name = NULL; |
1561 | 1570 | ||
1562 | if (!t) | 1571 | if (!t) |
1563 | return; | 1572 | return; |
1564 | memcpy(t, &devinet_sysctl, sizeof(*t)); | ||
1565 | for (i = 0; i < ARRAY_SIZE(t->devinet_vars) - 1; i++) { | 1573 | for (i = 0; i < ARRAY_SIZE(t->devinet_vars) - 1; i++) { |
1566 | t->devinet_vars[i].data += (char *)p - (char *)&ipv4_devconf; | 1574 | t->devinet_vars[i].data += (char *)p - (char *)&ipv4_devconf; |
1567 | t->devinet_vars[i].de = NULL; | 1575 | t->devinet_vars[i].de = NULL; |
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index b5c205b57669..f2c6776ea0e6 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c | |||
@@ -67,7 +67,7 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb) | |||
67 | if (x->encap) { | 67 | if (x->encap) { |
68 | struct xfrm_encap_tmpl *encap = x->encap; | 68 | struct xfrm_encap_tmpl *encap = x->encap; |
69 | struct udphdr *uh; | 69 | struct udphdr *uh; |
70 | u32 *udpdata32; | 70 | __be32 *udpdata32; |
71 | 71 | ||
72 | uh = (struct udphdr *)esph; | 72 | uh = (struct udphdr *)esph; |
73 | uh->source = encap->encap_sport; | 73 | uh->source = encap->encap_sport; |
@@ -81,7 +81,7 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb) | |||
81 | esph = (struct ip_esp_hdr *)(uh + 1); | 81 | esph = (struct ip_esp_hdr *)(uh + 1); |
82 | break; | 82 | break; |
83 | case UDP_ENCAP_ESPINUDP_NON_IKE: | 83 | case UDP_ENCAP_ESPINUDP_NON_IKE: |
84 | udpdata32 = (u32 *)(uh + 1); | 84 | udpdata32 = (__be32 *)(uh + 1); |
85 | udpdata32[0] = udpdata32[1] = 0; | 85 | udpdata32[0] = udpdata32[1] = 0; |
86 | esph = (struct ip_esp_hdr *)(udpdata32 + 2); | 86 | esph = (struct ip_esp_hdr *)(udpdata32 + 2); |
87 | break; | 87 | break; |
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index af0190d8b6c0..d47b72af89ed 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c | |||
@@ -768,8 +768,8 @@ static void nl_fib_lookup(struct fib_result_nl *frn, struct fib_table *tb ) | |||
768 | { | 768 | { |
769 | 769 | ||
770 | struct fib_result res; | 770 | struct fib_result res; |
771 | struct flowi fl = { .nl_u = { .ip4_u = { .daddr = frn->fl_addr, | 771 | struct flowi fl = { .mark = frn->fl_mark, |
772 | .fwmark = frn->fl_fwmark, | 772 | .nl_u = { .ip4_u = { .daddr = frn->fl_addr, |
773 | .tos = frn->fl_tos, | 773 | .tos = frn->fl_tos, |
774 | .scope = frn->fl_scope } } }; | 774 | .scope = frn->fl_scope } } }; |
775 | if (tb) { | 775 | if (tb) { |
@@ -811,7 +811,6 @@ static void nl_fib_input(struct sock *sk, int len) | |||
811 | 811 | ||
812 | pid = nlh->nlmsg_pid; /*pid of sending process */ | 812 | pid = nlh->nlmsg_pid; /*pid of sending process */ |
813 | NETLINK_CB(skb).pid = 0; /* from kernel */ | 813 | NETLINK_CB(skb).pid = 0; /* from kernel */ |
814 | NETLINK_CB(skb).dst_pid = pid; | ||
815 | NETLINK_CB(skb).dst_group = 0; /* unicast */ | 814 | NETLINK_CB(skb).dst_group = 0; /* unicast */ |
816 | netlink_unicast(sk, skb, pid, MSG_DONTWAIT); | 815 | netlink_unicast(sk, skb, pid, MSG_DONTWAIT); |
817 | } | 816 | } |
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c index 0852b9cd065a..b837c33e0404 100644 --- a/net/ipv4/fib_rules.c +++ b/net/ipv4/fib_rules.c | |||
@@ -44,10 +44,6 @@ struct fib4_rule | |||
44 | __be32 srcmask; | 44 | __be32 srcmask; |
45 | __be32 dst; | 45 | __be32 dst; |
46 | __be32 dstmask; | 46 | __be32 dstmask; |
47 | #ifdef CONFIG_IP_ROUTE_FWMARK | ||
48 | u32 fwmark; | ||
49 | u32 fwmask; | ||
50 | #endif | ||
51 | #ifdef CONFIG_NET_CLS_ROUTE | 47 | #ifdef CONFIG_NET_CLS_ROUTE |
52 | u32 tclassid; | 48 | u32 tclassid; |
53 | #endif | 49 | #endif |
@@ -160,11 +156,6 @@ static int fib4_rule_match(struct fib_rule *rule, struct flowi *fl, int flags) | |||
160 | if (r->tos && (r->tos != fl->fl4_tos)) | 156 | if (r->tos && (r->tos != fl->fl4_tos)) |
161 | return 0; | 157 | return 0; |
162 | 158 | ||
163 | #ifdef CONFIG_IP_ROUTE_FWMARK | ||
164 | if ((r->fwmark ^ fl->fl4_fwmark) & r->fwmask) | ||
165 | return 0; | ||
166 | #endif | ||
167 | |||
168 | return 1; | 159 | return 1; |
169 | } | 160 | } |
170 | 161 | ||
@@ -179,14 +170,10 @@ static struct fib_table *fib_empty_table(void) | |||
179 | } | 170 | } |
180 | 171 | ||
181 | static struct nla_policy fib4_rule_policy[FRA_MAX+1] __read_mostly = { | 172 | static struct nla_policy fib4_rule_policy[FRA_MAX+1] __read_mostly = { |
182 | [FRA_IFNAME] = { .type = NLA_STRING, .len = IFNAMSIZ - 1 }, | 173 | FRA_GENERIC_POLICY, |
183 | [FRA_PRIORITY] = { .type = NLA_U32 }, | ||
184 | [FRA_SRC] = { .type = NLA_U32 }, | 174 | [FRA_SRC] = { .type = NLA_U32 }, |
185 | [FRA_DST] = { .type = NLA_U32 }, | 175 | [FRA_DST] = { .type = NLA_U32 }, |
186 | [FRA_FWMARK] = { .type = NLA_U32 }, | ||
187 | [FRA_FWMASK] = { .type = NLA_U32 }, | ||
188 | [FRA_FLOW] = { .type = NLA_U32 }, | 176 | [FRA_FLOW] = { .type = NLA_U32 }, |
189 | [FRA_TABLE] = { .type = NLA_U32 }, | ||
190 | }; | 177 | }; |
191 | 178 | ||
192 | static int fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb, | 179 | static int fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb, |
@@ -220,20 +207,6 @@ static int fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb, | |||
220 | if (tb[FRA_DST]) | 207 | if (tb[FRA_DST]) |
221 | rule4->dst = nla_get_be32(tb[FRA_DST]); | 208 | rule4->dst = nla_get_be32(tb[FRA_DST]); |
222 | 209 | ||
223 | #ifdef CONFIG_IP_ROUTE_FWMARK | ||
224 | if (tb[FRA_FWMARK]) { | ||
225 | rule4->fwmark = nla_get_u32(tb[FRA_FWMARK]); | ||
226 | if (rule4->fwmark) | ||
227 | /* compatibility: if the mark value is non-zero all bits | ||
228 | * are compared unless a mask is explicitly specified. | ||
229 | */ | ||
230 | rule4->fwmask = 0xFFFFFFFF; | ||
231 | } | ||
232 | |||
233 | if (tb[FRA_FWMASK]) | ||
234 | rule4->fwmask = nla_get_u32(tb[FRA_FWMASK]); | ||
235 | #endif | ||
236 | |||
237 | #ifdef CONFIG_NET_CLS_ROUTE | 210 | #ifdef CONFIG_NET_CLS_ROUTE |
238 | if (tb[FRA_FLOW]) | 211 | if (tb[FRA_FLOW]) |
239 | rule4->tclassid = nla_get_u32(tb[FRA_FLOW]); | 212 | rule4->tclassid = nla_get_u32(tb[FRA_FLOW]); |
@@ -264,14 +237,6 @@ static int fib4_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh, | |||
264 | if (frh->tos && (rule4->tos != frh->tos)) | 237 | if (frh->tos && (rule4->tos != frh->tos)) |
265 | return 0; | 238 | return 0; |
266 | 239 | ||
267 | #ifdef CONFIG_IP_ROUTE_FWMARK | ||
268 | if (tb[FRA_FWMARK] && (rule4->fwmark != nla_get_u32(tb[FRA_FWMARK]))) | ||
269 | return 0; | ||
270 | |||
271 | if (tb[FRA_FWMASK] && (rule4->fwmask != nla_get_u32(tb[FRA_FWMASK]))) | ||
272 | return 0; | ||
273 | #endif | ||
274 | |||
275 | #ifdef CONFIG_NET_CLS_ROUTE | 240 | #ifdef CONFIG_NET_CLS_ROUTE |
276 | if (tb[FRA_FLOW] && (rule4->tclassid != nla_get_u32(tb[FRA_FLOW]))) | 241 | if (tb[FRA_FLOW] && (rule4->tclassid != nla_get_u32(tb[FRA_FLOW]))) |
277 | return 0; | 242 | return 0; |
@@ -296,14 +261,6 @@ static int fib4_rule_fill(struct fib_rule *rule, struct sk_buff *skb, | |||
296 | frh->src_len = rule4->src_len; | 261 | frh->src_len = rule4->src_len; |
297 | frh->tos = rule4->tos; | 262 | frh->tos = rule4->tos; |
298 | 263 | ||
299 | #ifdef CONFIG_IP_ROUTE_FWMARK | ||
300 | if (rule4->fwmark) | ||
301 | NLA_PUT_U32(skb, FRA_FWMARK, rule4->fwmark); | ||
302 | |||
303 | if (rule4->fwmask || rule4->fwmark) | ||
304 | NLA_PUT_U32(skb, FRA_FWMASK, rule4->fwmask); | ||
305 | #endif | ||
306 | |||
307 | if (rule4->dst_len) | 264 | if (rule4->dst_len) |
308 | NLA_PUT_BE32(skb, FRA_DST, rule4->dst); | 265 | NLA_PUT_BE32(skb, FRA_DST, rule4->dst); |
309 | 266 | ||
@@ -342,6 +299,13 @@ static u32 fib4_rule_default_pref(void) | |||
342 | return 0; | 299 | return 0; |
343 | } | 300 | } |
344 | 301 | ||
302 | static size_t fib4_rule_nlmsg_payload(struct fib_rule *rule) | ||
303 | { | ||
304 | return nla_total_size(4) /* dst */ | ||
305 | + nla_total_size(4) /* src */ | ||
306 | + nla_total_size(4); /* flow */ | ||
307 | } | ||
308 | |||
345 | static struct fib_rules_ops fib4_rules_ops = { | 309 | static struct fib_rules_ops fib4_rules_ops = { |
346 | .family = AF_INET, | 310 | .family = AF_INET, |
347 | .rule_size = sizeof(struct fib4_rule), | 311 | .rule_size = sizeof(struct fib4_rule), |
@@ -351,6 +315,7 @@ static struct fib_rules_ops fib4_rules_ops = { | |||
351 | .compare = fib4_rule_compare, | 315 | .compare = fib4_rule_compare, |
352 | .fill = fib4_rule_fill, | 316 | .fill = fib4_rule_fill, |
353 | .default_pref = fib4_rule_default_pref, | 317 | .default_pref = fib4_rule_default_pref, |
318 | .nlmsg_payload = fib4_rule_nlmsg_payload, | ||
354 | .nlgroup = RTNLGRP_IPV4_RULE, | 319 | .nlgroup = RTNLGRP_IPV4_RULE, |
355 | .policy = fib4_rule_policy, | 320 | .policy = fib4_rule_policy, |
356 | .rules_list = &fib4_rules, | 321 | .rules_list = &fib4_rules, |
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index 884d176e0082..e63b8a98fb4d 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c | |||
@@ -273,25 +273,49 @@ int ip_fib_check_default(__be32 gw, struct net_device *dev) | |||
273 | return -1; | 273 | return -1; |
274 | } | 274 | } |
275 | 275 | ||
276 | static inline size_t fib_nlmsg_size(struct fib_info *fi) | ||
277 | { | ||
278 | size_t payload = NLMSG_ALIGN(sizeof(struct rtmsg)) | ||
279 | + nla_total_size(4) /* RTA_TABLE */ | ||
280 | + nla_total_size(4) /* RTA_DST */ | ||
281 | + nla_total_size(4) /* RTA_PRIORITY */ | ||
282 | + nla_total_size(4); /* RTA_PREFSRC */ | ||
283 | |||
284 | /* space for nested metrics */ | ||
285 | payload += nla_total_size((RTAX_MAX * nla_total_size(4))); | ||
286 | |||
287 | if (fi->fib_nhs) { | ||
288 | /* Also handles the special case fib_nhs == 1 */ | ||
289 | |||
290 | /* each nexthop is packed in an attribute */ | ||
291 | size_t nhsize = nla_total_size(sizeof(struct rtnexthop)); | ||
292 | |||
293 | /* may contain flow and gateway attribute */ | ||
294 | nhsize += 2 * nla_total_size(4); | ||
295 | |||
296 | /* all nexthops are packed in a nested attribute */ | ||
297 | payload += nla_total_size(fi->fib_nhs * nhsize); | ||
298 | } | ||
299 | |||
300 | return payload; | ||
301 | } | ||
302 | |||
276 | void rtmsg_fib(int event, __be32 key, struct fib_alias *fa, | 303 | void rtmsg_fib(int event, __be32 key, struct fib_alias *fa, |
277 | int dst_len, u32 tb_id, struct nl_info *info) | 304 | int dst_len, u32 tb_id, struct nl_info *info) |
278 | { | 305 | { |
279 | struct sk_buff *skb; | 306 | struct sk_buff *skb; |
280 | int payload = sizeof(struct rtmsg) + 256; | ||
281 | u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0; | 307 | u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0; |
282 | int err = -ENOBUFS; | 308 | int err = -ENOBUFS; |
283 | 309 | ||
284 | skb = nlmsg_new(nlmsg_total_size(payload), GFP_KERNEL); | 310 | skb = nlmsg_new(fib_nlmsg_size(fa->fa_info), GFP_KERNEL); |
285 | if (skb == NULL) | 311 | if (skb == NULL) |
286 | goto errout; | 312 | goto errout; |
287 | 313 | ||
288 | err = fib_dump_info(skb, info->pid, seq, event, tb_id, | 314 | err = fib_dump_info(skb, info->pid, seq, event, tb_id, |
289 | fa->fa_type, fa->fa_scope, key, dst_len, | 315 | fa->fa_type, fa->fa_scope, key, dst_len, |
290 | fa->fa_tos, fa->fa_info, 0); | 316 | fa->fa_tos, fa->fa_info, 0); |
291 | if (err < 0) { | 317 | /* failure implies BUG in fib_nlmsg_size() */ |
292 | kfree_skb(skb); | 318 | BUG_ON(err < 0); |
293 | goto errout; | ||
294 | } | ||
295 | 319 | ||
296 | err = rtnl_notify(skb, info->pid, RTNLGRP_IPV4_ROUTE, | 320 | err = rtnl_notify(skb, info->pid, RTNLGRP_IPV4_ROUTE, |
297 | info->nlh, GFP_KERNEL); | 321 | info->nlh, GFP_KERNEL); |
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index b39a37a47545..40cf0d0e1b83 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c | |||
@@ -332,7 +332,7 @@ static int icmp_glue_bits(void *from, char *to, int offset, int len, int odd, | |||
332 | struct sk_buff *skb) | 332 | struct sk_buff *skb) |
333 | { | 333 | { |
334 | struct icmp_bxm *icmp_param = (struct icmp_bxm *)from; | 334 | struct icmp_bxm *icmp_param = (struct icmp_bxm *)from; |
335 | unsigned int csum; | 335 | __wsum csum; |
336 | 336 | ||
337 | csum = skb_copy_and_csum_bits(icmp_param->skb, | 337 | csum = skb_copy_and_csum_bits(icmp_param->skb, |
338 | icmp_param->offset + offset, | 338 | icmp_param->offset + offset, |
@@ -356,7 +356,7 @@ static void icmp_push_reply(struct icmp_bxm *icmp_param, | |||
356 | ip_flush_pending_frames(icmp_socket->sk); | 356 | ip_flush_pending_frames(icmp_socket->sk); |
357 | else if ((skb = skb_peek(&icmp_socket->sk->sk_write_queue)) != NULL) { | 357 | else if ((skb = skb_peek(&icmp_socket->sk->sk_write_queue)) != NULL) { |
358 | struct icmphdr *icmph = skb->h.icmph; | 358 | struct icmphdr *icmph = skb->h.icmph; |
359 | unsigned int csum = 0; | 359 | __wsum csum = 0; |
360 | struct sk_buff *skb1; | 360 | struct sk_buff *skb1; |
361 | 361 | ||
362 | skb_queue_walk(&icmp_socket->sk->sk_write_queue, skb1) { | 362 | skb_queue_walk(&icmp_socket->sk->sk_write_queue, skb1) { |
@@ -931,7 +931,7 @@ int icmp_rcv(struct sk_buff *skb) | |||
931 | 931 | ||
932 | switch (skb->ip_summed) { | 932 | switch (skb->ip_summed) { |
933 | case CHECKSUM_COMPLETE: | 933 | case CHECKSUM_COMPLETE: |
934 | if (!(u16)csum_fold(skb->csum)) | 934 | if (!csum_fold(skb->csum)) |
935 | break; | 935 | break; |
936 | /* fall through */ | 936 | /* fall through */ |
937 | case CHECKSUM_NONE: | 937 | case CHECKSUM_NONE: |
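The dropped (u16) cast here (and in the igmp.c and ip_gre.c hunks below) follows from csum_fold() now returning the annotated __sum16 type rather than a plain integer. Functionally it is still the usual ones'-complement fold, roughly the userspace sketch below; a checksum verifies when the fold of the running sum comes out as zero.

#include <stdint.h>

/* Fold a 32-bit partial checksum into the final 16-bit
 * ones'-complement value (userspace approximation of csum_fold()). */
static uint16_t fold32(uint32_t sum)
{
        sum = (sum & 0xffff) + (sum >> 16);   /* fold the carries once */
        sum = (sum & 0xffff) + (sum >> 16);   /* ...and any new carry  */
        return (uint16_t)~sum;
}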
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 6eee71647b7c..0017ccb01d6d 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c | |||
@@ -932,7 +932,7 @@ int igmp_rcv(struct sk_buff *skb) | |||
932 | 932 | ||
933 | switch (skb->ip_summed) { | 933 | switch (skb->ip_summed) { |
934 | case CHECKSUM_COMPLETE: | 934 | case CHECKSUM_COMPLETE: |
935 | if (!(u16)csum_fold(skb->csum)) | 935 | if (!csum_fold(skb->csum)) |
936 | break; | 936 | break; |
937 | /* fall through */ | 937 | /* fall through */ |
938 | case CHECKSUM_NONE: | 938 | case CHECKSUM_NONE: |
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 96bbe2a0aa1b..9d68837888d3 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c | |||
@@ -343,7 +343,7 @@ struct dst_entry* inet_csk_route_req(struct sock *sk, | |||
343 | EXPORT_SYMBOL_GPL(inet_csk_route_req); | 343 | EXPORT_SYMBOL_GPL(inet_csk_route_req); |
344 | 344 | ||
345 | static inline u32 inet_synq_hash(const __be32 raddr, const __be16 rport, | 345 | static inline u32 inet_synq_hash(const __be32 raddr, const __be16 rport, |
346 | const u32 rnd, const u16 synq_hsize) | 346 | const u32 rnd, const u32 synq_hsize) |
347 | { | 347 | { |
348 | return jhash_2words((__force u32)raddr, (__force u32)rport, rnd) & (synq_hsize - 1); | 348 | return jhash_2words((__force u32)raddr, (__force u32)rport, rnd) & (synq_hsize - 1); |
349 | } | 349 | } |
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index d5b5dec075b8..476cb6084c75 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c | |||
@@ -144,7 +144,7 @@ static struct net_device *ipgre_fb_tunnel_dev; | |||
144 | */ | 144 | */ |
145 | 145 | ||
146 | #define HASH_SIZE 16 | 146 | #define HASH_SIZE 16 |
147 | #define HASH(addr) ((addr^(addr>>4))&0xF) | 147 | #define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF) |
148 | 148 | ||
149 | static struct ip_tunnel *tunnels[4][HASH_SIZE]; | 149 | static struct ip_tunnel *tunnels[4][HASH_SIZE]; |
150 | 150 | ||
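Spelled out, the HASH() macro above is just a 4-bit fold of the address; the __force casts only strip the __be32 endianness annotation for sparse, and since lookup and insertion hash the same raw bytes, any consistent byte order yields a valid bucket. A plain-C equivalent:

#include <stdint.h>

static inline unsigned int gre_hash(uint32_t addr_bits)
{
        return (addr_bits ^ (addr_bits >> 4)) & 0xF;   /* buckets 0..15 */
}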
@@ -157,7 +157,7 @@ static DEFINE_RWLOCK(ipgre_lock); | |||
157 | 157 | ||
158 | /* Given src, dst and key, find appropriate for input tunnel. */ | 158 | /* Given src, dst and key, find appropriate for input tunnel. */ |
159 | 159 | ||
160 | static struct ip_tunnel * ipgre_tunnel_lookup(u32 remote, u32 local, u32 key) | 160 | static struct ip_tunnel * ipgre_tunnel_lookup(__be32 remote, __be32 local, __be32 key) |
161 | { | 161 | { |
162 | unsigned h0 = HASH(remote); | 162 | unsigned h0 = HASH(remote); |
163 | unsigned h1 = HASH(key); | 163 | unsigned h1 = HASH(key); |
@@ -194,9 +194,9 @@ static struct ip_tunnel * ipgre_tunnel_lookup(u32 remote, u32 local, u32 key) | |||
194 | 194 | ||
195 | static struct ip_tunnel **ipgre_bucket(struct ip_tunnel *t) | 195 | static struct ip_tunnel **ipgre_bucket(struct ip_tunnel *t) |
196 | { | 196 | { |
197 | u32 remote = t->parms.iph.daddr; | 197 | __be32 remote = t->parms.iph.daddr; |
198 | u32 local = t->parms.iph.saddr; | 198 | __be32 local = t->parms.iph.saddr; |
199 | u32 key = t->parms.i_key; | 199 | __be32 key = t->parms.i_key; |
200 | unsigned h = HASH(key); | 200 | unsigned h = HASH(key); |
201 | int prio = 0; | 201 | int prio = 0; |
202 | 202 | ||
@@ -236,9 +236,9 @@ static void ipgre_tunnel_unlink(struct ip_tunnel *t) | |||
236 | 236 | ||
237 | static struct ip_tunnel * ipgre_tunnel_locate(struct ip_tunnel_parm *parms, int create) | 237 | static struct ip_tunnel * ipgre_tunnel_locate(struct ip_tunnel_parm *parms, int create) |
238 | { | 238 | { |
239 | u32 remote = parms->iph.daddr; | 239 | __be32 remote = parms->iph.daddr; |
240 | u32 local = parms->iph.saddr; | 240 | __be32 local = parms->iph.saddr; |
241 | u32 key = parms->i_key; | 241 | __be32 key = parms->i_key; |
242 | struct ip_tunnel *t, **tp, *nt; | 242 | struct ip_tunnel *t, **tp, *nt; |
243 | struct net_device *dev; | 243 | struct net_device *dev; |
244 | unsigned h = HASH(key); | 244 | unsigned h = HASH(key); |
@@ -319,12 +319,12 @@ static void ipgre_err(struct sk_buff *skb, u32 info) | |||
319 | */ | 319 | */ |
320 | 320 | ||
321 | struct iphdr *iph = (struct iphdr*)skb->data; | 321 | struct iphdr *iph = (struct iphdr*)skb->data; |
322 | u16 *p = (u16*)(skb->data+(iph->ihl<<2)); | 322 | __be16 *p = (__be16*)(skb->data+(iph->ihl<<2)); |
323 | int grehlen = (iph->ihl<<2) + 4; | 323 | int grehlen = (iph->ihl<<2) + 4; |
324 | int type = skb->h.icmph->type; | 324 | int type = skb->h.icmph->type; |
325 | int code = skb->h.icmph->code; | 325 | int code = skb->h.icmph->code; |
326 | struct ip_tunnel *t; | 326 | struct ip_tunnel *t; |
327 | u16 flags; | 327 | __be16 flags; |
328 | 328 | ||
329 | flags = p[0]; | 329 | flags = p[0]; |
330 | if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) { | 330 | if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) { |
@@ -370,7 +370,7 @@ static void ipgre_err(struct sk_buff *skb, u32 info) | |||
370 | } | 370 | } |
371 | 371 | ||
372 | read_lock(&ipgre_lock); | 372 | read_lock(&ipgre_lock); |
373 | t = ipgre_tunnel_lookup(iph->daddr, iph->saddr, (flags&GRE_KEY) ? *(((u32*)p) + (grehlen>>2) - 1) : 0); | 373 | t = ipgre_tunnel_lookup(iph->daddr, iph->saddr, (flags&GRE_KEY) ? *(((__be32*)p) + (grehlen>>2) - 1) : 0); |
374 | if (t == NULL || t->parms.iph.daddr == 0 || MULTICAST(t->parms.iph.daddr)) | 374 | if (t == NULL || t->parms.iph.daddr == 0 || MULTICAST(t->parms.iph.daddr)) |
375 | goto out; | 375 | goto out; |
376 | 376 | ||
@@ -388,14 +388,14 @@ out: | |||
388 | #else | 388 | #else |
389 | struct iphdr *iph = (struct iphdr*)dp; | 389 | struct iphdr *iph = (struct iphdr*)dp; |
390 | struct iphdr *eiph; | 390 | struct iphdr *eiph; |
391 | u16 *p = (u16*)(dp+(iph->ihl<<2)); | 391 | __be16 *p = (__be16*)(dp+(iph->ihl<<2)); |
392 | int type = skb->h.icmph->type; | 392 | int type = skb->h.icmph->type; |
393 | int code = skb->h.icmph->code; | 393 | int code = skb->h.icmph->code; |
394 | int rel_type = 0; | 394 | int rel_type = 0; |
395 | int rel_code = 0; | 395 | int rel_code = 0; |
396 | __be32 rel_info = 0; | 396 | __be32 rel_info = 0; |
397 | __u32 n = 0; | 397 | __u32 n = 0; |
398 | u16 flags; | 398 | __be16 flags; |
399 | int grehlen = (iph->ihl<<2) + 4; | 399 | int grehlen = (iph->ihl<<2) + 4; |
400 | struct sk_buff *skb2; | 400 | struct sk_buff *skb2; |
401 | struct flowi fl; | 401 | struct flowi fl; |
@@ -556,9 +556,9 @@ static int ipgre_rcv(struct sk_buff *skb) | |||
556 | { | 556 | { |
557 | struct iphdr *iph; | 557 | struct iphdr *iph; |
558 | u8 *h; | 558 | u8 *h; |
559 | u16 flags; | 559 | __be16 flags; |
560 | u16 csum = 0; | 560 | __sum16 csum = 0; |
561 | u32 key = 0; | 561 | __be32 key = 0; |
562 | u32 seqno = 0; | 562 | u32 seqno = 0; |
563 | struct ip_tunnel *tunnel; | 563 | struct ip_tunnel *tunnel; |
564 | int offset = 4; | 564 | int offset = 4; |
@@ -568,7 +568,7 @@ static int ipgre_rcv(struct sk_buff *skb) | |||
568 | 568 | ||
569 | iph = skb->nh.iph; | 569 | iph = skb->nh.iph; |
570 | h = skb->data; | 570 | h = skb->data; |
571 | flags = *(u16*)h; | 571 | flags = *(__be16*)h; |
572 | 572 | ||
573 | if (flags&(GRE_CSUM|GRE_KEY|GRE_ROUTING|GRE_SEQ|GRE_VERSION)) { | 573 | if (flags&(GRE_CSUM|GRE_KEY|GRE_ROUTING|GRE_SEQ|GRE_VERSION)) { |
574 | /* - Version must be 0. | 574 | /* - Version must be 0. |
@@ -580,7 +580,7 @@ static int ipgre_rcv(struct sk_buff *skb) | |||
580 | if (flags&GRE_CSUM) { | 580 | if (flags&GRE_CSUM) { |
581 | switch (skb->ip_summed) { | 581 | switch (skb->ip_summed) { |
582 | case CHECKSUM_COMPLETE: | 582 | case CHECKSUM_COMPLETE: |
583 | csum = (u16)csum_fold(skb->csum); | 583 | csum = csum_fold(skb->csum); |
584 | if (!csum) | 584 | if (!csum) |
585 | break; | 585 | break; |
586 | /* fall through */ | 586 | /* fall through */ |
@@ -592,11 +592,11 @@ static int ipgre_rcv(struct sk_buff *skb) | |||
592 | offset += 4; | 592 | offset += 4; |
593 | } | 593 | } |
594 | if (flags&GRE_KEY) { | 594 | if (flags&GRE_KEY) { |
595 | key = *(u32*)(h + offset); | 595 | key = *(__be32*)(h + offset); |
596 | offset += 4; | 596 | offset += 4; |
597 | } | 597 | } |
598 | if (flags&GRE_SEQ) { | 598 | if (flags&GRE_SEQ) { |
599 | seqno = ntohl(*(u32*)(h + offset)); | 599 | seqno = ntohl(*(__be32*)(h + offset)); |
600 | offset += 4; | 600 | offset += 4; |
601 | } | 601 | } |
602 | } | 602 | } |
@@ -605,7 +605,7 @@ static int ipgre_rcv(struct sk_buff *skb) | |||
605 | if ((tunnel = ipgre_tunnel_lookup(iph->saddr, iph->daddr, key)) != NULL) { | 605 | if ((tunnel = ipgre_tunnel_lookup(iph->saddr, iph->daddr, key)) != NULL) { |
606 | secpath_reset(skb); | 606 | secpath_reset(skb); |
607 | 607 | ||
608 | skb->protocol = *(u16*)(h + 2); | 608 | skb->protocol = *(__be16*)(h + 2); |
609 | /* WCCP version 1 and 2 protocol decoding. | 609 | /* WCCP version 1 and 2 protocol decoding. |
610 | * - Change protocol to IP | 610 | * - Change protocol to IP |
611 | * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header | 611 | * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header |
@@ -673,13 +673,13 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | |||
673 | struct iphdr *old_iph = skb->nh.iph; | 673 | struct iphdr *old_iph = skb->nh.iph; |
674 | struct iphdr *tiph; | 674 | struct iphdr *tiph; |
675 | u8 tos; | 675 | u8 tos; |
676 | u16 df; | 676 | __be16 df; |
677 | struct rtable *rt; /* Route to the other host */ | 677 | struct rtable *rt; /* Route to the other host */ |
678 | struct net_device *tdev; /* Device to other host */ | 678 | struct net_device *tdev; /* Device to other host */ |
679 | struct iphdr *iph; /* Our new IP header */ | 679 | struct iphdr *iph; /* Our new IP header */ |
680 | int max_headroom; /* The extra header space needed */ | 680 | int max_headroom; /* The extra header space needed */ |
681 | int gre_hlen; | 681 | int gre_hlen; |
682 | u32 dst; | 682 | __be32 dst; |
683 | int mtu; | 683 | int mtu; |
684 | 684 | ||
685 | if (tunnel->recursion++) { | 685 | if (tunnel->recursion++) { |
@@ -860,11 +860,11 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | |||
860 | iph->ttl = dst_metric(&rt->u.dst, RTAX_HOPLIMIT); | 860 | iph->ttl = dst_metric(&rt->u.dst, RTAX_HOPLIMIT); |
861 | } | 861 | } |
862 | 862 | ||
863 | ((u16*)(iph+1))[0] = tunnel->parms.o_flags; | 863 | ((__be16*)(iph+1))[0] = tunnel->parms.o_flags; |
864 | ((u16*)(iph+1))[1] = skb->protocol; | 864 | ((__be16*)(iph+1))[1] = skb->protocol; |
865 | 865 | ||
866 | if (tunnel->parms.o_flags&(GRE_KEY|GRE_CSUM|GRE_SEQ)) { | 866 | if (tunnel->parms.o_flags&(GRE_KEY|GRE_CSUM|GRE_SEQ)) { |
867 | u32 *ptr = (u32*)(((u8*)iph) + tunnel->hlen - 4); | 867 | __be32 *ptr = (__be32*)(((u8*)iph) + tunnel->hlen - 4); |
868 | 868 | ||
869 | if (tunnel->parms.o_flags&GRE_SEQ) { | 869 | if (tunnel->parms.o_flags&GRE_SEQ) { |
870 | ++tunnel->o_seqno; | 870 | ++tunnel->o_seqno; |
@@ -877,7 +877,7 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | |||
877 | } | 877 | } |
878 | if (tunnel->parms.o_flags&GRE_CSUM) { | 878 | if (tunnel->parms.o_flags&GRE_CSUM) { |
879 | *ptr = 0; | 879 | *ptr = 0; |
880 | *(__u16*)ptr = ip_compute_csum((void*)(iph+1), skb->len - sizeof(struct iphdr)); | 880 | *(__sum16*)ptr = ip_compute_csum((void*)(iph+1), skb->len - sizeof(struct iphdr)); |
881 | } | 881 | } |
882 | } | 882 | } |
883 | 883 | ||
@@ -1068,7 +1068,7 @@ static int ipgre_header(struct sk_buff *skb, struct net_device *dev, unsigned sh | |||
1068 | { | 1068 | { |
1069 | struct ip_tunnel *t = netdev_priv(dev); | 1069 | struct ip_tunnel *t = netdev_priv(dev); |
1070 | struct iphdr *iph = (struct iphdr *)skb_push(skb, t->hlen); | 1070 | struct iphdr *iph = (struct iphdr *)skb_push(skb, t->hlen); |
1071 | u16 *p = (u16*)(iph+1); | 1071 | __be16 *p = (__be16*)(iph+1); |
1072 | 1072 | ||
1073 | memcpy(iph, &t->parms.iph, sizeof(struct iphdr)); | 1073 | memcpy(iph, &t->parms.iph, sizeof(struct iphdr)); |
1074 | p[0] = t->parms.o_flags; | 1074 | p[0] = t->parms.o_flags; |
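Most of the ip_gre.c hunks above only change types: u16/u32 become __be16/__be32 for on-the-wire fields and __sum16 for folded checksums. Nothing changes at run time; the annotations let sparse ("make C=1") flag places where a network-byte-order value is mixed with a host-order integer. Below is a minimal user-space sketch of that idea, with a stand-in be16 type and a GRE_KEY constant modelled on the kernel's key flag bit (0x2000); the real annotated types come from <linux/types.h>.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>          /* htons(), ntohs() */

#ifdef __CHECKER__              /* only sparse defines this */
#define __bitwise __attribute__((bitwise))
#define __force   __attribute__((force))
#else
#define __bitwise
#define __force
#endif

typedef uint16_t __bitwise be16;        /* stand-in for the kernel's __be16 */

#define GRE_KEY ((__force be16)htons(0x2000))   /* "key present" flag bit */

int main(void)
{
        /* First two bytes of a GRE header with the key bit set. */
        unsigned char pkt[2] = { 0x20, 0x00 };
        be16 flags;

        memcpy(&flags, pkt, sizeof(flags));     /* stays in network order */

        if (flags & GRE_KEY)                    /* both operands big-endian: clean */
                printf("key present\n");

        /* An explicit conversion is the sparse-clean way to get a
         * host-order number out of a big-endian field. */
        printf("flags = 0x%04x\n", ntohs((__force uint16_t)flags));
        return 0;
}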
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index fc195a44fc2e..1da3d32f8289 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c | |||
@@ -288,9 +288,8 @@ int ip_output(struct sk_buff *skb) | |||
288 | !(IPCB(skb)->flags & IPSKB_REROUTED)); | 288 | !(IPCB(skb)->flags & IPSKB_REROUTED)); |
289 | } | 289 | } |
290 | 290 | ||
291 | int ip_queue_xmit(struct sk_buff *skb, int ipfragok) | 291 | int ip_queue_xmit(struct sk_buff *skb, struct sock *sk, int ipfragok) |
292 | { | 292 | { |
293 | struct sock *sk = skb->sk; | ||
294 | struct inet_sock *inet = inet_sk(sk); | 293 | struct inet_sock *inet = inet_sk(sk); |
295 | struct ip_options *opt = inet->opt; | 294 | struct ip_options *opt = inet->opt; |
296 | struct rtable *rt; | 295 | struct rtable *rt; |
@@ -342,7 +341,7 @@ packet_routed: | |||
342 | 341 | ||
343 | /* OK, we know where to send it, allocate and build IP header. */ | 342 | /* OK, we know where to send it, allocate and build IP header. */ |
344 | iph = (struct iphdr *) skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0)); | 343 | iph = (struct iphdr *) skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0)); |
345 | *((__u16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff)); | 344 | *((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff)); |
346 | iph->tot_len = htons(skb->len); | 345 | iph->tot_len = htons(skb->len); |
347 | if (ip_dont_fragment(sk, &rt->u.dst) && !ipfragok) | 346 | if (ip_dont_fragment(sk, &rt->u.dst) && !ipfragok) |
348 | iph->frag_off = htons(IP_DF); | 347 | iph->frag_off = htons(IP_DF); |
@@ -386,6 +385,7 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from) | |||
386 | dst_release(to->dst); | 385 | dst_release(to->dst); |
387 | to->dst = dst_clone(from->dst); | 386 | to->dst = dst_clone(from->dst); |
388 | to->dev = from->dev; | 387 | to->dev = from->dev; |
388 | to->mark = from->mark; | ||
389 | 389 | ||
390 | /* Copy the flags to each fragment. */ | 390 | /* Copy the flags to each fragment. */ |
391 | IPCB(to)->flags = IPCB(from)->flags; | 391 | IPCB(to)->flags = IPCB(from)->flags; |
@@ -394,7 +394,6 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from) | |||
394 | to->tc_index = from->tc_index; | 394 | to->tc_index = from->tc_index; |
395 | #endif | 395 | #endif |
396 | #ifdef CONFIG_NETFILTER | 396 | #ifdef CONFIG_NETFILTER |
397 | to->nfmark = from->nfmark; | ||
398 | /* Connection association is same as pre-frag packet */ | 397 | /* Connection association is same as pre-frag packet */ |
399 | nf_conntrack_put(to->nfct); | 398 | nf_conntrack_put(to->nfct); |
400 | to->nfct = from->nfct; | 399 | to->nfct = from->nfct; |
@@ -683,7 +682,7 @@ ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk | |||
683 | if (memcpy_fromiovecend(to, iov, offset, len) < 0) | 682 | if (memcpy_fromiovecend(to, iov, offset, len) < 0) |
684 | return -EFAULT; | 683 | return -EFAULT; |
685 | } else { | 684 | } else { |
686 | unsigned int csum = 0; | 685 | __wsum csum = 0; |
687 | if (csum_partial_copy_fromiovecend(to, iov, offset, len, &csum) < 0) | 686 | if (csum_partial_copy_fromiovecend(to, iov, offset, len, &csum) < 0) |
688 | return -EFAULT; | 687 | return -EFAULT; |
689 | skb->csum = csum_block_add(skb->csum, csum, odd); | 688 | skb->csum = csum_block_add(skb->csum, csum, odd); |
@@ -691,11 +690,11 @@ ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk | |||
691 | return 0; | 690 | return 0; |
692 | } | 691 | } |
693 | 692 | ||
694 | static inline unsigned int | 693 | static inline __wsum |
695 | csum_page(struct page *page, int offset, int copy) | 694 | csum_page(struct page *page, int offset, int copy) |
696 | { | 695 | { |
697 | char *kaddr; | 696 | char *kaddr; |
698 | unsigned int csum; | 697 | __wsum csum; |
699 | kaddr = kmap(page); | 698 | kaddr = kmap(page); |
700 | csum = csum_partial(kaddr + offset, copy, 0); | 699 | csum = csum_partial(kaddr + offset, copy, 0); |
701 | kunmap(page); | 700 | kunmap(page); |
@@ -1167,7 +1166,7 @@ ssize_t ip_append_page(struct sock *sk, struct page *page, | |||
1167 | } | 1166 | } |
1168 | 1167 | ||
1169 | if (skb->ip_summed == CHECKSUM_NONE) { | 1168 | if (skb->ip_summed == CHECKSUM_NONE) { |
1170 | unsigned int csum; | 1169 | __wsum csum; |
1171 | csum = csum_page(page, offset, len); | 1170 | csum = csum_page(page, offset, len); |
1172 | skb->csum = csum_block_add(skb->csum, csum, skb->len); | 1171 | skb->csum = csum_block_add(skb->csum, csum, skb->len); |
1173 | } | 1172 | } |
@@ -1315,7 +1314,7 @@ void ip_flush_pending_frames(struct sock *sk) | |||
1315 | static int ip_reply_glue_bits(void *dptr, char *to, int offset, | 1314 | static int ip_reply_glue_bits(void *dptr, char *to, int offset, |
1316 | int len, int odd, struct sk_buff *skb) | 1315 | int len, int odd, struct sk_buff *skb) |
1317 | { | 1316 | { |
1318 | unsigned int csum; | 1317 | __wsum csum; |
1319 | 1318 | ||
1320 | csum = csum_partial_copy_nocheck(dptr+offset, to, len, 0); | 1319 | csum = csum_partial_copy_nocheck(dptr+offset, to, len, 0); |
1321 | skb->csum = csum_block_add(skb->csum, csum, odd); | 1320 | skb->csum = csum_block_add(skb->csum, csum, odd); |
@@ -1385,7 +1384,7 @@ void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *ar | |||
1385 | &ipc, rt, MSG_DONTWAIT); | 1384 | &ipc, rt, MSG_DONTWAIT); |
1386 | if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) { | 1385 | if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) { |
1387 | if (arg->csumoffset >= 0) | 1386 | if (arg->csumoffset >= 0) |
1388 | *((u16 *)skb->h.raw + arg->csumoffset) = csum_fold(csum_add(skb->csum, arg->csum)); | 1387 | *((__sum16 *)skb->h.raw + arg->csumoffset) = csum_fold(csum_add(skb->csum, arg->csum)); |
1389 | skb->ip_summed = CHECKSUM_NONE; | 1388 | skb->ip_summed = CHECKSUM_NONE; |
1390 | ip_push_pending_frames(sk); | 1389 | ip_push_pending_frames(sk); |
1391 | } | 1390 | } |
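The ip_output.c changes are again mostly typing: __wsum marks a 32-bit running one's-complement sum (what csum_partial() and csum_block_add() pass around) and __sum16 marks the folded, inverted 16-bit value that goes into a header (what csum_fold() returns). The user-space sketch below shows the two stages with plain integer typedefs as stand-ins; the kernel's arch-specific helpers compute the same one's-complement sum, only faster and over native-order words.

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

typedef uint32_t wsum;          /* stand-in for __wsum  */
typedef uint16_t sum16;         /* stand-in for __sum16 */

/* Accumulate a buffer into a 32-bit one's-complement sum (csum_partial()). */
static wsum csum_partial(const uint8_t *buf, size_t len, wsum sum)
{
        size_t i;

        for (i = 0; i + 1 < len; i += 2)
                sum += (uint32_t)buf[i] << 8 | buf[i + 1];
        if (len & 1)
                sum += (uint32_t)buf[len - 1] << 8;
        return sum;
}

/* Fold to 16 bits with end-around carry and invert (csum_fold()). */
static sum16 csum_fold(wsum sum)
{
        while (sum >> 16)
                sum = (sum & 0xffff) + (sum >> 16);
        return (sum16)~sum;
}

int main(void)
{
        /* A 20-byte IPv4 header with its checksum field (bytes 10-11) zeroed. */
        uint8_t iph[20] = {
                0x45, 0x00, 0x00, 0x3c, 0x1c, 0x46, 0x40, 0x00,
                0x40, 0x06, 0x00, 0x00, 0xac, 0x10, 0x0a, 0x63,
                0xac, 0x10, 0x0a, 0x0c,
        };
        sum16 check = csum_fold(csum_partial(iph, sizeof(iph), 0));

        printf("header checksum = 0x%04x\n", check);

        /* With the checksum filled in, the whole header folds to zero. */
        iph[10] = check >> 8;
        iph[11] = check & 0xff;
        printf("verify = 0x%04x (0 means OK)\n",
               csum_fold(csum_partial(iph, sizeof(iph), 0)));
        return 0;
}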
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 4b132953bcc2..57d4bae6f080 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c | |||
@@ -355,7 +355,7 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len) | |||
355 | sin = (struct sockaddr_in *)msg->msg_name; | 355 | sin = (struct sockaddr_in *)msg->msg_name; |
356 | if (sin) { | 356 | if (sin) { |
357 | sin->sin_family = AF_INET; | 357 | sin->sin_family = AF_INET; |
358 | sin->sin_addr.s_addr = *(u32*)(skb->nh.raw + serr->addr_offset); | 358 | sin->sin_addr.s_addr = *(__be32*)(skb->nh.raw + serr->addr_offset); |
359 | sin->sin_port = serr->port; | 359 | sin->sin_port = serr->port; |
360 | memset(&sin->sin_zero, 0, sizeof(sin->sin_zero)); | 360 | memset(&sin->sin_zero, 0, sizeof(sin->sin_zero)); |
361 | } | 361 | } |
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c index 955a07abb91d..afa60b9a003f 100644 --- a/net/ipv4/ipconfig.c +++ b/net/ipv4/ipconfig.c | |||
@@ -101,6 +101,7 @@ | |||
101 | #define CONF_NAMESERVERS_MAX 3 /* Maximum number of nameservers | 101 | #define CONF_NAMESERVERS_MAX 3 /* Maximum number of nameservers |
102 | - '3' from resolv.h */ | 102 | - '3' from resolv.h */ |
103 | 103 | ||
104 | #define NONE __constant_htonl(INADDR_NONE) | ||
104 | 105 | ||
105 | /* | 106 | /* |
106 | * Public IP configuration | 107 | * Public IP configuration |
@@ -129,19 +130,19 @@ int ic_proto_enabled __initdata = 0 | |||
129 | 130 | ||
130 | static int ic_host_name_set __initdata = 0; /* Host name set by us? */ | 131 | static int ic_host_name_set __initdata = 0; /* Host name set by us? */ |
131 | 132 | ||
132 | u32 ic_myaddr = INADDR_NONE; /* My IP address */ | 133 | __be32 ic_myaddr = NONE; /* My IP address */ |
133 | static u32 ic_netmask = INADDR_NONE; /* Netmask for local subnet */ | 134 | static __be32 ic_netmask = NONE; /* Netmask for local subnet */ |
134 | u32 ic_gateway = INADDR_NONE; /* Gateway IP address */ | 135 | __be32 ic_gateway = NONE; /* Gateway IP address */ |
135 | 136 | ||
136 | u32 ic_servaddr = INADDR_NONE; /* Boot server IP address */ | 137 | __be32 ic_servaddr = NONE; /* Boot server IP address */ |
137 | 138 | ||
138 | u32 root_server_addr = INADDR_NONE; /* Address of NFS server */ | 139 | __be32 root_server_addr = NONE; /* Address of NFS server */ |
139 | u8 root_server_path[256] = { 0, }; /* Path to mount as root */ | 140 | u8 root_server_path[256] = { 0, }; /* Path to mount as root */ |
140 | 141 | ||
141 | /* Persistent data: */ | 142 | /* Persistent data: */ |
142 | 143 | ||
143 | static int ic_proto_used; /* Protocol used, if any */ | 144 | static int ic_proto_used; /* Protocol used, if any */ |
144 | static u32 ic_nameservers[CONF_NAMESERVERS_MAX]; /* DNS Server IP addresses */ | 145 | static __be32 ic_nameservers[CONF_NAMESERVERS_MAX]; /* DNS Server IP addresses */ |
145 | static u8 ic_domain[64]; /* DNS (not NIS) domain name */ | 146 | static u8 ic_domain[64]; /* DNS (not NIS) domain name */ |
146 | 147 | ||
147 | /* | 148 | /* |
@@ -172,7 +173,7 @@ struct ic_device { | |||
172 | struct net_device *dev; | 173 | struct net_device *dev; |
173 | unsigned short flags; | 174 | unsigned short flags; |
174 | short able; | 175 | short able; |
175 | u32 xid; | 176 | __be32 xid; |
176 | }; | 177 | }; |
177 | 178 | ||
178 | static struct ic_device *ic_first_dev __initdata = NULL;/* List of open device */ | 179 | static struct ic_device *ic_first_dev __initdata = NULL;/* List of open device */ |
@@ -223,7 +224,7 @@ static int __init ic_open_devs(void) | |||
223 | d->flags = oflags; | 224 | d->flags = oflags; |
224 | d->able = able; | 225 | d->able = able; |
225 | if (able & IC_BOOTP) | 226 | if (able & IC_BOOTP) |
226 | get_random_bytes(&d->xid, sizeof(u32)); | 227 | get_random_bytes(&d->xid, sizeof(__be32)); |
227 | else | 228 | else |
228 | d->xid = 0; | 229 | d->xid = 0; |
229 | ic_proto_have_if |= able; | 230 | ic_proto_have_if |= able; |
@@ -269,7 +270,7 @@ static void __init ic_close_devs(void) | |||
269 | */ | 270 | */ |
270 | 271 | ||
271 | static inline void | 272 | static inline void |
272 | set_sockaddr(struct sockaddr_in *sin, u32 addr, u16 port) | 273 | set_sockaddr(struct sockaddr_in *sin, __be32 addr, __be16 port) |
273 | { | 274 | { |
274 | sin->sin_family = AF_INET; | 275 | sin->sin_family = AF_INET; |
275 | sin->sin_addr.s_addr = addr; | 276 | sin->sin_addr.s_addr = addr; |
@@ -332,7 +333,7 @@ static int __init ic_setup_routes(void) | |||
332 | { | 333 | { |
333 | /* No need to setup device routes, only the default route... */ | 334 | /* No need to setup device routes, only the default route... */ |
334 | 335 | ||
335 | if (ic_gateway != INADDR_NONE) { | 336 | if (ic_gateway != NONE) { |
336 | struct rtentry rm; | 337 | struct rtentry rm; |
337 | int err; | 338 | int err; |
338 | 339 | ||
@@ -368,10 +369,10 @@ static int __init ic_defaults(void) | |||
368 | if (!ic_host_name_set) | 369 | if (!ic_host_name_set) |
369 | sprintf(init_utsname()->nodename, "%u.%u.%u.%u", NIPQUAD(ic_myaddr)); | 370 | sprintf(init_utsname()->nodename, "%u.%u.%u.%u", NIPQUAD(ic_myaddr)); |
370 | 371 | ||
371 | if (root_server_addr == INADDR_NONE) | 372 | if (root_server_addr == NONE) |
372 | root_server_addr = ic_servaddr; | 373 | root_server_addr = ic_servaddr; |
373 | 374 | ||
374 | if (ic_netmask == INADDR_NONE) { | 375 | if (ic_netmask == NONE) { |
375 | if (IN_CLASSA(ntohl(ic_myaddr))) | 376 | if (IN_CLASSA(ntohl(ic_myaddr))) |
376 | ic_netmask = htonl(IN_CLASSA_NET); | 377 | ic_netmask = htonl(IN_CLASSA_NET); |
377 | else if (IN_CLASSB(ntohl(ic_myaddr))) | 378 | else if (IN_CLASSB(ntohl(ic_myaddr))) |
@@ -420,7 +421,7 @@ ic_rarp_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt | |||
420 | { | 421 | { |
421 | struct arphdr *rarp; | 422 | struct arphdr *rarp; |
422 | unsigned char *rarp_ptr; | 423 | unsigned char *rarp_ptr; |
423 | u32 sip, tip; | 424 | __be32 sip, tip; |
424 | unsigned char *sha, *tha; /* s for "source", t for "target" */ | 425 | unsigned char *sha, *tha; /* s for "source", t for "target" */ |
425 | struct ic_device *d; | 426 | struct ic_device *d; |
426 | 427 | ||
@@ -485,12 +486,12 @@ ic_rarp_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt | |||
485 | goto drop_unlock; | 486 | goto drop_unlock; |
486 | 487 | ||
487 | /* Discard packets which are not from specified server. */ | 488 | /* Discard packets which are not from specified server. */ |
488 | if (ic_servaddr != INADDR_NONE && ic_servaddr != sip) | 489 | if (ic_servaddr != NONE && ic_servaddr != sip) |
489 | goto drop_unlock; | 490 | goto drop_unlock; |
490 | 491 | ||
491 | /* We have a winner! */ | 492 | /* We have a winner! */ |
492 | ic_dev = dev; | 493 | ic_dev = dev; |
493 | if (ic_myaddr == INADDR_NONE) | 494 | if (ic_myaddr == NONE) |
494 | ic_myaddr = tip; | 495 | ic_myaddr = tip; |
495 | ic_servaddr = sip; | 496 | ic_servaddr = sip; |
496 | ic_got_reply = IC_RARP; | 497 | ic_got_reply = IC_RARP; |
@@ -530,13 +531,13 @@ struct bootp_pkt { /* BOOTP packet format */ | |||
530 | u8 htype; /* HW address type */ | 531 | u8 htype; /* HW address type */ |
531 | u8 hlen; /* HW address length */ | 532 | u8 hlen; /* HW address length */ |
532 | u8 hops; /* Used only by gateways */ | 533 | u8 hops; /* Used only by gateways */ |
533 | u32 xid; /* Transaction ID */ | 534 | __be32 xid; /* Transaction ID */ |
534 | u16 secs; /* Seconds since we started */ | 535 | __be16 secs; /* Seconds since we started */ |
535 | u16 flags; /* Just what it says */ | 536 | __be16 flags; /* Just what it says */ |
536 | u32 client_ip; /* Client's IP address if known */ | 537 | __be32 client_ip; /* Client's IP address if known */ |
537 | u32 your_ip; /* Assigned IP address */ | 538 | __be32 your_ip; /* Assigned IP address */ |
538 | u32 server_ip; /* (Next, e.g. NFS) Server's IP address */ | 539 | __be32 server_ip; /* (Next, e.g. NFS) Server's IP address */ |
539 | u32 relay_ip; /* IP address of BOOTP relay */ | 540 | __be32 relay_ip; /* IP address of BOOTP relay */ |
540 | u8 hw_addr[16]; /* Client's HW address */ | 541 | u8 hw_addr[16]; /* Client's HW address */ |
541 | u8 serv_name[64]; /* Server host name */ | 542 | u8 serv_name[64]; /* Server host name */ |
542 | u8 boot_file[128]; /* Name of boot file */ | 543 | u8 boot_file[128]; /* Name of boot file */ |
@@ -576,7 +577,7 @@ static const u8 ic_bootp_cookie[4] = { 99, 130, 83, 99 }; | |||
576 | static void __init | 577 | static void __init |
577 | ic_dhcp_init_options(u8 *options) | 578 | ic_dhcp_init_options(u8 *options) |
578 | { | 579 | { |
579 | u8 mt = ((ic_servaddr == INADDR_NONE) | 580 | u8 mt = ((ic_servaddr == NONE) |
580 | ? DHCPDISCOVER : DHCPREQUEST); | 581 | ? DHCPDISCOVER : DHCPREQUEST); |
581 | u8 *e = options; | 582 | u8 *e = options; |
582 | 583 | ||
@@ -666,7 +667,7 @@ static inline void ic_bootp_init(void) | |||
666 | int i; | 667 | int i; |
667 | 668 | ||
668 | for (i = 0; i < CONF_NAMESERVERS_MAX; i++) | 669 | for (i = 0; i < CONF_NAMESERVERS_MAX; i++) |
669 | ic_nameservers[i] = INADDR_NONE; | 670 | ic_nameservers[i] = NONE; |
670 | 671 | ||
671 | dev_add_pack(&bootp_packet_type); | 672 | dev_add_pack(&bootp_packet_type); |
672 | } | 673 | } |
@@ -708,7 +709,7 @@ static void __init ic_bootp_send_if(struct ic_device *d, unsigned long jiffies_d | |||
708 | h->frag_off = htons(IP_DF); | 709 | h->frag_off = htons(IP_DF); |
709 | h->ttl = 64; | 710 | h->ttl = 64; |
710 | h->protocol = IPPROTO_UDP; | 711 | h->protocol = IPPROTO_UDP; |
711 | h->daddr = INADDR_BROADCAST; | 712 | h->daddr = htonl(INADDR_BROADCAST); |
712 | h->check = ip_fast_csum((unsigned char *) h, h->ihl); | 713 | h->check = ip_fast_csum((unsigned char *) h, h->ihl); |
713 | 714 | ||
714 | /* Construct UDP header */ | 715 | /* Construct UDP header */ |
@@ -730,8 +731,8 @@ static void __init ic_bootp_send_if(struct ic_device *d, unsigned long jiffies_d | |||
730 | b->htype = dev->type; /* can cause undefined behavior */ | 731 | b->htype = dev->type; /* can cause undefined behavior */ |
731 | } | 732 | } |
732 | b->hlen = dev->addr_len; | 733 | b->hlen = dev->addr_len; |
733 | b->your_ip = INADDR_NONE; | 734 | b->your_ip = NONE; |
734 | b->server_ip = INADDR_NONE; | 735 | b->server_ip = NONE; |
735 | memcpy(b->hw_addr, dev->dev_addr, dev->addr_len); | 736 | memcpy(b->hw_addr, dev->dev_addr, dev->addr_len); |
736 | b->secs = htons(jiffies_diff / HZ); | 737 | b->secs = htons(jiffies_diff / HZ); |
737 | b->xid = d->xid; | 738 | b->xid = d->xid; |
@@ -788,11 +789,11 @@ static void __init ic_do_bootp_ext(u8 *ext) | |||
788 | 789 | ||
789 | switch (*ext++) { | 790 | switch (*ext++) { |
790 | case 1: /* Subnet mask */ | 791 | case 1: /* Subnet mask */ |
791 | if (ic_netmask == INADDR_NONE) | 792 | if (ic_netmask == NONE) |
792 | memcpy(&ic_netmask, ext+1, 4); | 793 | memcpy(&ic_netmask, ext+1, 4); |
793 | break; | 794 | break; |
794 | case 3: /* Default gateway */ | 795 | case 3: /* Default gateway */ |
795 | if (ic_gateway == INADDR_NONE) | 796 | if (ic_gateway == NONE) |
796 | memcpy(&ic_gateway, ext+1, 4); | 797 | memcpy(&ic_gateway, ext+1, 4); |
797 | break; | 798 | break; |
798 | case 6: /* DNS server */ | 799 | case 6: /* DNS server */ |
@@ -800,7 +801,7 @@ static void __init ic_do_bootp_ext(u8 *ext) | |||
800 | if (servers > CONF_NAMESERVERS_MAX) | 801 | if (servers > CONF_NAMESERVERS_MAX) |
801 | servers = CONF_NAMESERVERS_MAX; | 802 | servers = CONF_NAMESERVERS_MAX; |
802 | for (i = 0; i < servers; i++) { | 803 | for (i = 0; i < servers; i++) { |
803 | if (ic_nameservers[i] == INADDR_NONE) | 804 | if (ic_nameservers[i] == NONE) |
804 | memcpy(&ic_nameservers[i], ext+1+4*i, 4); | 805 | memcpy(&ic_nameservers[i], ext+1+4*i, 4); |
805 | } | 806 | } |
806 | break; | 807 | break; |
@@ -917,7 +918,7 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str | |||
917 | 918 | ||
918 | #ifdef IPCONFIG_DHCP | 919 | #ifdef IPCONFIG_DHCP |
919 | if (ic_proto_enabled & IC_USE_DHCP) { | 920 | if (ic_proto_enabled & IC_USE_DHCP) { |
920 | u32 server_id = INADDR_NONE; | 921 | __be32 server_id = NONE; |
921 | int mt = 0; | 922 | int mt = 0; |
922 | 923 | ||
923 | ext = &b->exten[4]; | 924 | ext = &b->exten[4]; |
@@ -949,7 +950,7 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str | |||
949 | /* While in the process of accepting one offer, | 950 | /* While in the process of accepting one offer, |
950 | * ignore all others. | 951 | * ignore all others. |
951 | */ | 952 | */ |
952 | if (ic_myaddr != INADDR_NONE) | 953 | if (ic_myaddr != NONE) |
953 | goto drop_unlock; | 954 | goto drop_unlock; |
954 | 955 | ||
955 | /* Let's accept that offer. */ | 956 | /* Let's accept that offer. */ |
@@ -965,7 +966,7 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str | |||
965 | * precedence over the bootp header one if | 966 | * precedence over the bootp header one if |
966 | * they are different. | 967 | * they are different. |
967 | */ | 968 | */ |
968 | if ((server_id != INADDR_NONE) && | 969 | if ((server_id != NONE) && |
969 | (b->server_ip != server_id)) | 970 | (b->server_ip != server_id)) |
970 | b->server_ip = ic_servaddr; | 971 | b->server_ip = ic_servaddr; |
971 | break; | 972 | break; |
@@ -979,8 +980,8 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str | |||
979 | 980 | ||
980 | default: | 981 | default: |
981 | /* Urque. Forget it*/ | 982 | /* Urque. Forget it*/ |
982 | ic_myaddr = INADDR_NONE; | 983 | ic_myaddr = NONE; |
983 | ic_servaddr = INADDR_NONE; | 984 | ic_servaddr = NONE; |
984 | goto drop_unlock; | 985 | goto drop_unlock; |
985 | }; | 986 | }; |
986 | 987 | ||
@@ -1004,9 +1005,9 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str | |||
1004 | ic_dev = dev; | 1005 | ic_dev = dev; |
1005 | ic_myaddr = b->your_ip; | 1006 | ic_myaddr = b->your_ip; |
1006 | ic_servaddr = b->server_ip; | 1007 | ic_servaddr = b->server_ip; |
1007 | if (ic_gateway == INADDR_NONE && b->relay_ip) | 1008 | if (ic_gateway == NONE && b->relay_ip) |
1008 | ic_gateway = b->relay_ip; | 1009 | ic_gateway = b->relay_ip; |
1009 | if (ic_nameservers[0] == INADDR_NONE) | 1010 | if (ic_nameservers[0] == NONE) |
1010 | ic_nameservers[0] = ic_servaddr; | 1011 | ic_nameservers[0] = ic_servaddr; |
1011 | ic_got_reply = IC_BOOTP; | 1012 | ic_got_reply = IC_BOOTP; |
1012 | 1013 | ||
@@ -1150,7 +1151,7 @@ static int __init ic_dynamic(void) | |||
1150 | #endif | 1151 | #endif |
1151 | 1152 | ||
1152 | if (!ic_got_reply) { | 1153 | if (!ic_got_reply) { |
1153 | ic_myaddr = INADDR_NONE; | 1154 | ic_myaddr = NONE; |
1154 | return -1; | 1155 | return -1; |
1155 | } | 1156 | } |
1156 | 1157 | ||
@@ -1182,12 +1183,12 @@ static int pnp_seq_show(struct seq_file *seq, void *v) | |||
1182 | seq_printf(seq, | 1183 | seq_printf(seq, |
1183 | "domain %s\n", ic_domain); | 1184 | "domain %s\n", ic_domain); |
1184 | for (i = 0; i < CONF_NAMESERVERS_MAX; i++) { | 1185 | for (i = 0; i < CONF_NAMESERVERS_MAX; i++) { |
1185 | if (ic_nameservers[i] != INADDR_NONE) | 1186 | if (ic_nameservers[i] != NONE) |
1186 | seq_printf(seq, | 1187 | seq_printf(seq, |
1187 | "nameserver %u.%u.%u.%u\n", | 1188 | "nameserver %u.%u.%u.%u\n", |
1188 | NIPQUAD(ic_nameservers[i])); | 1189 | NIPQUAD(ic_nameservers[i])); |
1189 | } | 1190 | } |
1190 | if (ic_servaddr != INADDR_NONE) | 1191 | if (ic_servaddr != NONE) |
1191 | seq_printf(seq, | 1192 | seq_printf(seq, |
1192 | "bootserver %u.%u.%u.%u\n", | 1193 | "bootserver %u.%u.%u.%u\n", |
1193 | NIPQUAD(ic_servaddr)); | 1194 | NIPQUAD(ic_servaddr)); |
@@ -1213,9 +1214,9 @@ static struct file_operations pnp_seq_fops = { | |||
1213 | * need to have root_server_addr set _before_ IPConfig gets called as it | 1214 | * need to have root_server_addr set _before_ IPConfig gets called as it |
1214 | * can override it. | 1215 | * can override it. |
1215 | */ | 1216 | */ |
1216 | u32 __init root_nfs_parse_addr(char *name) | 1217 | __be32 __init root_nfs_parse_addr(char *name) |
1217 | { | 1218 | { |
1218 | u32 addr; | 1219 | __be32 addr; |
1219 | int octets = 0; | 1220 | int octets = 0; |
1220 | char *cp, *cq; | 1221 | char *cp, *cq; |
1221 | 1222 | ||
@@ -1237,7 +1238,7 @@ u32 __init root_nfs_parse_addr(char *name) | |||
1237 | addr = in_aton(name); | 1238 | addr = in_aton(name); |
1238 | memmove(name, cp, strlen(cp) + 1); | 1239 | memmove(name, cp, strlen(cp) + 1); |
1239 | } else | 1240 | } else |
1240 | addr = INADDR_NONE; | 1241 | addr = NONE; |
1241 | 1242 | ||
1242 | return addr; | 1243 | return addr; |
1243 | } | 1244 | } |
@@ -1248,7 +1249,7 @@ u32 __init root_nfs_parse_addr(char *name) | |||
1248 | 1249 | ||
1249 | static int __init ip_auto_config(void) | 1250 | static int __init ip_auto_config(void) |
1250 | { | 1251 | { |
1251 | u32 addr; | 1252 | __be32 addr; |
1252 | 1253 | ||
1253 | #ifdef CONFIG_PROC_FS | 1254 | #ifdef CONFIG_PROC_FS |
1254 | proc_net_fops_create("pnp", S_IRUGO, &pnp_seq_fops); | 1255 | proc_net_fops_create("pnp", S_IRUGO, &pnp_seq_fops); |
@@ -1277,11 +1278,11 @@ static int __init ip_auto_config(void) | |||
1277 | * interfaces and no default was set), use BOOTP or RARP to get the | 1278 | * interfaces and no default was set), use BOOTP or RARP to get the |
1278 | * missing values. | 1279 | * missing values. |
1279 | */ | 1280 | */ |
1280 | if (ic_myaddr == INADDR_NONE || | 1281 | if (ic_myaddr == NONE || |
1281 | #ifdef CONFIG_ROOT_NFS | 1282 | #ifdef CONFIG_ROOT_NFS |
1282 | (MAJOR(ROOT_DEV) == UNNAMED_MAJOR | 1283 | (MAJOR(ROOT_DEV) == UNNAMED_MAJOR |
1283 | && root_server_addr == INADDR_NONE | 1284 | && root_server_addr == NONE |
1284 | && ic_servaddr == INADDR_NONE) || | 1285 | && ic_servaddr == NONE) || |
1285 | #endif | 1286 | #endif |
1286 | ic_first_dev->next) { | 1287 | ic_first_dev->next) { |
1287 | #ifdef IPCONFIG_DYNAMIC | 1288 | #ifdef IPCONFIG_DYNAMIC |
@@ -1334,7 +1335,7 @@ static int __init ip_auto_config(void) | |||
1334 | } | 1335 | } |
1335 | 1336 | ||
1336 | addr = root_nfs_parse_addr(root_server_path); | 1337 | addr = root_nfs_parse_addr(root_server_path); |
1337 | if (root_server_addr == INADDR_NONE) | 1338 | if (root_server_addr == NONE) |
1338 | root_server_addr = addr; | 1339 | root_server_addr = addr; |
1339 | 1340 | ||
1340 | /* | 1341 | /* |
@@ -1461,19 +1462,19 @@ static int __init ip_auto_config_setup(char *addrs) | |||
1461 | switch (num) { | 1462 | switch (num) { |
1462 | case 0: | 1463 | case 0: |
1463 | if ((ic_myaddr = in_aton(ip)) == INADDR_ANY) | 1464 | if ((ic_myaddr = in_aton(ip)) == INADDR_ANY) |
1464 | ic_myaddr = INADDR_NONE; | 1465 | ic_myaddr = NONE; |
1465 | break; | 1466 | break; |
1466 | case 1: | 1467 | case 1: |
1467 | if ((ic_servaddr = in_aton(ip)) == INADDR_ANY) | 1468 | if ((ic_servaddr = in_aton(ip)) == INADDR_ANY) |
1468 | ic_servaddr = INADDR_NONE; | 1469 | ic_servaddr = NONE; |
1469 | break; | 1470 | break; |
1470 | case 2: | 1471 | case 2: |
1471 | if ((ic_gateway = in_aton(ip)) == INADDR_ANY) | 1472 | if ((ic_gateway = in_aton(ip)) == INADDR_ANY) |
1472 | ic_gateway = INADDR_NONE; | 1473 | ic_gateway = NONE; |
1473 | break; | 1474 | break; |
1474 | case 3: | 1475 | case 3: |
1475 | if ((ic_netmask = in_aton(ip)) == INADDR_ANY) | 1476 | if ((ic_netmask = in_aton(ip)) == INADDR_ANY) |
1476 | ic_netmask = INADDR_NONE; | 1477 | ic_netmask = NONE; |
1477 | break; | 1478 | break; |
1478 | case 4: | 1479 | case 4: |
1479 | if ((dp = strchr(ip, '.'))) { | 1480 | if ((dp = strchr(ip, '.'))) { |
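The ipconfig.c conversion keeps every IPv4 address in network byte order (__be32) from end to end; the new NONE macro is just INADDR_NONE with the big-endian annotation attached (htonl() of all-ones does not change the bits, it only keeps comparisons sparse-clean). Host order appears only at the few spots that need arithmetic, such as the classful default-netmask guess in ic_defaults(). A user-space rendering of that pattern, with in_addr_t standing in for __be32:

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>
#include <netinet/in.h>

#define NONE htonl(INADDR_NONE)         /* user-space stand-in for the new macro */

/* Pick a classful netmask; only here do we drop to host order. */
static in_addr_t default_netmask(in_addr_t addr /* network order */)
{
        uint32_t host = ntohl(addr);

        if (IN_CLASSA(host))
                return htonl(IN_CLASSA_NET);
        if (IN_CLASSB(host))
                return htonl(IN_CLASSB_NET);
        return htonl(IN_CLASSC_NET);
}

int main(void)
{
        in_addr_t myaddr  = inet_addr("10.0.0.2");      /* network order */
        in_addr_t netmask = NONE;                       /* "not configured yet" */
        struct in_addr out;

        if (netmask == NONE)
                netmask = default_netmask(myaddr);

        out.s_addr = netmask;
        printf("default netmask for 10.0.0.2: %s\n", inet_ntoa(out));
        return 0;
}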
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c index 0c4556529228..9d719d664e5b 100644 --- a/net/ipv4/ipip.c +++ b/net/ipv4/ipip.c | |||
@@ -118,7 +118,7 @@ | |||
118 | #include <net/xfrm.h> | 118 | #include <net/xfrm.h> |
119 | 119 | ||
120 | #define HASH_SIZE 16 | 120 | #define HASH_SIZE 16 |
121 | #define HASH(addr) ((addr^(addr>>4))&0xF) | 121 | #define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF) |
122 | 122 | ||
123 | static int ipip_fb_tunnel_init(struct net_device *dev); | 123 | static int ipip_fb_tunnel_init(struct net_device *dev); |
124 | static int ipip_tunnel_init(struct net_device *dev); | 124 | static int ipip_tunnel_init(struct net_device *dev); |
@@ -134,7 +134,7 @@ static struct ip_tunnel **tunnels[4] = { tunnels_wc, tunnels_l, tunnels_r, tunne | |||
134 | 134 | ||
135 | static DEFINE_RWLOCK(ipip_lock); | 135 | static DEFINE_RWLOCK(ipip_lock); |
136 | 136 | ||
137 | static struct ip_tunnel * ipip_tunnel_lookup(u32 remote, u32 local) | 137 | static struct ip_tunnel * ipip_tunnel_lookup(__be32 remote, __be32 local) |
138 | { | 138 | { |
139 | unsigned h0 = HASH(remote); | 139 | unsigned h0 = HASH(remote); |
140 | unsigned h1 = HASH(local); | 140 | unsigned h1 = HASH(local); |
@@ -160,8 +160,8 @@ static struct ip_tunnel * ipip_tunnel_lookup(u32 remote, u32 local) | |||
160 | 160 | ||
161 | static struct ip_tunnel **ipip_bucket(struct ip_tunnel *t) | 161 | static struct ip_tunnel **ipip_bucket(struct ip_tunnel *t) |
162 | { | 162 | { |
163 | u32 remote = t->parms.iph.daddr; | 163 | __be32 remote = t->parms.iph.daddr; |
164 | u32 local = t->parms.iph.saddr; | 164 | __be32 local = t->parms.iph.saddr; |
165 | unsigned h = 0; | 165 | unsigned h = 0; |
166 | int prio = 0; | 166 | int prio = 0; |
167 | 167 | ||
@@ -203,8 +203,8 @@ static void ipip_tunnel_link(struct ip_tunnel *t) | |||
203 | 203 | ||
204 | static struct ip_tunnel * ipip_tunnel_locate(struct ip_tunnel_parm *parms, int create) | 204 | static struct ip_tunnel * ipip_tunnel_locate(struct ip_tunnel_parm *parms, int create) |
205 | { | 205 | { |
206 | u32 remote = parms->iph.daddr; | 206 | __be32 remote = parms->iph.daddr; |
207 | u32 local = parms->iph.saddr; | 207 | __be32 local = parms->iph.saddr; |
208 | struct ip_tunnel *t, **tp, *nt; | 208 | struct ip_tunnel *t, **tp, *nt; |
209 | struct net_device *dev; | 209 | struct net_device *dev; |
210 | unsigned h = 0; | 210 | unsigned h = 0; |
@@ -519,13 +519,13 @@ static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | |||
519 | struct net_device_stats *stats = &tunnel->stat; | 519 | struct net_device_stats *stats = &tunnel->stat; |
520 | struct iphdr *tiph = &tunnel->parms.iph; | 520 | struct iphdr *tiph = &tunnel->parms.iph; |
521 | u8 tos = tunnel->parms.iph.tos; | 521 | u8 tos = tunnel->parms.iph.tos; |
522 | u16 df = tiph->frag_off; | 522 | __be16 df = tiph->frag_off; |
523 | struct rtable *rt; /* Route to the other host */ | 523 | struct rtable *rt; /* Route to the other host */ |
524 | struct net_device *tdev; /* Device to other host */ | 524 | struct net_device *tdev; /* Device to other host */ |
525 | struct iphdr *old_iph = skb->nh.iph; | 525 | struct iphdr *old_iph = skb->nh.iph; |
526 | struct iphdr *iph; /* Our new IP header */ | 526 | struct iphdr *iph; /* Our new IP header */ |
527 | int max_headroom; /* The extra header space needed */ | 527 | int max_headroom; /* The extra header space needed */ |
528 | u32 dst = tiph->daddr; | 528 | __be32 dst = tiph->daddr; |
529 | int mtu; | 529 | int mtu; |
530 | 530 | ||
531 | if (tunnel->recursion++) { | 531 | if (tunnel->recursion++) { |
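One detail worth noting in the ipip.c (and ip_gre.c) hunks: the tunnel endpoints become __be32, and the HASH() macro gains __force casts. Dropping the annotation there is deliberate; the hash only needs the raw bits, and the same machine both inserts and looks up entries, so byte order cannot matter. A throwaway user-space version of that bucketing, with uint32_t standing in for __be32:

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

#define HASH_SIZE 16
#define HASH(addr) ((((uint32_t)(addr)) ^ ((uint32_t)(addr) >> 4)) & 0xF)

int main(void)
{
        uint32_t remote = inet_addr("192.0.2.1");       /* network order, like __be32 */
        uint32_t local  = inet_addr("198.51.100.7");

        /* The lookup combines the two hashes to pick one of HASH_SIZE chains. */
        unsigned bucket = HASH(remote) ^ HASH(local);

        printf("bucket %u of %d\n", bucket, HASH_SIZE);
        return 0;
}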
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 97cfa97c8abb..efcf45ecc818 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c | |||
@@ -1493,7 +1493,7 @@ static int pim_rcv(struct sk_buff * skb) | |||
1493 | if (pim->type != ((PIM_VERSION<<4)|(PIM_REGISTER)) || | 1493 | if (pim->type != ((PIM_VERSION<<4)|(PIM_REGISTER)) || |
1494 | (pim->flags&PIM_NULL_REGISTER) || | 1494 | (pim->flags&PIM_NULL_REGISTER) || |
1495 | (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 && | 1495 | (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 && |
1496 | (u16)csum_fold(skb_checksum(skb, 0, skb->len, 0)))) | 1496 | csum_fold(skb_checksum(skb, 0, skb->len, 0)))) |
1497 | goto drop; | 1497 | goto drop; |
1498 | 1498 | ||
1499 | /* check if the inner packet is destined to mcast group */ | 1499 | /* check if the inner packet is destined to mcast group */ |
diff --git a/net/ipv4/ipvs/ip_vs_app.c b/net/ipv4/ipvs/ip_vs_app.c index e7752334d296..6c40899aa161 100644 --- a/net/ipv4/ipvs/ip_vs_app.c +++ b/net/ipv4/ipvs/ip_vs_app.c | |||
@@ -80,10 +80,9 @@ ip_vs_app_inc_new(struct ip_vs_app *app, __u16 proto, __u16 port) | |||
80 | if (!pp->unregister_app) | 80 | if (!pp->unregister_app) |
81 | return -EOPNOTSUPP; | 81 | return -EOPNOTSUPP; |
82 | 82 | ||
83 | inc = kmalloc(sizeof(struct ip_vs_app), GFP_KERNEL); | 83 | inc = kmemdup(app, sizeof(*inc), GFP_KERNEL); |
84 | if (!inc) | 84 | if (!inc) |
85 | return -ENOMEM; | 85 | return -ENOMEM; |
86 | memcpy(inc, app, sizeof(*inc)); | ||
87 | INIT_LIST_HEAD(&inc->p_list); | 86 | INIT_LIST_HEAD(&inc->p_list); |
88 | INIT_LIST_HEAD(&inc->incs_list); | 87 | INIT_LIST_HEAD(&inc->incs_list); |
89 | inc->app = app; | 88 | inc->app = app; |
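The ip_vs_app.c change (and the ip_vs_proto.c one just below) is a pure simplification: an open-coded kmalloc() plus memcpy() becomes a single kmemdup() call. A user-space equivalent of the pattern, sketched with malloc(); the benefit is simply that allocation, copy and the error check collapse into one call at every caller.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void *memdup(const void *src, size_t len)
{
        void *p = malloc(len);

        if (p)
                memcpy(p, src, len);
        return p;               /* NULL on allocation failure, like kmemdup() */
}

struct app {
        char name[16];
        unsigned short port;
};

int main(void)
{
        struct app tmpl = { "ftp", 21 };
        struct app *inc = memdup(&tmpl, sizeof(tmpl));  /* one call, one check */

        if (!inc)
                return 1;
        printf("%s/%u\n", inc->name, inc->port);
        free(inc);
        return 0;
}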
diff --git a/net/ipv4/ipvs/ip_vs_core.c b/net/ipv4/ipvs/ip_vs_core.c index 1445bb47fea4..34257520a3a6 100644 --- a/net/ipv4/ipvs/ip_vs_core.c +++ b/net/ipv4/ipvs/ip_vs_core.c | |||
@@ -536,9 +536,9 @@ static unsigned int ip_vs_post_routing(unsigned int hooknum, | |||
536 | return NF_STOP; | 536 | return NF_STOP; |
537 | } | 537 | } |
538 | 538 | ||
539 | u16 ip_vs_checksum_complete(struct sk_buff *skb, int offset) | 539 | __sum16 ip_vs_checksum_complete(struct sk_buff *skb, int offset) |
540 | { | 540 | { |
541 | return (u16) csum_fold(skb_checksum(skb, offset, skb->len - offset, 0)); | 541 | return csum_fold(skb_checksum(skb, offset, skb->len - offset, 0)); |
542 | } | 542 | } |
543 | 543 | ||
544 | static inline struct sk_buff * | 544 | static inline struct sk_buff * |
diff --git a/net/ipv4/ipvs/ip_vs_proto.c b/net/ipv4/ipvs/ip_vs_proto.c index c4528b5c800d..e844ddb82b9a 100644 --- a/net/ipv4/ipvs/ip_vs_proto.c +++ b/net/ipv4/ipvs/ip_vs_proto.c | |||
@@ -118,13 +118,7 @@ void ip_vs_protocol_timeout_change(int flags) | |||
118 | int * | 118 | int * |
119 | ip_vs_create_timeout_table(int *table, int size) | 119 | ip_vs_create_timeout_table(int *table, int size) |
120 | { | 120 | { |
121 | int *t; | 121 | return kmemdup(table, size, GFP_ATOMIC); |
122 | |||
123 | t = kmalloc(size, GFP_ATOMIC); | ||
124 | if (t == NULL) | ||
125 | return NULL; | ||
126 | memcpy(t, table, size); | ||
127 | return t; | ||
128 | } | 122 | } |
129 | 123 | ||
130 | 124 | ||
diff --git a/net/ipv4/ipvs/ip_vs_proto_tcp.c b/net/ipv4/ipvs/ip_vs_proto_tcp.c index 6ff05c3a32e6..16a9ebee2fe6 100644 --- a/net/ipv4/ipvs/ip_vs_proto_tcp.c +++ b/net/ipv4/ipvs/ip_vs_proto_tcp.c | |||
@@ -84,7 +84,7 @@ tcp_conn_schedule(struct sk_buff *skb, | |||
84 | } | 84 | } |
85 | 85 | ||
86 | if (th->syn && | 86 | if (th->syn && |
87 | (svc = ip_vs_service_get(skb->nfmark, skb->nh.iph->protocol, | 87 | (svc = ip_vs_service_get(skb->mark, skb->nh.iph->protocol, |
88 | skb->nh.iph->daddr, th->dest))) { | 88 | skb->nh.iph->daddr, th->dest))) { |
89 | if (ip_vs_todrop()) { | 89 | if (ip_vs_todrop()) { |
90 | /* | 90 | /* |
@@ -116,9 +116,9 @@ tcp_fast_csum_update(struct tcphdr *tcph, __be32 oldip, __be32 newip, | |||
116 | __be16 oldport, __be16 newport) | 116 | __be16 oldport, __be16 newport) |
117 | { | 117 | { |
118 | tcph->check = | 118 | tcph->check = |
119 | ip_vs_check_diff(~oldip, newip, | 119 | csum_fold(ip_vs_check_diff4(oldip, newip, |
120 | ip_vs_check_diff(oldport ^ htons(0xFFFF), | 120 | ip_vs_check_diff2(oldport, newport, |
121 | newport, tcph->check)); | 121 | ~csum_unfold(tcph->check)))); |
122 | } | 122 | } |
123 | 123 | ||
124 | 124 | ||
@@ -490,16 +490,18 @@ tcp_state_transition(struct ip_vs_conn *cp, int direction, | |||
490 | static struct list_head tcp_apps[TCP_APP_TAB_SIZE]; | 490 | static struct list_head tcp_apps[TCP_APP_TAB_SIZE]; |
491 | static DEFINE_SPINLOCK(tcp_app_lock); | 491 | static DEFINE_SPINLOCK(tcp_app_lock); |
492 | 492 | ||
493 | static inline __u16 tcp_app_hashkey(__u16 port) | 493 | static inline __u16 tcp_app_hashkey(__be16 port) |
494 | { | 494 | { |
495 | return ((port >> TCP_APP_TAB_BITS) ^ port) & TCP_APP_TAB_MASK; | 495 | return (((__force u16)port >> TCP_APP_TAB_BITS) ^ (__force u16)port) |
496 | & TCP_APP_TAB_MASK; | ||
496 | } | 497 | } |
497 | 498 | ||
498 | 499 | ||
499 | static int tcp_register_app(struct ip_vs_app *inc) | 500 | static int tcp_register_app(struct ip_vs_app *inc) |
500 | { | 501 | { |
501 | struct ip_vs_app *i; | 502 | struct ip_vs_app *i; |
502 | __u16 hash, port = inc->port; | 503 | __u16 hash; |
504 | __be16 port = inc->port; | ||
503 | int ret = 0; | 505 | int ret = 0; |
504 | 506 | ||
505 | hash = tcp_app_hashkey(port); | 507 | hash = tcp_app_hashkey(port); |
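tcp_fast_csum_update() above (and its UDP twin below) adjusts an existing checksum when NAT rewrites an address or port, instead of summing the whole segment again. That is the RFC 1624 incremental update which the new ip_vs_check_diff4()/ip_vs_check_diff2() helpers express with proper __wsum/__sum16 typing. The sketch below models only the arithmetic, in user space on bare 16-bit words (a rewritten address would simply contribute two such words); it is the same formula, not the kernel's implementation.

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

/* Fold a 32-bit one's-complement accumulator and invert it. */
static uint16_t fold(uint32_t sum)
{
        while (sum >> 16)
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
}

/* Full checksum over 16-bit words, for comparison only. */
static uint16_t csum_full(const uint16_t *w, size_t n)
{
        uint32_t sum = 0;
        size_t i;

        for (i = 0; i < n; i++)
                sum += w[i];
        return fold(sum);
}

/* RFC 1624, eqn. 3: HC' = ~(~HC + ~m + m'). */
static uint16_t csum_update16(uint16_t check, uint16_t oldv, uint16_t newv)
{
        uint32_t sum = (uint16_t)~check;

        sum += (uint16_t)~oldv;
        sum += newv;
        return fold(sum);
}

int main(void)
{
        uint16_t words[6] = { 0x1234, 0x5678, 0x0050, 0x1f90, 0xdead, 0xbeef };
        uint16_t check = csum_full(words, 6);

        /* "NAT" rewrites the destination port word 0x1f90 -> 0x0016. */
        uint16_t updated = csum_update16(check, words[3], 0x0016);
        words[3] = 0x0016;

        printf("incremental 0x%04x, recomputed 0x%04x\n",
               updated, csum_full(words, 6));
        return 0;
}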
diff --git a/net/ipv4/ipvs/ip_vs_proto_udp.c b/net/ipv4/ipvs/ip_vs_proto_udp.c index 691c8b637b29..03f0a414cfa4 100644 --- a/net/ipv4/ipvs/ip_vs_proto_udp.c +++ b/net/ipv4/ipvs/ip_vs_proto_udp.c | |||
@@ -89,7 +89,7 @@ udp_conn_schedule(struct sk_buff *skb, struct ip_vs_protocol *pp, | |||
89 | return 0; | 89 | return 0; |
90 | } | 90 | } |
91 | 91 | ||
92 | if ((svc = ip_vs_service_get(skb->nfmark, skb->nh.iph->protocol, | 92 | if ((svc = ip_vs_service_get(skb->mark, skb->nh.iph->protocol, |
93 | skb->nh.iph->daddr, uh->dest))) { | 93 | skb->nh.iph->daddr, uh->dest))) { |
94 | if (ip_vs_todrop()) { | 94 | if (ip_vs_todrop()) { |
95 | /* | 95 | /* |
@@ -121,11 +121,11 @@ udp_fast_csum_update(struct udphdr *uhdr, __be32 oldip, __be32 newip, | |||
121 | __be16 oldport, __be16 newport) | 121 | __be16 oldport, __be16 newport) |
122 | { | 122 | { |
123 | uhdr->check = | 123 | uhdr->check = |
124 | ip_vs_check_diff(~oldip, newip, | 124 | csum_fold(ip_vs_check_diff4(oldip, newip, |
125 | ip_vs_check_diff(oldport ^ htons(0xFFFF), | 125 | ip_vs_check_diff2(oldport, newport, |
126 | newport, uhdr->check)); | 126 | ~csum_unfold(uhdr->check)))); |
127 | if (!uhdr->check) | 127 | if (!uhdr->check) |
128 | uhdr->check = -1; | 128 | uhdr->check = CSUM_MANGLED_0; |
129 | } | 129 | } |
130 | 130 | ||
131 | static int | 131 | static int |
@@ -173,7 +173,7 @@ udp_snat_handler(struct sk_buff **pskb, | |||
173 | cp->protocol, | 173 | cp->protocol, |
174 | (*pskb)->csum); | 174 | (*pskb)->csum); |
175 | if (udph->check == 0) | 175 | if (udph->check == 0) |
176 | udph->check = -1; | 176 | udph->check = CSUM_MANGLED_0; |
177 | IP_VS_DBG(11, "O-pkt: %s O-csum=%d (+%zd)\n", | 177 | IP_VS_DBG(11, "O-pkt: %s O-csum=%d (+%zd)\n", |
178 | pp->name, udph->check, | 178 | pp->name, udph->check, |
179 | (char*)&(udph->check) - (char*)udph); | 179 | (char*)&(udph->check) - (char*)udph); |
@@ -228,7 +228,7 @@ udp_dnat_handler(struct sk_buff **pskb, | |||
228 | cp->protocol, | 228 | cp->protocol, |
229 | (*pskb)->csum); | 229 | (*pskb)->csum); |
230 | if (udph->check == 0) | 230 | if (udph->check == 0) |
231 | udph->check = -1; | 231 | udph->check = CSUM_MANGLED_0; |
232 | (*pskb)->ip_summed = CHECKSUM_UNNECESSARY; | 232 | (*pskb)->ip_summed = CHECKSUM_UNNECESSARY; |
233 | } | 233 | } |
234 | return 1; | 234 | return 1; |
@@ -282,16 +282,18 @@ udp_csum_check(struct sk_buff *skb, struct ip_vs_protocol *pp) | |||
282 | static struct list_head udp_apps[UDP_APP_TAB_SIZE]; | 282 | static struct list_head udp_apps[UDP_APP_TAB_SIZE]; |
283 | static DEFINE_SPINLOCK(udp_app_lock); | 283 | static DEFINE_SPINLOCK(udp_app_lock); |
284 | 284 | ||
285 | static inline __u16 udp_app_hashkey(__u16 port) | 285 | static inline __u16 udp_app_hashkey(__be16 port) |
286 | { | 286 | { |
287 | return ((port >> UDP_APP_TAB_BITS) ^ port) & UDP_APP_TAB_MASK; | 287 | return (((__force u16)port >> UDP_APP_TAB_BITS) ^ (__force u16)port) |
288 | & UDP_APP_TAB_MASK; | ||
288 | } | 289 | } |
289 | 290 | ||
290 | 291 | ||
291 | static int udp_register_app(struct ip_vs_app *inc) | 292 | static int udp_register_app(struct ip_vs_app *inc) |
292 | { | 293 | { |
293 | struct ip_vs_app *i; | 294 | struct ip_vs_app *i; |
294 | __u16 hash, port = inc->port; | 295 | __u16 hash; |
296 | __be16 port = inc->port; | ||
295 | int ret = 0; | 297 | int ret = 0; |
296 | 298 | ||
297 | hash = udp_app_hashkey(port); | 299 | hash = udp_app_hashkey(port); |
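The "-1" to CSUM_MANGLED_0 change in the UDP handlers reflects a UDP quirk: a checksum field of zero means "no checksum present", so a checksum that genuinely computes to zero must be transmitted as all ones (RFC 768). CSUM_MANGLED_0 spells that 0xffff out instead of relying on -1 truncating to it. A minimal user-space rendering of the fixup; finalize_udp_check() is a hypothetical helper here, standing in for the tail of the kernel's udp_*_handler() paths.

#include <stdint.h>
#include <stdio.h>

#define CSUM_MANGLED_0 ((uint16_t)0xffff)

/* Assume some routine already produced the folded UDP checksum. */
static uint16_t finalize_udp_check(uint16_t computed)
{
        /* 0 is reserved for "checksum not used"; send the one's-complement
         * representation of zero (all ones) instead. */
        return computed ? computed : CSUM_MANGLED_0;
}

int main(void)
{
        printf("0x%04x -> 0x%04x\n", 0x1a2bu,
               (unsigned)finalize_udp_check(0x1a2b));
        printf("0x%04x -> 0x%04x\n", 0u,
               (unsigned)finalize_udp_check(0));
        return 0;
}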
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c index e2005c6810a4..a68966059b50 100644 --- a/net/ipv4/netfilter.c +++ b/net/ipv4/netfilter.c | |||
@@ -27,9 +27,7 @@ int ip_route_me_harder(struct sk_buff **pskb, unsigned addr_type) | |||
27 | fl.nl_u.ip4_u.saddr = iph->saddr; | 27 | fl.nl_u.ip4_u.saddr = iph->saddr; |
28 | fl.nl_u.ip4_u.tos = RT_TOS(iph->tos); | 28 | fl.nl_u.ip4_u.tos = RT_TOS(iph->tos); |
29 | fl.oif = (*pskb)->sk ? (*pskb)->sk->sk_bound_dev_if : 0; | 29 | fl.oif = (*pskb)->sk ? (*pskb)->sk->sk_bound_dev_if : 0; |
30 | #ifdef CONFIG_IP_ROUTE_FWMARK | 30 | fl.mark = (*pskb)->mark; |
31 | fl.nl_u.ip4_u.fwmark = (*pskb)->nfmark; | ||
32 | #endif | ||
33 | if (ip_route_output_key(&rt, &fl) != 0) | 31 | if (ip_route_output_key(&rt, &fl) != 0) |
34 | return -1; | 32 | return -1; |
35 | 33 | ||
@@ -164,17 +162,17 @@ static int nf_ip_reroute(struct sk_buff **pskb, const struct nf_info *info) | |||
164 | return 0; | 162 | return 0; |
165 | } | 163 | } |
166 | 164 | ||
167 | unsigned int nf_ip_checksum(struct sk_buff *skb, unsigned int hook, | 165 | __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook, |
168 | unsigned int dataoff, u_int8_t protocol) | 166 | unsigned int dataoff, u_int8_t protocol) |
169 | { | 167 | { |
170 | struct iphdr *iph = skb->nh.iph; | 168 | struct iphdr *iph = skb->nh.iph; |
171 | unsigned int csum = 0; | 169 | __sum16 csum = 0; |
172 | 170 | ||
173 | switch (skb->ip_summed) { | 171 | switch (skb->ip_summed) { |
174 | case CHECKSUM_COMPLETE: | 172 | case CHECKSUM_COMPLETE: |
175 | if (hook != NF_IP_PRE_ROUTING && hook != NF_IP_LOCAL_IN) | 173 | if (hook != NF_IP_PRE_ROUTING && hook != NF_IP_LOCAL_IN) |
176 | break; | 174 | break; |
177 | if ((protocol == 0 && !(u16)csum_fold(skb->csum)) || | 175 | if ((protocol == 0 && !csum_fold(skb->csum)) || |
178 | !csum_tcpudp_magic(iph->saddr, iph->daddr, | 176 | !csum_tcpudp_magic(iph->saddr, iph->daddr, |
179 | skb->len - dataoff, protocol, | 177 | skb->len - dataoff, protocol, |
180 | skb->csum)) { | 178 | skb->csum)) { |
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig index d88c292f118c..363df9976c9d 100644 --- a/net/ipv4/netfilter/Kconfig +++ b/net/ipv4/netfilter/Kconfig | |||
@@ -6,7 +6,7 @@ menu "IP: Netfilter Configuration" | |||
6 | depends on INET && NETFILTER | 6 | depends on INET && NETFILTER |
7 | 7 | ||
8 | config NF_CONNTRACK_IPV4 | 8 | config NF_CONNTRACK_IPV4 |
9 | tristate "IPv4 support for new connection tracking (EXPERIMENTAL)" | 9 | tristate "IPv4 connection tracking support (required for NAT) (EXPERIMENTAL)" |
10 | depends on EXPERIMENTAL && NF_CONNTRACK | 10 | depends on EXPERIMENTAL && NF_CONNTRACK |
11 | ---help--- | 11 | ---help--- |
12 | Connection tracking keeps a record of what packets have passed | 12 | Connection tracking keeps a record of what packets have passed |
@@ -19,21 +19,18 @@ config NF_CONNTRACK_IPV4 | |||
19 | 19 | ||
20 | To compile it as a module, choose M here. If unsure, say N. | 20 | To compile it as a module, choose M here. If unsure, say N. |
21 | 21 | ||
22 | # connection tracking, helpers and protocols | 22 | config NF_CONNTRACK_PROC_COMPAT |
23 | config IP_NF_CONNTRACK | 23 | bool "proc/sysctl compatibility with old connection tracking" |
24 | tristate "Connection tracking (required for masq/NAT)" | 24 | depends on NF_CONNTRACK_IPV4 |
25 | ---help--- | 25 | default y |
26 | Connection tracking keeps a record of what packets have passed | 26 | help |
27 | through your machine, in order to figure out how they are related | 27 | This option enables /proc and sysctl compatibility with the old |
28 | into connections. | 28 | layer 3 dependant connection tracking. This is needed to keep |
29 | 29 | old programs that have not been adapted to the new names working. | |
30 | This is required to do Masquerading or other kinds of Network | ||
31 | Address Translation (except for Fast NAT). It can also be used to | ||
32 | enhance packet filtering (see `Connection state match support' | ||
33 | below). | ||
34 | 30 | ||
35 | To compile it as a module, choose M here. If unsure, say N. | 31 | If unsure, say Y. |
36 | 32 | ||
33 | # connection tracking, helpers and protocols | ||
37 | config IP_NF_CT_ACCT | 34 | config IP_NF_CT_ACCT |
38 | bool "Connection tracking flow accounting" | 35 | bool "Connection tracking flow accounting" |
39 | depends on IP_NF_CONNTRACK | 36 | depends on IP_NF_CONNTRACK |
@@ -315,20 +312,6 @@ config IP_NF_MATCH_ADDRTYPE | |||
315 | If you want to compile it as a module, say M here and read | 312 | If you want to compile it as a module, say M here and read |
316 | <file:Documentation/modules.txt>. If unsure, say `N'. | 313 | <file:Documentation/modules.txt>. If unsure, say `N'. |
317 | 314 | ||
318 | config IP_NF_MATCH_HASHLIMIT | ||
319 | tristate 'hashlimit match support' | ||
320 | depends on IP_NF_IPTABLES | ||
321 | help | ||
322 | This option adds a new iptables `hashlimit' match. | ||
323 | |||
324 | As opposed to `limit', this match dynamically creates a hash table | ||
325 | of limit buckets, based on your selection of source/destination | ||
326 | ip addresses and/or ports. | ||
327 | |||
328 | It enables you to express policies like `10kpps for any given | ||
329 | destination IP' or `500pps from any given source IP' with a single | ||
330 | IPtables rule. | ||
331 | |||
332 | # `filter', generic and specific targets | 315 | # `filter', generic and specific targets |
333 | config IP_NF_FILTER | 316 | config IP_NF_FILTER |
334 | tristate "Packet filtering" | 317 | tristate "Packet filtering" |
@@ -404,7 +387,7 @@ config IP_NF_TARGET_TCPMSS | |||
404 | 387 | ||
405 | To compile it as a module, choose M here. If unsure, say N. | 388 | To compile it as a module, choose M here. If unsure, say N. |
406 | 389 | ||
407 | # NAT + specific targets | 390 | # NAT + specific targets: ip_conntrack |
408 | config IP_NF_NAT | 391 | config IP_NF_NAT |
409 | tristate "Full NAT" | 392 | tristate "Full NAT" |
410 | depends on IP_NF_IPTABLES && IP_NF_CONNTRACK | 393 | depends on IP_NF_IPTABLES && IP_NF_CONNTRACK |
@@ -415,14 +398,30 @@ config IP_NF_NAT | |||
415 | 398 | ||
416 | To compile it as a module, choose M here. If unsure, say N. | 399 | To compile it as a module, choose M here. If unsure, say N. |
417 | 400 | ||
401 | # NAT + specific targets: nf_conntrack | ||
402 | config NF_NAT | ||
403 | tristate "Full NAT" | ||
404 | depends on IP_NF_IPTABLES && NF_CONNTRACK | ||
405 | help | ||
406 | The Full NAT option allows masquerading, port forwarding and other | ||
407 | forms of full Network Address Port Translation. It is controlled by | ||
408 | the `nat' table in iptables: see the man page for iptables(8). | ||
409 | |||
410 | To compile it as a module, choose M here. If unsure, say N. | ||
411 | |||
418 | config IP_NF_NAT_NEEDED | 412 | config IP_NF_NAT_NEEDED |
419 | bool | 413 | bool |
420 | depends on IP_NF_NAT != n | 414 | depends on IP_NF_NAT |
415 | default y | ||
416 | |||
417 | config NF_NAT_NEEDED | ||
418 | bool | ||
419 | depends on NF_NAT | ||
421 | default y | 420 | default y |
422 | 421 | ||
423 | config IP_NF_TARGET_MASQUERADE | 422 | config IP_NF_TARGET_MASQUERADE |
424 | tristate "MASQUERADE target support" | 423 | tristate "MASQUERADE target support" |
425 | depends on IP_NF_NAT | 424 | depends on (NF_NAT || IP_NF_NAT) |
426 | help | 425 | help |
427 | Masquerading is a special case of NAT: all outgoing connections are | 426 | Masquerading is a special case of NAT: all outgoing connections are |
428 | changed to seem to come from a particular interface's address, and | 427 | changed to seem to come from a particular interface's address, and |
@@ -434,7 +433,7 @@ config IP_NF_TARGET_MASQUERADE | |||
434 | 433 | ||
435 | config IP_NF_TARGET_REDIRECT | 434 | config IP_NF_TARGET_REDIRECT |
436 | tristate "REDIRECT target support" | 435 | tristate "REDIRECT target support" |
437 | depends on IP_NF_NAT | 436 | depends on (NF_NAT || IP_NF_NAT) |
438 | help | 437 | help |
439 | REDIRECT is a special case of NAT: all incoming connections are | 438 | REDIRECT is a special case of NAT: all incoming connections are |
440 | mapped onto the incoming interface's address, causing the packets to | 439 | mapped onto the incoming interface's address, causing the packets to |
@@ -445,7 +444,7 @@ config IP_NF_TARGET_REDIRECT | |||
445 | 444 | ||
446 | config IP_NF_TARGET_NETMAP | 445 | config IP_NF_TARGET_NETMAP |
447 | tristate "NETMAP target support" | 446 | tristate "NETMAP target support" |
448 | depends on IP_NF_NAT | 447 | depends on (NF_NAT || IP_NF_NAT) |
449 | help | 448 | help |
450 | NETMAP is an implementation of static 1:1 NAT mapping of network | 449 | NETMAP is an implementation of static 1:1 NAT mapping of network |
451 | addresses. It maps the network address part, while keeping the host | 450 | addresses. It maps the network address part, while keeping the host |
@@ -456,7 +455,7 @@ config IP_NF_TARGET_NETMAP | |||
456 | 455 | ||
457 | config IP_NF_TARGET_SAME | 456 | config IP_NF_TARGET_SAME |
458 | tristate "SAME target support" | 457 | tristate "SAME target support" |
459 | depends on IP_NF_NAT | 458 | depends on (NF_NAT || IP_NF_NAT) |
460 | help | 459 | help |
461 | This option adds a `SAME' target, which works like the standard SNAT | 460 | This option adds a `SAME' target, which works like the standard SNAT |
462 | target, but attempts to give clients the same IP for all connections. | 461 | target, but attempts to give clients the same IP for all connections. |
@@ -478,19 +477,52 @@ config IP_NF_NAT_SNMP_BASIC | |||
478 | 477 | ||
479 | To compile it as a module, choose M here. If unsure, say N. | 478 | To compile it as a module, choose M here. If unsure, say N. |
480 | 479 | ||
480 | config NF_NAT_SNMP_BASIC | ||
481 | tristate "Basic SNMP-ALG support (EXPERIMENTAL)" | ||
482 | depends on EXPERIMENTAL && NF_NAT | ||
483 | ---help--- | ||
484 | |||
485 | This module implements an Application Layer Gateway (ALG) for | ||
486 | SNMP payloads. In conjunction with NAT, it allows a network | ||
487 | management system to access multiple private networks with | ||
488 | conflicting addresses. It works by modifying IP addresses | ||
489 | inside SNMP payloads to match IP-layer NAT mapping. | ||
490 | |||
491 | This is the "basic" form of SNMP-ALG, as described in RFC 2962 | ||
492 | |||
493 | To compile it as a module, choose M here. If unsure, say N. | ||
494 | |||
495 | # If they want FTP, set to $CONFIG_IP_NF_NAT (m or y), | ||
496 | # or $CONFIG_IP_NF_FTP (m or y), whichever is weaker. | ||
497 | # From kconfig-language.txt: | ||
498 | # | ||
499 | # <expr> '&&' <expr> (6) | ||
500 | # | ||
501 | # (6) Returns the result of min(/expr/, /expr/). | ||
502 | config NF_NAT_PROTO_GRE | ||
503 | tristate | ||
504 | depends on NF_NAT && NF_CT_PROTO_GRE | ||
505 | |||
506 | config IP_NF_NAT_FTP | ||
507 | tristate | ||
508 | depends on IP_NF_IPTABLES && IP_NF_CONNTRACK && IP_NF_NAT | ||
509 | default IP_NF_NAT && IP_NF_FTP | ||
510 | |||
511 | config NF_NAT_FTP | ||
512 | tristate | ||
513 | depends on IP_NF_IPTABLES && NF_CONNTRACK && NF_NAT | ||
514 | default NF_NAT && NF_CONNTRACK_FTP | ||
515 | |||
481 | config IP_NF_NAT_IRC | 516 | config IP_NF_NAT_IRC |
482 | tristate | 517 | tristate |
483 | depends on IP_NF_IPTABLES!=n && IP_NF_CONNTRACK!=n && IP_NF_NAT!=n | 518 | depends on IP_NF_IPTABLES!=n && IP_NF_CONNTRACK!=n && IP_NF_NAT!=n |
484 | default IP_NF_NAT if IP_NF_IRC=y | 519 | default IP_NF_NAT if IP_NF_IRC=y |
485 | default m if IP_NF_IRC=m | 520 | default m if IP_NF_IRC=m |
486 | 521 | ||
487 | # If they want FTP, set to $CONFIG_IP_NF_NAT (m or y), | 522 | config NF_NAT_IRC |
488 | # or $CONFIG_IP_NF_FTP (m or y), whichever is weaker. Argh. | ||
489 | config IP_NF_NAT_FTP | ||
490 | tristate | 523 | tristate |
491 | depends on IP_NF_IPTABLES!=n && IP_NF_CONNTRACK!=n && IP_NF_NAT!=n | 524 | depends on IP_NF_IPTABLES && NF_CONNTRACK && NF_NAT |
492 | default IP_NF_NAT if IP_NF_FTP=y | 525 | default NF_NAT && NF_CONNTRACK_IRC |
493 | default m if IP_NF_FTP=m | ||
494 | 526 | ||
495 | config IP_NF_NAT_TFTP | 527 | config IP_NF_NAT_TFTP |
496 | tristate | 528 | tristate |
@@ -498,30 +530,56 @@ config IP_NF_NAT_TFTP | |||
498 | default IP_NF_NAT if IP_NF_TFTP=y | 530 | default IP_NF_NAT if IP_NF_TFTP=y |
499 | default m if IP_NF_TFTP=m | 531 | default m if IP_NF_TFTP=m |
500 | 532 | ||
533 | config NF_NAT_TFTP | ||
534 | tristate | ||
535 | depends on IP_NF_IPTABLES && NF_CONNTRACK && NF_NAT | ||
536 | default NF_NAT && NF_CONNTRACK_TFTP | ||
537 | |||
501 | config IP_NF_NAT_AMANDA | 538 | config IP_NF_NAT_AMANDA |
502 | tristate | 539 | tristate |
503 | depends on IP_NF_IPTABLES!=n && IP_NF_CONNTRACK!=n && IP_NF_NAT!=n | 540 | depends on IP_NF_IPTABLES!=n && IP_NF_CONNTRACK!=n && IP_NF_NAT!=n |
504 | default IP_NF_NAT if IP_NF_AMANDA=y | 541 | default IP_NF_NAT if IP_NF_AMANDA=y |
505 | default m if IP_NF_AMANDA=m | 542 | default m if IP_NF_AMANDA=m |
506 | 543 | ||
544 | config NF_NAT_AMANDA | ||
545 | tristate | ||
546 | depends on IP_NF_IPTABLES && NF_CONNTRACK && NF_NAT | ||
547 | default NF_NAT && NF_CONNTRACK_AMANDA | ||
548 | |||
507 | config IP_NF_NAT_PPTP | 549 | config IP_NF_NAT_PPTP |
508 | tristate | 550 | tristate |
509 | depends on IP_NF_NAT!=n && IP_NF_PPTP!=n | 551 | depends on IP_NF_NAT!=n && IP_NF_PPTP!=n |
510 | default IP_NF_NAT if IP_NF_PPTP=y | 552 | default IP_NF_NAT if IP_NF_PPTP=y |
511 | default m if IP_NF_PPTP=m | 553 | default m if IP_NF_PPTP=m |
512 | 554 | ||
555 | config NF_NAT_PPTP | ||
556 | tristate | ||
557 | depends on IP_NF_IPTABLES && NF_CONNTRACK && NF_NAT | ||
558 | default NF_NAT && NF_CONNTRACK_PPTP | ||
559 | select NF_NAT_PROTO_GRE | ||
560 | |||
513 | config IP_NF_NAT_H323 | 561 | config IP_NF_NAT_H323 |
514 | tristate | 562 | tristate |
515 | depends on IP_NF_IPTABLES!=n && IP_NF_CONNTRACK!=n && IP_NF_NAT!=n | 563 | depends on IP_NF_IPTABLES!=n && IP_NF_CONNTRACK!=n && IP_NF_NAT!=n |
516 | default IP_NF_NAT if IP_NF_H323=y | 564 | default IP_NF_NAT if IP_NF_H323=y |
517 | default m if IP_NF_H323=m | 565 | default m if IP_NF_H323=m |
518 | 566 | ||
567 | config NF_NAT_H323 | ||
568 | tristate | ||
569 | depends on IP_NF_IPTABLES && NF_CONNTRACK && NF_NAT | ||
570 | default NF_NAT && NF_CONNTRACK_H323 | ||
571 | |||
519 | config IP_NF_NAT_SIP | 572 | config IP_NF_NAT_SIP |
520 | tristate | 573 | tristate |
521 | depends on IP_NF_IPTABLES!=n && IP_NF_CONNTRACK!=n && IP_NF_NAT!=n | 574 | depends on IP_NF_IPTABLES!=n && IP_NF_CONNTRACK!=n && IP_NF_NAT!=n |
522 | default IP_NF_NAT if IP_NF_SIP=y | 575 | default IP_NF_NAT if IP_NF_SIP=y |
523 | default m if IP_NF_SIP=m | 576 | default m if IP_NF_SIP=m |
524 | 577 | ||
578 | config NF_NAT_SIP | ||
579 | tristate | ||
580 | depends on IP_NF_IPTABLES && NF_CONNTRACK && NF_NAT | ||
581 | default NF_NAT && NF_CONNTRACK_SIP | ||
582 | |||
525 | # mangle + specific targets | 583 | # mangle + specific targets |
526 | config IP_NF_MANGLE | 584 | config IP_NF_MANGLE |
527 | tristate "Packet mangling" | 585 | tristate "Packet mangling" |
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile index 09aaed1a8063..15e741aeb291 100644 --- a/net/ipv4/netfilter/Makefile +++ b/net/ipv4/netfilter/Makefile | |||
@@ -5,17 +5,23 @@ | |||
5 | # objects for the standalone - connection tracking / NAT | 5 | # objects for the standalone - connection tracking / NAT |
6 | ip_conntrack-objs := ip_conntrack_standalone.o ip_conntrack_core.o ip_conntrack_proto_generic.o ip_conntrack_proto_tcp.o ip_conntrack_proto_udp.o ip_conntrack_proto_icmp.o | 6 | ip_conntrack-objs := ip_conntrack_standalone.o ip_conntrack_core.o ip_conntrack_proto_generic.o ip_conntrack_proto_tcp.o ip_conntrack_proto_udp.o ip_conntrack_proto_icmp.o |
7 | ip_nat-objs := ip_nat_core.o ip_nat_helper.o ip_nat_proto_unknown.o ip_nat_proto_tcp.o ip_nat_proto_udp.o ip_nat_proto_icmp.o | 7 | ip_nat-objs := ip_nat_core.o ip_nat_helper.o ip_nat_proto_unknown.o ip_nat_proto_tcp.o ip_nat_proto_udp.o ip_nat_proto_icmp.o |
8 | nf_nat-objs := nf_nat_core.o nf_nat_helper.o nf_nat_proto_unknown.o nf_nat_proto_tcp.o nf_nat_proto_udp.o nf_nat_proto_icmp.o | ||
9 | ifneq ($(CONFIG_NF_NAT),) | ||
10 | iptable_nat-objs := nf_nat_rule.o nf_nat_standalone.o | ||
11 | else | ||
8 | iptable_nat-objs := ip_nat_rule.o ip_nat_standalone.o | 12 | iptable_nat-objs := ip_nat_rule.o ip_nat_standalone.o |
13 | endif | ||
9 | 14 | ||
10 | ip_conntrack_pptp-objs := ip_conntrack_helper_pptp.o ip_conntrack_proto_gre.o | 15 | ip_conntrack_pptp-objs := ip_conntrack_helper_pptp.o ip_conntrack_proto_gre.o |
11 | ip_nat_pptp-objs := ip_nat_helper_pptp.o ip_nat_proto_gre.o | 16 | ip_nat_pptp-objs := ip_nat_helper_pptp.o ip_nat_proto_gre.o |
12 | 17 | ||
13 | ip_conntrack_h323-objs := ip_conntrack_helper_h323.o ip_conntrack_helper_h323_asn1.o | 18 | ip_conntrack_h323-objs := ip_conntrack_helper_h323.o ../../netfilter/nf_conntrack_h323_asn1.o |
14 | ip_nat_h323-objs := ip_nat_helper_h323.o | 19 | ip_nat_h323-objs := ip_nat_helper_h323.o |
15 | 20 | ||
16 | # connection tracking | 21 | # connection tracking |
17 | obj-$(CONFIG_IP_NF_CONNTRACK) += ip_conntrack.o | 22 | obj-$(CONFIG_IP_NF_CONNTRACK) += ip_conntrack.o |
18 | obj-$(CONFIG_IP_NF_NAT) += ip_nat.o | 23 | obj-$(CONFIG_IP_NF_NAT) += ip_nat.o |
24 | obj-$(CONFIG_NF_NAT) += nf_nat.o | ||
19 | 25 | ||
20 | # conntrack netlink interface | 26 | # conntrack netlink interface |
21 | obj-$(CONFIG_IP_NF_CONNTRACK_NETLINK) += ip_conntrack_netlink.o | 27 | obj-$(CONFIG_IP_NF_CONNTRACK_NETLINK) += ip_conntrack_netlink.o |
@@ -34,7 +40,7 @@ obj-$(CONFIG_IP_NF_IRC) += ip_conntrack_irc.o | |||
34 | obj-$(CONFIG_IP_NF_SIP) += ip_conntrack_sip.o | 40 | obj-$(CONFIG_IP_NF_SIP) += ip_conntrack_sip.o |
35 | obj-$(CONFIG_IP_NF_NETBIOS_NS) += ip_conntrack_netbios_ns.o | 41 | obj-$(CONFIG_IP_NF_NETBIOS_NS) += ip_conntrack_netbios_ns.o |
36 | 42 | ||
37 | # NAT helpers | 43 | # NAT helpers (ip_conntrack) |
38 | obj-$(CONFIG_IP_NF_NAT_H323) += ip_nat_h323.o | 44 | obj-$(CONFIG_IP_NF_NAT_H323) += ip_nat_h323.o |
39 | obj-$(CONFIG_IP_NF_NAT_PPTP) += ip_nat_pptp.o | 45 | obj-$(CONFIG_IP_NF_NAT_PPTP) += ip_nat_pptp.o |
40 | obj-$(CONFIG_IP_NF_NAT_AMANDA) += ip_nat_amanda.o | 46 | obj-$(CONFIG_IP_NF_NAT_AMANDA) += ip_nat_amanda.o |
@@ -43,6 +49,19 @@ obj-$(CONFIG_IP_NF_NAT_FTP) += ip_nat_ftp.o | |||
43 | obj-$(CONFIG_IP_NF_NAT_IRC) += ip_nat_irc.o | 49 | obj-$(CONFIG_IP_NF_NAT_IRC) += ip_nat_irc.o |
44 | obj-$(CONFIG_IP_NF_NAT_SIP) += ip_nat_sip.o | 50 | obj-$(CONFIG_IP_NF_NAT_SIP) += ip_nat_sip.o |
45 | 51 | ||
52 | # NAT helpers (nf_conntrack) | ||
53 | obj-$(CONFIG_NF_NAT_AMANDA) += nf_nat_amanda.o | ||
54 | obj-$(CONFIG_NF_NAT_FTP) += nf_nat_ftp.o | ||
55 | obj-$(CONFIG_NF_NAT_H323) += nf_nat_h323.o | ||
56 | obj-$(CONFIG_NF_NAT_IRC) += nf_nat_irc.o | ||
57 | obj-$(CONFIG_NF_NAT_PPTP) += nf_nat_pptp.o | ||
58 | obj-$(CONFIG_NF_NAT_SIP) += nf_nat_sip.o | ||
59 | obj-$(CONFIG_NF_NAT_SNMP_BASIC) += nf_nat_snmp_basic.o | ||
60 | obj-$(CONFIG_NF_NAT_TFTP) += nf_nat_tftp.o | ||
61 | |||
62 | # NAT protocols (nf_nat) | ||
63 | obj-$(CONFIG_NF_NAT_PROTO_GRE) += nf_nat_proto_gre.o | ||
64 | |||
46 | # generic IP tables | 65 | # generic IP tables |
47 | obj-$(CONFIG_IP_NF_IPTABLES) += ip_tables.o | 66 | obj-$(CONFIG_IP_NF_IPTABLES) += ip_tables.o |
48 | 67 | ||
@@ -50,10 +69,10 @@ obj-$(CONFIG_IP_NF_IPTABLES) += ip_tables.o | |||
50 | obj-$(CONFIG_IP_NF_FILTER) += iptable_filter.o | 69 | obj-$(CONFIG_IP_NF_FILTER) += iptable_filter.o |
51 | obj-$(CONFIG_IP_NF_MANGLE) += iptable_mangle.o | 70 | obj-$(CONFIG_IP_NF_MANGLE) += iptable_mangle.o |
52 | obj-$(CONFIG_IP_NF_NAT) += iptable_nat.o | 71 | obj-$(CONFIG_IP_NF_NAT) += iptable_nat.o |
72 | obj-$(CONFIG_NF_NAT) += iptable_nat.o | ||
53 | obj-$(CONFIG_IP_NF_RAW) += iptable_raw.o | 73 | obj-$(CONFIG_IP_NF_RAW) += iptable_raw.o |
54 | 74 | ||
55 | # matches | 75 | # matches |
56 | obj-$(CONFIG_IP_NF_MATCH_HASHLIMIT) += ipt_hashlimit.o | ||
57 | obj-$(CONFIG_IP_NF_MATCH_IPRANGE) += ipt_iprange.o | 76 | obj-$(CONFIG_IP_NF_MATCH_IPRANGE) += ipt_iprange.o |
58 | obj-$(CONFIG_IP_NF_MATCH_OWNER) += ipt_owner.o | 77 | obj-$(CONFIG_IP_NF_MATCH_OWNER) += ipt_owner.o |
59 | obj-$(CONFIG_IP_NF_MATCH_TOS) += ipt_tos.o | 78 | obj-$(CONFIG_IP_NF_MATCH_TOS) += ipt_tos.o |
@@ -89,6 +108,11 @@ obj-$(CONFIG_IP_NF_QUEUE) += ip_queue.o | |||
89 | 108 | ||
90 | # objects for l3 independent conntrack | 109 | # objects for l3 independent conntrack |
91 | nf_conntrack_ipv4-objs := nf_conntrack_l3proto_ipv4.o nf_conntrack_proto_icmp.o | 110 | nf_conntrack_ipv4-objs := nf_conntrack_l3proto_ipv4.o nf_conntrack_proto_icmp.o |
111 | ifeq ($(CONFIG_NF_CONNTRACK_PROC_COMPAT),y) | ||
112 | ifeq ($(CONFIG_PROC_FS),y) | ||
113 | nf_conntrack_ipv4-objs += nf_conntrack_l3proto_ipv4_compat.o | ||
114 | endif | ||
115 | endif | ||
92 | 116 | ||
93 | # l3 independent conntrack | 117 | # l3 independent conntrack |
94 | obj-$(CONFIG_NF_CONNTRACK_IPV4) += nf_conntrack_ipv4.o | 118 | obj-$(CONFIG_NF_CONNTRACK_IPV4) += nf_conntrack_ipv4.o |
diff --git a/net/ipv4/netfilter/ip_conntrack_amanda.c b/net/ipv4/netfilter/ip_conntrack_amanda.c index 6c7383a8e42b..ad246ba7790b 100644 --- a/net/ipv4/netfilter/ip_conntrack_amanda.c +++ b/net/ipv4/netfilter/ip_conntrack_amanda.c | |||
@@ -92,6 +92,7 @@ static int help(struct sk_buff **pskb, | |||
92 | char pbuf[sizeof("65535")], *tmp; | 92 | char pbuf[sizeof("65535")], *tmp; |
93 | u_int16_t port, len; | 93 | u_int16_t port, len; |
94 | int ret = NF_ACCEPT; | 94 | int ret = NF_ACCEPT; |
95 | typeof(ip_nat_amanda_hook) ip_nat_amanda; | ||
95 | 96 | ||
96 | /* Only look at packets from the Amanda server */ | 97 | /* Only look at packets from the Amanda server */ |
97 | if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) | 98 | if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) |
@@ -161,9 +162,11 @@ static int help(struct sk_buff **pskb, | |||
161 | exp->mask.dst.protonum = 0xFF; | 162 | exp->mask.dst.protonum = 0xFF; |
162 | exp->mask.dst.u.tcp.port = htons(0xFFFF); | 163 | exp->mask.dst.u.tcp.port = htons(0xFFFF); |
163 | 164 | ||
164 | if (ip_nat_amanda_hook) | 165 | /* RCU read locked by nf_hook_slow */ |
165 | ret = ip_nat_amanda_hook(pskb, ctinfo, off - dataoff, | 166 | ip_nat_amanda = rcu_dereference(ip_nat_amanda_hook); |
166 | len, exp); | 167 | if (ip_nat_amanda) |
168 | ret = ip_nat_amanda(pskb, ctinfo, off - dataoff, | ||
169 | len, exp); | ||
167 | else if (ip_conntrack_expect_related(exp) != 0) | 170 | else if (ip_conntrack_expect_related(exp) != 0) |
168 | ret = NF_DROP; | 171 | ret = NF_DROP; |
169 | ip_conntrack_expect_put(exp); | 172 | ip_conntrack_expect_put(exp); |
@@ -180,7 +183,7 @@ static struct ip_conntrack_helper amanda_helper = { | |||
180 | .help = help, | 183 | .help = help, |
181 | .name = "amanda", | 184 | .name = "amanda", |
182 | 185 | ||
183 | .tuple = { .src = { .u = { __constant_htons(10080) } }, | 186 | .tuple = { .src = { .u = { .udp = {.port = __constant_htons(10080) } } }, |
184 | .dst = { .protonum = IPPROTO_UDP }, | 187 | .dst = { .protonum = IPPROTO_UDP }, |
185 | }, | 188 | }, |
186 | .mask = { .src = { .u = { 0xFFFF } }, | 189 | .mask = { .src = { .u = { 0xFFFF } }, |
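The amanda hunk above is the first of several that replace a bare test of a global function pointer with an rcu_dereference() of the NAT hook. Below is a minimal kernel-style sketch of that pattern, assuming the reader side already runs inside the RCU read-side section taken by nf_hook_slow(); the hook name example_nat_hook and its signature are hypothetical, only the RCU primitives themselves are real kernel APIs.

/* Sketch only: example_nat_hook is a made-up hook, not a kernel symbol. */
#include <linux/netfilter.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>

unsigned int (*example_nat_hook)(struct sk_buff **pskb);

/* Reader: called from a netfilter hook, i.e. under nf_hook_slow()'s RCU read lock. */
static unsigned int example_help(struct sk_buff **pskb)
{
	typeof(example_nat_hook) nat;

	nat = rcu_dereference(example_nat_hook);  /* snapshot the hook once */
	if (nat)
		return nat(pskb);                 /* NAT helper module is loaded */
	return NF_ACCEPT;                         /* conntrack-only fallback */
}

/* Writer: the NAT module publishes the hook on load and retracts it on unload. */
static void example_nat_publish(unsigned int (*fn)(struct sk_buff **pskb))
{
	rcu_assign_pointer(example_nat_hook, fn);
}

static void example_nat_retract(void)
{
	rcu_assign_pointer(example_nat_hook, NULL);
	synchronize_rcu();  /* wait until no reader can still be calling the old hook */
}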
diff --git a/net/ipv4/netfilter/ip_conntrack_core.c b/net/ipv4/netfilter/ip_conntrack_core.c index 143c4668538b..f4b0e68a16d2 100644 --- a/net/ipv4/netfilter/ip_conntrack_core.c +++ b/net/ipv4/netfilter/ip_conntrack_core.c | |||
@@ -40,9 +40,6 @@ | |||
40 | 40 | ||
41 | /* ip_conntrack_lock protects the main hash table, protocol/helper/expected | 41 | /* ip_conntrack_lock protects the main hash table, protocol/helper/expected |
42 | registrations, conntrack timers*/ | 42 | registrations, conntrack timers*/ |
43 | #define ASSERT_READ_LOCK(x) | ||
44 | #define ASSERT_WRITE_LOCK(x) | ||
45 | |||
46 | #include <linux/netfilter_ipv4/ip_conntrack.h> | 43 | #include <linux/netfilter_ipv4/ip_conntrack.h> |
47 | #include <linux/netfilter_ipv4/ip_conntrack_protocol.h> | 44 | #include <linux/netfilter_ipv4/ip_conntrack_protocol.h> |
48 | #include <linux/netfilter_ipv4/ip_conntrack_helper.h> | 45 | #include <linux/netfilter_ipv4/ip_conntrack_helper.h> |
@@ -201,7 +198,6 @@ ip_ct_invert_tuple(struct ip_conntrack_tuple *inverse, | |||
201 | /* ip_conntrack_expect helper functions */ | 198 | /* ip_conntrack_expect helper functions */ |
202 | void ip_ct_unlink_expect(struct ip_conntrack_expect *exp) | 199 | void ip_ct_unlink_expect(struct ip_conntrack_expect *exp) |
203 | { | 200 | { |
204 | ASSERT_WRITE_LOCK(&ip_conntrack_lock); | ||
205 | IP_NF_ASSERT(!timer_pending(&exp->timeout)); | 201 | IP_NF_ASSERT(!timer_pending(&exp->timeout)); |
206 | list_del(&exp->list); | 202 | list_del(&exp->list); |
207 | CONNTRACK_STAT_INC(expect_delete); | 203 | CONNTRACK_STAT_INC(expect_delete); |
@@ -225,22 +221,22 @@ __ip_conntrack_expect_find(const struct ip_conntrack_tuple *tuple) | |||
225 | struct ip_conntrack_expect *i; | 221 | struct ip_conntrack_expect *i; |
226 | 222 | ||
227 | list_for_each_entry(i, &ip_conntrack_expect_list, list) { | 223 | list_for_each_entry(i, &ip_conntrack_expect_list, list) { |
228 | if (ip_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask)) { | 224 | if (ip_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask)) |
229 | atomic_inc(&i->use); | ||
230 | return i; | 225 | return i; |
231 | } | ||
232 | } | 226 | } |
233 | return NULL; | 227 | return NULL; |
234 | } | 228 | } |
235 | 229 | ||
236 | /* Just find an expectation corresponding to a tuple. */ | 230 | /* Just find an expectation corresponding to a tuple. */ |
237 | struct ip_conntrack_expect * | 231 | struct ip_conntrack_expect * |
238 | ip_conntrack_expect_find(const struct ip_conntrack_tuple *tuple) | 232 | ip_conntrack_expect_find_get(const struct ip_conntrack_tuple *tuple) |
239 | { | 233 | { |
240 | struct ip_conntrack_expect *i; | 234 | struct ip_conntrack_expect *i; |
241 | 235 | ||
242 | read_lock_bh(&ip_conntrack_lock); | 236 | read_lock_bh(&ip_conntrack_lock); |
243 | i = __ip_conntrack_expect_find(tuple); | 237 | i = __ip_conntrack_expect_find(tuple); |
238 | if (i) | ||
239 | atomic_inc(&i->use); | ||
244 | read_unlock_bh(&ip_conntrack_lock); | 240 | read_unlock_bh(&ip_conntrack_lock); |
245 | 241 | ||
246 | return i; | 242 | return i; |
@@ -294,7 +290,6 @@ static void | |||
294 | clean_from_lists(struct ip_conntrack *ct) | 290 | clean_from_lists(struct ip_conntrack *ct) |
295 | { | 291 | { |
296 | DEBUGP("clean_from_lists(%p)\n", ct); | 292 | DEBUGP("clean_from_lists(%p)\n", ct); |
297 | ASSERT_WRITE_LOCK(&ip_conntrack_lock); | ||
298 | list_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list); | 293 | list_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list); |
299 | list_del(&ct->tuplehash[IP_CT_DIR_REPLY].list); | 294 | list_del(&ct->tuplehash[IP_CT_DIR_REPLY].list); |
300 | 295 | ||
@@ -373,7 +368,6 @@ __ip_conntrack_find(const struct ip_conntrack_tuple *tuple, | |||
373 | struct ip_conntrack_tuple_hash *h; | 368 | struct ip_conntrack_tuple_hash *h; |
374 | unsigned int hash = hash_conntrack(tuple); | 369 | unsigned int hash = hash_conntrack(tuple); |
375 | 370 | ||
376 | ASSERT_READ_LOCK(&ip_conntrack_lock); | ||
377 | list_for_each_entry(h, &ip_conntrack_hash[hash], list) { | 371 | list_for_each_entry(h, &ip_conntrack_hash[hash], list) { |
378 | if (tuplehash_to_ctrack(h) != ignored_conntrack && | 372 | if (tuplehash_to_ctrack(h) != ignored_conntrack && |
379 | ip_ct_tuple_equal(tuple, &h->tuple)) { | 373 | ip_ct_tuple_equal(tuple, &h->tuple)) { |
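The ip_conntrack_core.c hunk above moves the atomic_inc() out of the unlocked __ip_conntrack_expect_find() and into the renamed ip_conntrack_expect_find_get(), so only the locked wrapper hands out a counted reference. A stripped-down sketch of that split follows, with hypothetical example_* names standing in for the conntrack types.

/* Sketch only; struct example_expect and its list are illustrative stand-ins. */
#include <linux/list.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>

struct example_expect {
	struct list_head list;
	atomic_t use;
	int key;
};

static LIST_HEAD(example_expect_list);
static DEFINE_RWLOCK(example_lock);

/* Caller must hold example_lock; returns a bare pointer, no reference taken. */
static struct example_expect *__example_expect_find(int key)
{
	struct example_expect *i;

	list_for_each_entry(i, &example_expect_list, list)
		if (i->key == key)
			return i;
	return NULL;
}

/* Locked wrapper: bumps the use count before dropping the lock, so the
 * object stays valid for the caller until the matching _put(). */
static struct example_expect *example_expect_find_get(int key)
{
	struct example_expect *i;

	read_lock_bh(&example_lock);
	i = __example_expect_find(key);
	if (i)
		atomic_inc(&i->use);
	read_unlock_bh(&example_lock);
	return i;
}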
diff --git a/net/ipv4/netfilter/ip_conntrack_ftp.c b/net/ipv4/netfilter/ip_conntrack_ftp.c index 93dcf960662f..0410c99cacae 100644 --- a/net/ipv4/netfilter/ip_conntrack_ftp.c +++ b/net/ipv4/netfilter/ip_conntrack_ftp.c | |||
@@ -310,6 +310,7 @@ static int help(struct sk_buff **pskb, | |||
310 | struct ip_conntrack_expect *exp; | 310 | struct ip_conntrack_expect *exp; |
311 | unsigned int i; | 311 | unsigned int i; |
312 | int found = 0, ends_in_nl; | 312 | int found = 0, ends_in_nl; |
313 | typeof(ip_nat_ftp_hook) ip_nat_ftp; | ||
313 | 314 | ||
314 | /* Until there's been traffic both ways, don't look in packets. */ | 315 | /* Until there's been traffic both ways, don't look in packets. */ |
315 | if (ctinfo != IP_CT_ESTABLISHED | 316 | if (ctinfo != IP_CT_ESTABLISHED |
@@ -433,9 +434,10 @@ static int help(struct sk_buff **pskb, | |||
433 | 434 | ||
434 | /* Now, NAT might want to mangle the packet, and register the | 435 | /* Now, NAT might want to mangle the packet, and register the |
435 | * (possibly changed) expectation itself. */ | 436 | * (possibly changed) expectation itself. */ |
436 | if (ip_nat_ftp_hook) | 437 | ip_nat_ftp = rcu_dereference(ip_nat_ftp_hook); |
437 | ret = ip_nat_ftp_hook(pskb, ctinfo, search[dir][i].ftptype, | 438 | if (ip_nat_ftp) |
438 | matchoff, matchlen, exp, &seq); | 439 | ret = ip_nat_ftp(pskb, ctinfo, search[dir][i].ftptype, |
440 | matchoff, matchlen, exp, &seq); | ||
439 | else { | 441 | else { |
440 | /* Can't expect this? Best to drop packet now. */ | 442 | /* Can't expect this? Best to drop packet now. */ |
441 | if (ip_conntrack_expect_related(exp) != 0) | 443 | if (ip_conntrack_expect_related(exp) != 0) |
diff --git a/net/ipv4/netfilter/ip_conntrack_helper_h323.c b/net/ipv4/netfilter/ip_conntrack_helper_h323.c index 7b7441202bfd..aabfe1c06905 100644 --- a/net/ipv4/netfilter/ip_conntrack_helper_h323.c +++ b/net/ipv4/netfilter/ip_conntrack_helper_h323.c | |||
@@ -237,6 +237,7 @@ static int expect_rtp_rtcp(struct sk_buff **pskb, struct ip_conntrack *ct, | |||
237 | u_int16_t rtp_port; | 237 | u_int16_t rtp_port; |
238 | struct ip_conntrack_expect *rtp_exp; | 238 | struct ip_conntrack_expect *rtp_exp; |
239 | struct ip_conntrack_expect *rtcp_exp; | 239 | struct ip_conntrack_expect *rtcp_exp; |
240 | typeof(nat_rtp_rtcp_hook) nat_rtp_rtcp; | ||
240 | 241 | ||
241 | /* Read RTP or RTCP address */ | 242 | /* Read RTP or RTCP address */ |
242 | if (!get_h245_addr(*data, addr, &ip, &port) || | 243 | if (!get_h245_addr(*data, addr, &ip, &port) || |
@@ -279,11 +280,11 @@ static int expect_rtp_rtcp(struct sk_buff **pskb, struct ip_conntrack *ct, | |||
279 | rtcp_exp->flags = 0; | 280 | rtcp_exp->flags = 0; |
280 | 281 | ||
281 | if (ct->tuplehash[dir].tuple.src.ip != | 282 | if (ct->tuplehash[dir].tuple.src.ip != |
282 | ct->tuplehash[!dir].tuple.dst.ip && nat_rtp_rtcp_hook) { | 283 | ct->tuplehash[!dir].tuple.dst.ip && |
284 | (nat_rtp_rtcp = rcu_dereference(nat_rtp_rtcp_hook))) { | ||
283 | /* NAT needed */ | 285 | /* NAT needed */ |
284 | ret = nat_rtp_rtcp_hook(pskb, ct, ctinfo, data, dataoff, | 286 | ret = nat_rtp_rtcp(pskb, ct, ctinfo, data, dataoff, |
285 | addr, port, rtp_port, rtp_exp, | 287 | addr, port, rtp_port, rtp_exp, rtcp_exp); |
286 | rtcp_exp); | ||
287 | } else { /* Conntrack only */ | 288 | } else { /* Conntrack only */ |
288 | rtp_exp->expectfn = NULL; | 289 | rtp_exp->expectfn = NULL; |
289 | rtcp_exp->expectfn = NULL; | 290 | rtcp_exp->expectfn = NULL; |
@@ -328,6 +329,7 @@ static int expect_t120(struct sk_buff **pskb, | |||
328 | __be32 ip; | 329 | __be32 ip; |
329 | u_int16_t port; | 330 | u_int16_t port; |
330 | struct ip_conntrack_expect *exp = NULL; | 331 | struct ip_conntrack_expect *exp = NULL; |
332 | typeof(nat_t120_hook) nat_t120; | ||
331 | 333 | ||
332 | /* Read T.120 address */ | 334 | /* Read T.120 address */ |
333 | if (!get_h245_addr(*data, addr, &ip, &port) || | 335 | if (!get_h245_addr(*data, addr, &ip, &port) || |
@@ -350,10 +352,11 @@ static int expect_t120(struct sk_buff **pskb, | |||
350 | exp->flags = IP_CT_EXPECT_PERMANENT; /* Accept multiple channels */ | 352 | exp->flags = IP_CT_EXPECT_PERMANENT; /* Accept multiple channels */ |
351 | 353 | ||
352 | if (ct->tuplehash[dir].tuple.src.ip != | 354 | if (ct->tuplehash[dir].tuple.src.ip != |
353 | ct->tuplehash[!dir].tuple.dst.ip && nat_t120_hook) { | 355 | ct->tuplehash[!dir].tuple.dst.ip && |
356 | (nat_t120 = rcu_dereference(nat_t120_hook))) { | ||
354 | /* NAT needed */ | 357 | /* NAT needed */ |
355 | ret = nat_t120_hook(pskb, ct, ctinfo, data, dataoff, addr, | 358 | ret = nat_t120(pskb, ct, ctinfo, data, dataoff, addr, |
356 | port, exp); | 359 | port, exp); |
357 | } else { /* Conntrack only */ | 360 | } else { /* Conntrack only */ |
358 | exp->expectfn = NULL; | 361 | exp->expectfn = NULL; |
359 | if (ip_conntrack_expect_related(exp) == 0) { | 362 | if (ip_conntrack_expect_related(exp) == 0) { |
@@ -651,6 +654,7 @@ static int expect_h245(struct sk_buff **pskb, struct ip_conntrack *ct, | |||
651 | __be32 ip; | 654 | __be32 ip; |
652 | u_int16_t port; | 655 | u_int16_t port; |
653 | struct ip_conntrack_expect *exp = NULL; | 656 | struct ip_conntrack_expect *exp = NULL; |
657 | typeof(nat_h245_hook) nat_h245; | ||
654 | 658 | ||
655 | /* Read h245Address */ | 659 | /* Read h245Address */ |
656 | if (!get_h225_addr(*data, addr, &ip, &port) || | 660 | if (!get_h225_addr(*data, addr, &ip, &port) || |
@@ -673,10 +677,11 @@ static int expect_h245(struct sk_buff **pskb, struct ip_conntrack *ct, | |||
673 | exp->flags = 0; | 677 | exp->flags = 0; |
674 | 678 | ||
675 | if (ct->tuplehash[dir].tuple.src.ip != | 679 | if (ct->tuplehash[dir].tuple.src.ip != |
676 | ct->tuplehash[!dir].tuple.dst.ip && nat_h245_hook) { | 680 | ct->tuplehash[!dir].tuple.dst.ip && |
681 | (nat_h245 = rcu_dereference(nat_h245_hook))) { | ||
677 | /* NAT needed */ | 682 | /* NAT needed */ |
678 | ret = nat_h245_hook(pskb, ct, ctinfo, data, dataoff, addr, | 683 | ret = nat_h245(pskb, ct, ctinfo, data, dataoff, addr, |
679 | port, exp); | 684 | port, exp); |
680 | } else { /* Conntrack only */ | 685 | } else { /* Conntrack only */ |
681 | exp->expectfn = ip_conntrack_h245_expect; | 686 | exp->expectfn = ip_conntrack_h245_expect; |
682 | 687 | ||
@@ -712,6 +717,7 @@ static int expect_callforwarding(struct sk_buff **pskb, | |||
712 | __be32 ip; | 717 | __be32 ip; |
713 | u_int16_t port; | 718 | u_int16_t port; |
714 | struct ip_conntrack_expect *exp = NULL; | 719 | struct ip_conntrack_expect *exp = NULL; |
720 | typeof(nat_callforwarding_hook) nat_callforwarding; | ||
715 | 721 | ||
716 | /* Read alternativeAddress */ | 722 | /* Read alternativeAddress */ |
717 | if (!get_h225_addr(*data, addr, &ip, &port) || port == 0) | 723 | if (!get_h225_addr(*data, addr, &ip, &port) || port == 0) |
@@ -759,10 +765,11 @@ static int expect_callforwarding(struct sk_buff **pskb, | |||
759 | exp->flags = 0; | 765 | exp->flags = 0; |
760 | 766 | ||
761 | if (ct->tuplehash[dir].tuple.src.ip != | 767 | if (ct->tuplehash[dir].tuple.src.ip != |
762 | ct->tuplehash[!dir].tuple.dst.ip && nat_callforwarding_hook) { | 768 | ct->tuplehash[!dir].tuple.dst.ip && |
769 | (nat_callforwarding = rcu_dereference(nat_callforwarding_hook))) { | ||
763 | /* Need NAT */ | 770 | /* Need NAT */ |
764 | ret = nat_callforwarding_hook(pskb, ct, ctinfo, data, dataoff, | 771 | ret = nat_callforwarding(pskb, ct, ctinfo, data, dataoff, |
765 | addr, port, exp); | 772 | addr, port, exp); |
766 | } else { /* Conntrack only */ | 773 | } else { /* Conntrack only */ |
767 | exp->expectfn = ip_conntrack_q931_expect; | 774 | exp->expectfn = ip_conntrack_q931_expect; |
768 | 775 | ||
@@ -793,6 +800,7 @@ static int process_setup(struct sk_buff **pskb, struct ip_conntrack *ct, | |||
793 | int i; | 800 | int i; |
794 | __be32 ip; | 801 | __be32 ip; |
795 | u_int16_t port; | 802 | u_int16_t port; |
803 | typeof(set_h225_addr_hook) set_h225_addr; | ||
796 | 804 | ||
797 | DEBUGP("ip_ct_q931: Setup\n"); | 805 | DEBUGP("ip_ct_q931: Setup\n"); |
798 | 806 | ||
@@ -803,8 +811,10 @@ static int process_setup(struct sk_buff **pskb, struct ip_conntrack *ct, | |||
803 | return -1; | 811 | return -1; |
804 | } | 812 | } |
805 | 813 | ||
814 | set_h225_addr = rcu_dereference(set_h225_addr_hook); | ||
815 | |||
806 | if ((setup->options & eSetup_UUIE_destCallSignalAddress) && | 816 | if ((setup->options & eSetup_UUIE_destCallSignalAddress) && |
807 | (set_h225_addr_hook) && | 817 | (set_h225_addr) && |
808 | get_h225_addr(*data, &setup->destCallSignalAddress, &ip, &port) && | 818 | get_h225_addr(*data, &setup->destCallSignalAddress, &ip, &port) && |
809 | ip != ct->tuplehash[!dir].tuple.src.ip) { | 819 | ip != ct->tuplehash[!dir].tuple.src.ip) { |
810 | DEBUGP("ip_ct_q931: set destCallSignalAddress " | 820 | DEBUGP("ip_ct_q931: set destCallSignalAddress " |
@@ -812,17 +822,17 @@ static int process_setup(struct sk_buff **pskb, struct ip_conntrack *ct, | |||
812 | NIPQUAD(ip), port, | 822 | NIPQUAD(ip), port, |
813 | NIPQUAD(ct->tuplehash[!dir].tuple.src.ip), | 823 | NIPQUAD(ct->tuplehash[!dir].tuple.src.ip), |
814 | ntohs(ct->tuplehash[!dir].tuple.src.u.tcp.port)); | 824 | ntohs(ct->tuplehash[!dir].tuple.src.u.tcp.port)); |
815 | ret = set_h225_addr_hook(pskb, data, dataoff, | 825 | ret = set_h225_addr(pskb, data, dataoff, |
816 | &setup->destCallSignalAddress, | 826 | &setup->destCallSignalAddress, |
817 | ct->tuplehash[!dir].tuple.src.ip, | 827 | ct->tuplehash[!dir].tuple.src.ip, |
818 | ntohs(ct->tuplehash[!dir].tuple.src. | 828 | ntohs(ct->tuplehash[!dir].tuple.src. |
819 | u.tcp.port)); | 829 | u.tcp.port)); |
820 | if (ret < 0) | 830 | if (ret < 0) |
821 | return -1; | 831 | return -1; |
822 | } | 832 | } |
823 | 833 | ||
824 | if ((setup->options & eSetup_UUIE_sourceCallSignalAddress) && | 834 | if ((setup->options & eSetup_UUIE_sourceCallSignalAddress) && |
825 | (set_h225_addr_hook) && | 835 | (set_h225_addr) && |
826 | get_h225_addr(*data, &setup->sourceCallSignalAddress, &ip, &port) | 836 | get_h225_addr(*data, &setup->sourceCallSignalAddress, &ip, &port) |
827 | && ip != ct->tuplehash[!dir].tuple.dst.ip) { | 837 | && ip != ct->tuplehash[!dir].tuple.dst.ip) { |
828 | DEBUGP("ip_ct_q931: set sourceCallSignalAddress " | 838 | DEBUGP("ip_ct_q931: set sourceCallSignalAddress " |
@@ -830,11 +840,11 @@ static int process_setup(struct sk_buff **pskb, struct ip_conntrack *ct, | |||
830 | NIPQUAD(ip), port, | 840 | NIPQUAD(ip), port, |
831 | NIPQUAD(ct->tuplehash[!dir].tuple.dst.ip), | 841 | NIPQUAD(ct->tuplehash[!dir].tuple.dst.ip), |
832 | ntohs(ct->tuplehash[!dir].tuple.dst.u.tcp.port)); | 842 | ntohs(ct->tuplehash[!dir].tuple.dst.u.tcp.port)); |
833 | ret = set_h225_addr_hook(pskb, data, dataoff, | 843 | ret = set_h225_addr(pskb, data, dataoff, |
834 | &setup->sourceCallSignalAddress, | 844 | &setup->sourceCallSignalAddress, |
835 | ct->tuplehash[!dir].tuple.dst.ip, | 845 | ct->tuplehash[!dir].tuple.dst.ip, |
836 | ntohs(ct->tuplehash[!dir].tuple.dst. | 846 | ntohs(ct->tuplehash[!dir].tuple.dst. |
837 | u.tcp.port)); | 847 | u.tcp.port)); |
838 | if (ret < 0) | 848 | if (ret < 0) |
839 | return -1; | 849 | return -1; |
840 | } | 850 | } |
@@ -1153,7 +1163,7 @@ static struct ip_conntrack_helper ip_conntrack_helper_q931 = { | |||
1153 | .me = THIS_MODULE, | 1163 | .me = THIS_MODULE, |
1154 | .max_expected = H323_RTP_CHANNEL_MAX * 4 + 4 /* T.120 and H.245 */ , | 1164 | .max_expected = H323_RTP_CHANNEL_MAX * 4 + 4 /* T.120 and H.245 */ , |
1155 | .timeout = 240, | 1165 | .timeout = 240, |
1156 | .tuple = {.src = {.u = {__constant_htons(Q931_PORT)}}, | 1166 | .tuple = {.src = {.u = {.tcp = {.port = __constant_htons(Q931_PORT)}}}, |
1157 | .dst = {.protonum = IPPROTO_TCP}}, | 1167 | .dst = {.protonum = IPPROTO_TCP}}, |
1158 | .mask = {.src = {.u = {0xFFFF}}, | 1168 | .mask = {.src = {.u = {0xFFFF}}, |
1159 | .dst = {.protonum = 0xFF}}, | 1169 | .dst = {.protonum = 0xFF}}, |
@@ -1231,6 +1241,7 @@ static int expect_q931(struct sk_buff **pskb, struct ip_conntrack *ct, | |||
1231 | __be32 ip; | 1241 | __be32 ip; |
1232 | u_int16_t port; | 1242 | u_int16_t port; |
1233 | struct ip_conntrack_expect *exp; | 1243 | struct ip_conntrack_expect *exp; |
1244 | typeof(nat_q931_hook) nat_q931; | ||
1234 | 1245 | ||
1235 | /* Look for the first related address */ | 1246 | /* Look for the first related address */ |
1236 | for (i = 0; i < count; i++) { | 1247 | for (i = 0; i < count; i++) { |
@@ -1258,9 +1269,9 @@ static int expect_q931(struct sk_buff **pskb, struct ip_conntrack *ct, | |||
1258 | exp->mask.dst.protonum = 0xFF; | 1269 | exp->mask.dst.protonum = 0xFF; |
1259 | exp->flags = IP_CT_EXPECT_PERMANENT; /* Accept multiple calls */ | 1270 | exp->flags = IP_CT_EXPECT_PERMANENT; /* Accept multiple calls */ |
1260 | 1271 | ||
1261 | if (nat_q931_hook) { /* Need NAT */ | 1272 | nat_q931 = rcu_dereference(nat_q931_hook); |
1262 | ret = nat_q931_hook(pskb, ct, ctinfo, data, addr, i, | 1273 | if (nat_q931) { /* Need NAT */ |
1263 | port, exp); | 1274 | ret = nat_q931(pskb, ct, ctinfo, data, addr, i, port, exp); |
1264 | } else { /* Conntrack only */ | 1275 | } else { /* Conntrack only */ |
1265 | exp->expectfn = ip_conntrack_q931_expect; | 1276 | exp->expectfn = ip_conntrack_q931_expect; |
1266 | 1277 | ||
@@ -1288,11 +1299,14 @@ static int process_grq(struct sk_buff **pskb, struct ip_conntrack *ct, | |||
1288 | enum ip_conntrack_info ctinfo, | 1299 | enum ip_conntrack_info ctinfo, |
1289 | unsigned char **data, GatekeeperRequest * grq) | 1300 | unsigned char **data, GatekeeperRequest * grq) |
1290 | { | 1301 | { |
1302 | typeof(set_ras_addr_hook) set_ras_addr; | ||
1303 | |||
1291 | DEBUGP("ip_ct_ras: GRQ\n"); | 1304 | DEBUGP("ip_ct_ras: GRQ\n"); |
1292 | 1305 | ||
1293 | if (set_ras_addr_hook) /* NATed */ | 1306 | set_ras_addr = rcu_dereference(set_ras_addr_hook); |
1294 | return set_ras_addr_hook(pskb, ct, ctinfo, data, | 1307 | if (set_ras_addr) /* NATed */ |
1295 | &grq->rasAddress, 1); | 1308 | return set_ras_addr(pskb, ct, ctinfo, data, |
1309 | &grq->rasAddress, 1); | ||
1296 | return 0; | 1310 | return 0; |
1297 | } | 1311 | } |
1298 | 1312 | ||
@@ -1362,6 +1376,7 @@ static int process_rrq(struct sk_buff **pskb, struct ip_conntrack *ct, | |||
1362 | { | 1376 | { |
1363 | struct ip_ct_h323_master *info = &ct->help.ct_h323_info; | 1377 | struct ip_ct_h323_master *info = &ct->help.ct_h323_info; |
1364 | int ret; | 1378 | int ret; |
1379 | typeof(set_ras_addr_hook) set_ras_addr; | ||
1365 | 1380 | ||
1366 | DEBUGP("ip_ct_ras: RRQ\n"); | 1381 | DEBUGP("ip_ct_ras: RRQ\n"); |
1367 | 1382 | ||
@@ -1371,10 +1386,11 @@ static int process_rrq(struct sk_buff **pskb, struct ip_conntrack *ct, | |||
1371 | if (ret < 0) | 1386 | if (ret < 0) |
1372 | return -1; | 1387 | return -1; |
1373 | 1388 | ||
1374 | if (set_ras_addr_hook) { | 1389 | set_ras_addr = rcu_dereference(set_ras_addr_hook); |
1375 | ret = set_ras_addr_hook(pskb, ct, ctinfo, data, | 1390 | if (set_ras_addr) { |
1376 | rrq->rasAddress.item, | 1391 | ret = set_ras_addr(pskb, ct, ctinfo, data, |
1377 | rrq->rasAddress.count); | 1392 | rrq->rasAddress.item, |
1393 | rrq->rasAddress.count); | ||
1378 | if (ret < 0) | 1394 | if (ret < 0) |
1379 | return -1; | 1395 | return -1; |
1380 | } | 1396 | } |
@@ -1397,13 +1413,15 @@ static int process_rcf(struct sk_buff **pskb, struct ip_conntrack *ct, | |||
1397 | int dir = CTINFO2DIR(ctinfo); | 1413 | int dir = CTINFO2DIR(ctinfo); |
1398 | int ret; | 1414 | int ret; |
1399 | struct ip_conntrack_expect *exp; | 1415 | struct ip_conntrack_expect *exp; |
1416 | typeof(set_sig_addr_hook) set_sig_addr; | ||
1400 | 1417 | ||
1401 | DEBUGP("ip_ct_ras: RCF\n"); | 1418 | DEBUGP("ip_ct_ras: RCF\n"); |
1402 | 1419 | ||
1403 | if (set_sig_addr_hook) { | 1420 | set_sig_addr = rcu_dereference(set_sig_addr_hook); |
1404 | ret = set_sig_addr_hook(pskb, ct, ctinfo, data, | 1421 | if (set_sig_addr) { |
1405 | rcf->callSignalAddress.item, | 1422 | ret = set_sig_addr(pskb, ct, ctinfo, data, |
1406 | rcf->callSignalAddress.count); | 1423 | rcf->callSignalAddress.item, |
1424 | rcf->callSignalAddress.count); | ||
1407 | if (ret < 0) | 1425 | if (ret < 0) |
1408 | return -1; | 1426 | return -1; |
1409 | } | 1427 | } |
@@ -1417,7 +1435,7 @@ static int process_rcf(struct sk_buff **pskb, struct ip_conntrack *ct, | |||
1417 | DEBUGP | 1435 | DEBUGP |
1418 | ("ip_ct_ras: set RAS connection timeout to %u seconds\n", | 1436 | ("ip_ct_ras: set RAS connection timeout to %u seconds\n", |
1419 | info->timeout); | 1437 | info->timeout); |
1420 | ip_ct_refresh_acct(ct, ctinfo, NULL, info->timeout * HZ); | 1438 | ip_ct_refresh(ct, *pskb, info->timeout * HZ); |
1421 | 1439 | ||
1422 | /* Set expect timeout */ | 1440 | /* Set expect timeout */ |
1423 | read_lock_bh(&ip_conntrack_lock); | 1441 | read_lock_bh(&ip_conntrack_lock); |
@@ -1448,13 +1466,15 @@ static int process_urq(struct sk_buff **pskb, struct ip_conntrack *ct, | |||
1448 | struct ip_ct_h323_master *info = &ct->help.ct_h323_info; | 1466 | struct ip_ct_h323_master *info = &ct->help.ct_h323_info; |
1449 | int dir = CTINFO2DIR(ctinfo); | 1467 | int dir = CTINFO2DIR(ctinfo); |
1450 | int ret; | 1468 | int ret; |
1469 | typeof(set_sig_addr_hook) set_sig_addr; | ||
1451 | 1470 | ||
1452 | DEBUGP("ip_ct_ras: URQ\n"); | 1471 | DEBUGP("ip_ct_ras: URQ\n"); |
1453 | 1472 | ||
1454 | if (set_sig_addr_hook) { | 1473 | set_sig_addr = rcu_dereference(set_sig_addr_hook); |
1455 | ret = set_sig_addr_hook(pskb, ct, ctinfo, data, | 1474 | if (set_sig_addr) { |
1456 | urq->callSignalAddress.item, | 1475 | ret = set_sig_addr(pskb, ct, ctinfo, data, |
1457 | urq->callSignalAddress.count); | 1476 | urq->callSignalAddress.item, |
1477 | urq->callSignalAddress.count); | ||
1458 | if (ret < 0) | 1478 | if (ret < 0) |
1459 | return -1; | 1479 | return -1; |
1460 | } | 1480 | } |
@@ -1465,7 +1485,7 @@ static int process_urq(struct sk_buff **pskb, struct ip_conntrack *ct, | |||
1465 | info->sig_port[!dir] = 0; | 1485 | info->sig_port[!dir] = 0; |
1466 | 1486 | ||
1467 | /* Give it 30 seconds for UCF or URJ */ | 1487 | /* Give it 30 seconds for UCF or URJ */ |
1468 | ip_ct_refresh_acct(ct, ctinfo, NULL, 30 * HZ); | 1488 | ip_ct_refresh(ct, *pskb, 30 * HZ); |
1469 | 1489 | ||
1470 | return 0; | 1490 | return 0; |
1471 | } | 1491 | } |
@@ -1479,28 +1499,30 @@ static int process_arq(struct sk_buff **pskb, struct ip_conntrack *ct, | |||
1479 | int dir = CTINFO2DIR(ctinfo); | 1499 | int dir = CTINFO2DIR(ctinfo); |
1480 | __be32 ip; | 1500 | __be32 ip; |
1481 | u_int16_t port; | 1501 | u_int16_t port; |
1502 | typeof(set_h225_addr_hook) set_h225_addr; | ||
1482 | 1503 | ||
1483 | DEBUGP("ip_ct_ras: ARQ\n"); | 1504 | DEBUGP("ip_ct_ras: ARQ\n"); |
1484 | 1505 | ||
1506 | set_h225_addr = rcu_dereference(set_h225_addr_hook); | ||
1485 | if ((arq->options & eAdmissionRequest_destCallSignalAddress) && | 1507 | if ((arq->options & eAdmissionRequest_destCallSignalAddress) && |
1486 | get_h225_addr(*data, &arq->destCallSignalAddress, &ip, &port) && | 1508 | get_h225_addr(*data, &arq->destCallSignalAddress, &ip, &port) && |
1487 | ip == ct->tuplehash[dir].tuple.src.ip && | 1509 | ip == ct->tuplehash[dir].tuple.src.ip && |
1488 | port == info->sig_port[dir] && set_h225_addr_hook) { | 1510 | port == info->sig_port[dir] && set_h225_addr) { |
1489 | /* Answering ARQ */ | 1511 | /* Answering ARQ */ |
1490 | return set_h225_addr_hook(pskb, data, 0, | 1512 | return set_h225_addr(pskb, data, 0, |
1491 | &arq->destCallSignalAddress, | 1513 | &arq->destCallSignalAddress, |
1492 | ct->tuplehash[!dir].tuple.dst.ip, | 1514 | ct->tuplehash[!dir].tuple.dst.ip, |
1493 | info->sig_port[!dir]); | 1515 | info->sig_port[!dir]); |
1494 | } | 1516 | } |
1495 | 1517 | ||
1496 | if ((arq->options & eAdmissionRequest_srcCallSignalAddress) && | 1518 | if ((arq->options & eAdmissionRequest_srcCallSignalAddress) && |
1497 | get_h225_addr(*data, &arq->srcCallSignalAddress, &ip, &port) && | 1519 | get_h225_addr(*data, &arq->srcCallSignalAddress, &ip, &port) && |
1498 | ip == ct->tuplehash[dir].tuple.src.ip && set_h225_addr_hook) { | 1520 | ip == ct->tuplehash[dir].tuple.src.ip && set_h225_addr) { |
1499 | /* Calling ARQ */ | 1521 | /* Calling ARQ */ |
1500 | return set_h225_addr_hook(pskb, data, 0, | 1522 | return set_h225_addr(pskb, data, 0, |
1501 | &arq->srcCallSignalAddress, | 1523 | &arq->srcCallSignalAddress, |
1502 | ct->tuplehash[!dir].tuple.dst.ip, | 1524 | ct->tuplehash[!dir].tuple.dst.ip, |
1503 | port); | 1525 | port); |
1504 | } | 1526 | } |
1505 | 1527 | ||
1506 | return 0; | 1528 | return 0; |
@@ -1516,6 +1538,7 @@ static int process_acf(struct sk_buff **pskb, struct ip_conntrack *ct, | |||
1516 | __be32 ip; | 1538 | __be32 ip; |
1517 | u_int16_t port; | 1539 | u_int16_t port; |
1518 | struct ip_conntrack_expect *exp; | 1540 | struct ip_conntrack_expect *exp; |
1541 | typeof(set_sig_addr_hook) set_sig_addr; | ||
1519 | 1542 | ||
1520 | DEBUGP("ip_ct_ras: ACF\n"); | 1543 | DEBUGP("ip_ct_ras: ACF\n"); |
1521 | 1544 | ||
@@ -1523,10 +1546,10 @@ static int process_acf(struct sk_buff **pskb, struct ip_conntrack *ct, | |||
1523 | return 0; | 1546 | return 0; |
1524 | 1547 | ||
1525 | if (ip == ct->tuplehash[dir].tuple.dst.ip) { /* Answering ACF */ | 1548 | if (ip == ct->tuplehash[dir].tuple.dst.ip) { /* Answering ACF */ |
1526 | if (set_sig_addr_hook) | 1549 | set_sig_addr = rcu_dereference(set_sig_addr_hook); |
1527 | return set_sig_addr_hook(pskb, ct, ctinfo, data, | 1550 | if (set_sig_addr) |
1528 | &acf->destCallSignalAddress, | 1551 | return set_sig_addr(pskb, ct, ctinfo, data, |
1529 | 1); | 1552 | &acf->destCallSignalAddress, 1); |
1530 | return 0; | 1553 | return 0; |
1531 | } | 1554 | } |
1532 | 1555 | ||
@@ -1566,11 +1589,14 @@ static int process_lrq(struct sk_buff **pskb, struct ip_conntrack *ct, | |||
1566 | enum ip_conntrack_info ctinfo, | 1589 | enum ip_conntrack_info ctinfo, |
1567 | unsigned char **data, LocationRequest * lrq) | 1590 | unsigned char **data, LocationRequest * lrq) |
1568 | { | 1591 | { |
1592 | typeof(set_ras_addr_hook) set_ras_addr; | ||
1593 | |||
1569 | DEBUGP("ip_ct_ras: LRQ\n"); | 1594 | DEBUGP("ip_ct_ras: LRQ\n"); |
1570 | 1595 | ||
1571 | if (set_ras_addr_hook) | 1596 | set_ras_addr = rcu_dereference(set_ras_addr_hook); |
1572 | return set_ras_addr_hook(pskb, ct, ctinfo, data, | 1597 | if (set_ras_addr) |
1573 | &lrq->replyAddress, 1); | 1598 | return set_ras_addr(pskb, ct, ctinfo, data, |
1599 | &lrq->replyAddress, 1); | ||
1574 | return 0; | 1600 | return 0; |
1575 | } | 1601 | } |
1576 | 1602 | ||
@@ -1629,20 +1655,24 @@ static int process_irr(struct sk_buff **pskb, struct ip_conntrack *ct, | |||
1629 | unsigned char **data, InfoRequestResponse * irr) | 1655 | unsigned char **data, InfoRequestResponse * irr) |
1630 | { | 1656 | { |
1631 | int ret; | 1657 | int ret; |
1658 | typeof(set_ras_addr_hook) set_ras_addr; | ||
1659 | typeof(set_sig_addr_hook) set_sig_addr; | ||
1632 | 1660 | ||
1633 | DEBUGP("ip_ct_ras: IRR\n"); | 1661 | DEBUGP("ip_ct_ras: IRR\n"); |
1634 | 1662 | ||
1635 | if (set_ras_addr_hook) { | 1663 | set_ras_addr = rcu_dereference(set_ras_addr_hook); |
1636 | ret = set_ras_addr_hook(pskb, ct, ctinfo, data, | 1664 | if (set_ras_addr) { |
1637 | &irr->rasAddress, 1); | 1665 | ret = set_ras_addr(pskb, ct, ctinfo, data, |
1666 | &irr->rasAddress, 1); | ||
1638 | if (ret < 0) | 1667 | if (ret < 0) |
1639 | return -1; | 1668 | return -1; |
1640 | } | 1669 | } |
1641 | 1670 | ||
1642 | if (set_sig_addr_hook) { | 1671 | set_sig_addr = rcu_dereference(set_sig_addr_hook); |
1643 | ret = set_sig_addr_hook(pskb, ct, ctinfo, data, | 1672 | if (set_sig_addr) { |
1644 | irr->callSignalAddress.item, | 1673 | ret = set_sig_addr(pskb, ct, ctinfo, data, |
1645 | irr->callSignalAddress.count); | 1674 | irr->callSignalAddress.item, |
1675 | irr->callSignalAddress.count); | ||
1646 | if (ret < 0) | 1676 | if (ret < 0) |
1647 | return -1; | 1677 | return -1; |
1648 | } | 1678 | } |
@@ -1746,7 +1776,7 @@ static struct ip_conntrack_helper ip_conntrack_helper_ras = { | |||
1746 | .me = THIS_MODULE, | 1776 | .me = THIS_MODULE, |
1747 | .max_expected = 32, | 1777 | .max_expected = 32, |
1748 | .timeout = 240, | 1778 | .timeout = 240, |
1749 | .tuple = {.src = {.u = {__constant_htons(RAS_PORT)}}, | 1779 | .tuple = {.src = {.u = {.tcp = {.port = __constant_htons(RAS_PORT)}}}, |
1750 | .dst = {.protonum = IPPROTO_UDP}}, | 1780 | .dst = {.protonum = IPPROTO_UDP}}, |
1751 | .mask = {.src = {.u = {0xFFFE}}, | 1781 | .mask = {.src = {.u = {0xFFFE}}, |
1752 | .dst = {.protonum = 0xFF}}, | 1782 | .dst = {.protonum = 0xFF}}, |
diff --git a/net/ipv4/netfilter/ip_conntrack_helper_pptp.c b/net/ipv4/netfilter/ip_conntrack_helper_pptp.c index a2af5e0c7f99..4d19373bbf0d 100644 --- a/net/ipv4/netfilter/ip_conntrack_helper_pptp.c +++ b/net/ipv4/netfilter/ip_conntrack_helper_pptp.c | |||
@@ -124,6 +124,8 @@ EXPORT_SYMBOL(pptp_msg_name); | |||
124 | static void pptp_expectfn(struct ip_conntrack *ct, | 124 | static void pptp_expectfn(struct ip_conntrack *ct, |
125 | struct ip_conntrack_expect *exp) | 125 | struct ip_conntrack_expect *exp) |
126 | { | 126 | { |
127 | typeof(ip_nat_pptp_hook_expectfn) ip_nat_pptp_expectfn; | ||
128 | |||
127 | DEBUGP("increasing timeouts\n"); | 129 | DEBUGP("increasing timeouts\n"); |
128 | 130 | ||
129 | /* increase timeout of GRE data channel conntrack entry */ | 131 | /* increase timeout of GRE data channel conntrack entry */ |
@@ -133,7 +135,9 @@ static void pptp_expectfn(struct ip_conntrack *ct, | |||
133 | /* Can you see how rusty this code is, compared with the pre-2.6.11 | 135 | /* Can you see how rusty this code is, compared with the pre-2.6.11 |
134 | * one? That's what happened to my shiny newnat of 2002 ;( -HW */ | 136 | * one? That's what happened to my shiny newnat of 2002 ;( -HW */ |
135 | 137 | ||
136 | if (!ip_nat_pptp_hook_expectfn) { | 138 | rcu_read_lock(); |
139 | ip_nat_pptp_expectfn = rcu_dereference(ip_nat_pptp_hook_expectfn); | ||
140 | if (!ip_nat_pptp_expectfn) { | ||
137 | struct ip_conntrack_tuple inv_t; | 141 | struct ip_conntrack_tuple inv_t; |
138 | struct ip_conntrack_expect *exp_other; | 142 | struct ip_conntrack_expect *exp_other; |
139 | 143 | ||
@@ -142,7 +146,7 @@ static void pptp_expectfn(struct ip_conntrack *ct, | |||
142 | DEBUGP("trying to unexpect other dir: "); | 146 | DEBUGP("trying to unexpect other dir: "); |
143 | DUMP_TUPLE(&inv_t); | 147 | DUMP_TUPLE(&inv_t); |
144 | 148 | ||
145 | exp_other = ip_conntrack_expect_find(&inv_t); | 149 | exp_other = ip_conntrack_expect_find_get(&inv_t); |
146 | if (exp_other) { | 150 | if (exp_other) { |
147 | /* delete other expectation. */ | 151 | /* delete other expectation. */ |
148 | DEBUGP("found\n"); | 152 | DEBUGP("found\n"); |
@@ -153,8 +157,9 @@ static void pptp_expectfn(struct ip_conntrack *ct, | |||
153 | } | 157 | } |
154 | } else { | 158 | } else { |
155 | /* we need more than simple inversion */ | 159 | /* we need more than simple inversion */ |
156 | ip_nat_pptp_hook_expectfn(ct, exp); | 160 | ip_nat_pptp_expectfn(ct, exp); |
157 | } | 161 | } |
162 | rcu_read_unlock(); | ||
158 | } | 163 | } |
159 | 164 | ||
160 | static int destroy_sibling_or_exp(const struct ip_conntrack_tuple *t) | 165 | static int destroy_sibling_or_exp(const struct ip_conntrack_tuple *t) |
@@ -176,7 +181,7 @@ static int destroy_sibling_or_exp(const struct ip_conntrack_tuple *t) | |||
176 | ip_conntrack_put(sibling); | 181 | ip_conntrack_put(sibling); |
177 | return 1; | 182 | return 1; |
178 | } else { | 183 | } else { |
179 | exp = ip_conntrack_expect_find(t); | 184 | exp = ip_conntrack_expect_find_get(t); |
180 | if (exp) { | 185 | if (exp) { |
181 | DEBUGP("unexpect_related of expect %p\n", exp); | 186 | DEBUGP("unexpect_related of expect %p\n", exp); |
182 | ip_conntrack_unexpect_related(exp); | 187 | ip_conntrack_unexpect_related(exp); |
@@ -226,6 +231,7 @@ exp_gre(struct ip_conntrack *ct, | |||
226 | { | 231 | { |
227 | struct ip_conntrack_expect *exp_orig, *exp_reply; | 232 | struct ip_conntrack_expect *exp_orig, *exp_reply; |
228 | int ret = 1; | 233 | int ret = 1; |
234 | typeof(ip_nat_pptp_hook_exp_gre) ip_nat_pptp_exp_gre; | ||
229 | 235 | ||
230 | exp_orig = ip_conntrack_expect_alloc(ct); | 236 | exp_orig = ip_conntrack_expect_alloc(ct); |
231 | if (exp_orig == NULL) | 237 | if (exp_orig == NULL) |
@@ -262,8 +268,9 @@ exp_gre(struct ip_conntrack *ct, | |||
262 | exp_reply->tuple.dst.u.gre.key = peer_callid; | 268 | exp_reply->tuple.dst.u.gre.key = peer_callid; |
263 | exp_reply->tuple.dst.protonum = IPPROTO_GRE; | 269 | exp_reply->tuple.dst.protonum = IPPROTO_GRE; |
264 | 270 | ||
265 | if (ip_nat_pptp_hook_exp_gre) | 271 | ip_nat_pptp_exp_gre = rcu_dereference(ip_nat_pptp_hook_exp_gre); |
266 | ip_nat_pptp_hook_exp_gre(exp_orig, exp_reply); | 272 | if (ip_nat_pptp_exp_gre) |
273 | ip_nat_pptp_exp_gre(exp_orig, exp_reply); | ||
267 | if (ip_conntrack_expect_related(exp_orig) != 0) | 274 | if (ip_conntrack_expect_related(exp_orig) != 0) |
268 | goto out_put_both; | 275 | goto out_put_both; |
269 | if (ip_conntrack_expect_related(exp_reply) != 0) | 276 | if (ip_conntrack_expect_related(exp_reply) != 0) |
@@ -303,6 +310,7 @@ pptp_inbound_pkt(struct sk_buff **pskb, | |||
303 | struct ip_ct_pptp_master *info = &ct->help.ct_pptp_info; | 310 | struct ip_ct_pptp_master *info = &ct->help.ct_pptp_info; |
304 | u_int16_t msg; | 311 | u_int16_t msg; |
305 | __be16 cid = 0, pcid = 0; | 312 | __be16 cid = 0, pcid = 0; |
313 | typeof(ip_nat_pptp_hook_inbound) ip_nat_pptp_inbound; | ||
306 | 314 | ||
307 | msg = ntohs(ctlh->messageType); | 315 | msg = ntohs(ctlh->messageType); |
308 | DEBUGP("inbound control message %s\n", pptp_msg_name[msg]); | 316 | DEBUGP("inbound control message %s\n", pptp_msg_name[msg]); |
@@ -402,9 +410,9 @@ pptp_inbound_pkt(struct sk_buff **pskb, | |||
402 | goto invalid; | 410 | goto invalid; |
403 | } | 411 | } |
404 | 412 | ||
405 | if (ip_nat_pptp_hook_inbound) | 413 | ip_nat_pptp_inbound = rcu_dereference(ip_nat_pptp_hook_inbound); |
406 | return ip_nat_pptp_hook_inbound(pskb, ct, ctinfo, ctlh, | 414 | if (ip_nat_pptp_inbound) |
407 | pptpReq); | 415 | return ip_nat_pptp_inbound(pskb, ct, ctinfo, ctlh, pptpReq); |
408 | return NF_ACCEPT; | 416 | return NF_ACCEPT; |
409 | 417 | ||
410 | invalid: | 418 | invalid: |
@@ -427,6 +435,7 @@ pptp_outbound_pkt(struct sk_buff **pskb, | |||
427 | struct ip_ct_pptp_master *info = &ct->help.ct_pptp_info; | 435 | struct ip_ct_pptp_master *info = &ct->help.ct_pptp_info; |
428 | u_int16_t msg; | 436 | u_int16_t msg; |
429 | __be16 cid = 0, pcid = 0; | 437 | __be16 cid = 0, pcid = 0; |
438 | typeof(ip_nat_pptp_hook_outbound) ip_nat_pptp_outbound; | ||
430 | 439 | ||
431 | msg = ntohs(ctlh->messageType); | 440 | msg = ntohs(ctlh->messageType); |
432 | DEBUGP("outbound control message %s\n", pptp_msg_name[msg]); | 441 | DEBUGP("outbound control message %s\n", pptp_msg_name[msg]); |
@@ -492,9 +501,9 @@ pptp_outbound_pkt(struct sk_buff **pskb, | |||
492 | goto invalid; | 501 | goto invalid; |
493 | } | 502 | } |
494 | 503 | ||
495 | if (ip_nat_pptp_hook_outbound) | 504 | ip_nat_pptp_outbound = rcu_dereference(ip_nat_pptp_hook_outbound); |
496 | return ip_nat_pptp_hook_outbound(pskb, ct, ctinfo, ctlh, | 505 | if (ip_nat_pptp_outbound) |
497 | pptpReq); | 506 | return ip_nat_pptp_outbound(pskb, ct, ctinfo, ctlh, pptpReq); |
498 | return NF_ACCEPT; | 507 | return NF_ACCEPT; |
499 | 508 | ||
500 | invalid: | 509 | invalid: |
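Unlike the helpers whose help() runs under nf_hook_slow(), pptp_expectfn() above has to open its own RCU read-side section before dereferencing the hook, which is why this hunk adds an explicit rcu_read_lock()/rcu_read_unlock() pair. A hedged sketch of that variant (example_expectfn_hook is a made-up pointer, not a kernel symbol):

#include <linux/rcupdate.h>

void (*example_expectfn_hook)(void *ct, void *exp);  /* hypothetical hook */

static void example_expectfn(void *ct, void *exp)
{
	typeof(example_expectfn_hook) fn;

	rcu_read_lock();                 /* no caller holds the read lock for us */
	fn = rcu_dereference(example_expectfn_hook);
	if (fn)
		fn(ct, exp);
	rcu_read_unlock();               /* after this the hook may be retracted */
}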
diff --git a/net/ipv4/netfilter/ip_conntrack_irc.c b/net/ipv4/netfilter/ip_conntrack_irc.c index 75f7c3db1619..91832eca4106 100644 --- a/net/ipv4/netfilter/ip_conntrack_irc.c +++ b/net/ipv4/netfilter/ip_conntrack_irc.c | |||
@@ -114,6 +114,7 @@ static int help(struct sk_buff **pskb, | |||
114 | u_int16_t dcc_port; | 114 | u_int16_t dcc_port; |
115 | int i, ret = NF_ACCEPT; | 115 | int i, ret = NF_ACCEPT; |
116 | char *addr_beg_p, *addr_end_p; | 116 | char *addr_beg_p, *addr_end_p; |
117 | typeof(ip_nat_irc_hook) ip_nat_irc; | ||
117 | 118 | ||
118 | DEBUGP("entered\n"); | 119 | DEBUGP("entered\n"); |
119 | 120 | ||
@@ -222,11 +223,12 @@ static int help(struct sk_buff **pskb, | |||
222 | { .tcp = { htons(0xFFFF) } }, 0xFF }}); | 223 | { .tcp = { htons(0xFFFF) } }, 0xFF }}); |
223 | exp->expectfn = NULL; | 224 | exp->expectfn = NULL; |
224 | exp->flags = 0; | 225 | exp->flags = 0; |
225 | if (ip_nat_irc_hook) | 226 | ip_nat_irc = rcu_dereference(ip_nat_irc_hook); |
226 | ret = ip_nat_irc_hook(pskb, ctinfo, | 227 | if (ip_nat_irc) |
227 | addr_beg_p - ib_ptr, | 228 | ret = ip_nat_irc(pskb, ctinfo, |
228 | addr_end_p - addr_beg_p, | 229 | addr_beg_p - ib_ptr, |
229 | exp); | 230 | addr_end_p - addr_beg_p, |
231 | exp); | ||
230 | else if (ip_conntrack_expect_related(exp) != 0) | 232 | else if (ip_conntrack_expect_related(exp) != 0) |
231 | ret = NF_DROP; | 233 | ret = NF_DROP; |
232 | ip_conntrack_expect_put(exp); | 234 | ip_conntrack_expect_put(exp); |
diff --git a/net/ipv4/netfilter/ip_conntrack_netlink.c b/net/ipv4/netfilter/ip_conntrack_netlink.c index 262d0d44ec1b..5fcf91d617cd 100644 --- a/net/ipv4/netfilter/ip_conntrack_netlink.c +++ b/net/ipv4/netfilter/ip_conntrack_netlink.c | |||
@@ -153,6 +153,7 @@ ctnetlink_dump_protoinfo(struct sk_buff *skb, const struct ip_conntrack *ct) | |||
153 | return ret; | 153 | return ret; |
154 | 154 | ||
155 | nfattr_failure: | 155 | nfattr_failure: |
156 | ip_conntrack_proto_put(proto); | ||
156 | return -1; | 157 | return -1; |
157 | } | 158 | } |
158 | 159 | ||
@@ -319,8 +320,6 @@ static int ctnetlink_conntrack_event(struct notifier_block *this, | |||
319 | } else if (events & (IPCT_NEW | IPCT_RELATED)) { | 320 | } else if (events & (IPCT_NEW | IPCT_RELATED)) { |
320 | type = IPCTNL_MSG_CT_NEW; | 321 | type = IPCTNL_MSG_CT_NEW; |
321 | flags = NLM_F_CREATE|NLM_F_EXCL; | 322 | flags = NLM_F_CREATE|NLM_F_EXCL; |
322 | /* dump everything */ | ||
323 | events = ~0UL; | ||
324 | group = NFNLGRP_CONNTRACK_NEW; | 323 | group = NFNLGRP_CONNTRACK_NEW; |
325 | } else if (events & (IPCT_STATUS | IPCT_PROTOINFO)) { | 324 | } else if (events & (IPCT_STATUS | IPCT_PROTOINFO)) { |
326 | type = IPCTNL_MSG_CT_NEW; | 325 | type = IPCTNL_MSG_CT_NEW; |
@@ -355,28 +354,35 @@ static int ctnetlink_conntrack_event(struct notifier_block *this, | |||
355 | if (ctnetlink_dump_tuples(skb, tuple(ct, IP_CT_DIR_REPLY)) < 0) | 354 | if (ctnetlink_dump_tuples(skb, tuple(ct, IP_CT_DIR_REPLY)) < 0) |
356 | goto nfattr_failure; | 355 | goto nfattr_failure; |
357 | NFA_NEST_END(skb, nest_parms); | 356 | NFA_NEST_END(skb, nest_parms); |
358 | |||
359 | /* NAT stuff is now a status flag */ | ||
360 | if ((events & IPCT_STATUS || events & IPCT_NATINFO) | ||
361 | && ctnetlink_dump_status(skb, ct) < 0) | ||
362 | goto nfattr_failure; | ||
363 | if (events & IPCT_REFRESH | ||
364 | && ctnetlink_dump_timeout(skb, ct) < 0) | ||
365 | goto nfattr_failure; | ||
366 | if (events & IPCT_PROTOINFO | ||
367 | && ctnetlink_dump_protoinfo(skb, ct) < 0) | ||
368 | goto nfattr_failure; | ||
369 | if (events & IPCT_HELPINFO | ||
370 | && ctnetlink_dump_helpinfo(skb, ct) < 0) | ||
371 | goto nfattr_failure; | ||
372 | 357 | ||
373 | if (ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 || | 358 | if (events & IPCT_DESTROY) { |
374 | ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0) | 359 | if (ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 || |
375 | goto nfattr_failure; | 360 | ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0) |
361 | goto nfattr_failure; | ||
362 | } else { | ||
363 | if (ctnetlink_dump_status(skb, ct) < 0) | ||
364 | goto nfattr_failure; | ||
376 | 365 | ||
377 | if (events & IPCT_MARK | 366 | if (ctnetlink_dump_timeout(skb, ct) < 0) |
378 | && ctnetlink_dump_mark(skb, ct) < 0) | 367 | goto nfattr_failure; |
379 | goto nfattr_failure; | 368 | |
369 | if (events & IPCT_PROTOINFO | ||
370 | && ctnetlink_dump_protoinfo(skb, ct) < 0) | ||
371 | goto nfattr_failure; | ||
372 | |||
373 | if ((events & IPCT_HELPER || ct->helper) | ||
374 | && ctnetlink_dump_helpinfo(skb, ct) < 0) | ||
375 | goto nfattr_failure; | ||
376 | |||
377 | if ((events & IPCT_MARK || ct->mark) | ||
378 | && ctnetlink_dump_mark(skb, ct) < 0) | ||
379 | goto nfattr_failure; | ||
380 | |||
381 | if (events & IPCT_COUNTER_FILLING && | ||
382 | (ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 || | ||
383 | ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0)) | ||
384 | goto nfattr_failure; | ||
385 | } | ||
380 | 386 | ||
381 | nlh->nlmsg_len = skb->tail - b; | 387 | nlh->nlmsg_len = skb->tail - b; |
382 | nfnetlink_send(skb, 0, group, 0); | 388 | nfnetlink_send(skb, 0, group, 0); |
@@ -742,7 +748,6 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb, | |||
742 | ip_conntrack_put(ct); | 748 | ip_conntrack_put(ct); |
743 | return -ENOMEM; | 749 | return -ENOMEM; |
744 | } | 750 | } |
745 | NETLINK_CB(skb2).dst_pid = NETLINK_CB(skb).pid; | ||
746 | 751 | ||
747 | err = ctnetlink_fill_info(skb2, NETLINK_CB(skb).pid, nlh->nlmsg_seq, | 752 | err = ctnetlink_fill_info(skb2, NETLINK_CB(skb).pid, nlh->nlmsg_seq, |
748 | IPCTNL_MSG_CT_NEW, 1, ct); | 753 | IPCTNL_MSG_CT_NEW, 1, ct); |
@@ -945,9 +950,11 @@ ctnetlink_create_conntrack(struct nfattr *cda[], | |||
945 | ct->timeout.expires = jiffies + ct->timeout.expires * HZ; | 950 | ct->timeout.expires = jiffies + ct->timeout.expires * HZ; |
946 | ct->status |= IPS_CONFIRMED; | 951 | ct->status |= IPS_CONFIRMED; |
947 | 952 | ||
948 | err = ctnetlink_change_status(ct, cda); | 953 | if (cda[CTA_STATUS-1]) { |
949 | if (err < 0) | 954 | err = ctnetlink_change_status(ct, cda); |
950 | goto err; | 955 | if (err < 0) |
956 | goto err; | ||
957 | } | ||
951 | 958 | ||
952 | if (cda[CTA_PROTOINFO-1]) { | 959 | if (cda[CTA_PROTOINFO-1]) { |
953 | err = ctnetlink_change_protoinfo(ct, cda); | 960 | err = ctnetlink_change_protoinfo(ct, cda); |
@@ -1256,7 +1263,7 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb, | |||
1256 | if (err < 0) | 1263 | if (err < 0) |
1257 | return err; | 1264 | return err; |
1258 | 1265 | ||
1259 | exp = ip_conntrack_expect_find(&tuple); | 1266 | exp = ip_conntrack_expect_find_get(&tuple); |
1260 | if (!exp) | 1267 | if (!exp) |
1261 | return -ENOENT; | 1268 | return -ENOENT; |
1262 | 1269 | ||
@@ -1272,8 +1279,7 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb, | |||
1272 | skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); | 1279 | skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); |
1273 | if (!skb2) | 1280 | if (!skb2) |
1274 | goto out; | 1281 | goto out; |
1275 | NETLINK_CB(skb2).dst_pid = NETLINK_CB(skb).pid; | 1282 | |
1276 | |||
1277 | err = ctnetlink_exp_fill_info(skb2, NETLINK_CB(skb).pid, | 1283 | err = ctnetlink_exp_fill_info(skb2, NETLINK_CB(skb).pid, |
1278 | nlh->nlmsg_seq, IPCTNL_MSG_EXP_NEW, | 1284 | nlh->nlmsg_seq, IPCTNL_MSG_EXP_NEW, |
1279 | 1, exp); | 1285 | 1, exp); |
@@ -1310,7 +1316,7 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb, | |||
1310 | return err; | 1316 | return err; |
1311 | 1317 | ||
1312 | /* bump usage count to 2 */ | 1318 | /* bump usage count to 2 */ |
1313 | exp = ip_conntrack_expect_find(&tuple); | 1319 | exp = ip_conntrack_expect_find_get(&tuple); |
1314 | if (!exp) | 1320 | if (!exp) |
1315 | return -ENOENT; | 1321 | return -ENOENT; |
1316 | 1322 | ||
diff --git a/net/ipv4/netfilter/ip_conntrack_proto_gre.c b/net/ipv4/netfilter/ip_conntrack_proto_gre.c index 5fe026f467d3..ac1c49ef36a9 100644 --- a/net/ipv4/netfilter/ip_conntrack_proto_gre.c +++ b/net/ipv4/netfilter/ip_conntrack_proto_gre.c | |||
@@ -34,8 +34,6 @@ | |||
34 | #include <linux/interrupt.h> | 34 | #include <linux/interrupt.h> |
35 | 35 | ||
36 | static DEFINE_RWLOCK(ip_ct_gre_lock); | 36 | static DEFINE_RWLOCK(ip_ct_gre_lock); |
37 | #define ASSERT_READ_LOCK(x) | ||
38 | #define ASSERT_WRITE_LOCK(x) | ||
39 | 37 | ||
40 | #include <linux/netfilter_ipv4/ip_conntrack_protocol.h> | 38 | #include <linux/netfilter_ipv4/ip_conntrack_protocol.h> |
41 | #include <linux/netfilter_ipv4/ip_conntrack_helper.h> | 39 | #include <linux/netfilter_ipv4/ip_conntrack_helper.h> |
diff --git a/net/ipv4/netfilter/ip_conntrack_sip.c b/net/ipv4/netfilter/ip_conntrack_sip.c index f4f75995a9e4..3a26d63eed88 100644 --- a/net/ipv4/netfilter/ip_conntrack_sip.c +++ b/net/ipv4/netfilter/ip_conntrack_sip.c | |||
@@ -52,20 +52,56 @@ unsigned int (*ip_nat_sdp_hook)(struct sk_buff **pskb, | |||
52 | const char *dptr); | 52 | const char *dptr); |
53 | EXPORT_SYMBOL_GPL(ip_nat_sdp_hook); | 53 | EXPORT_SYMBOL_GPL(ip_nat_sdp_hook); |
54 | 54 | ||
55 | int ct_sip_get_info(const char *dptr, size_t dlen, | ||
56 | unsigned int *matchoff, | ||
57 | unsigned int *matchlen, | ||
58 | struct sip_header_nfo *hnfo); | ||
59 | EXPORT_SYMBOL_GPL(ct_sip_get_info); | ||
60 | |||
61 | |||
62 | static int digits_len(const char *dptr, const char *limit, int *shift); | 55 | static int digits_len(const char *dptr, const char *limit, int *shift); |
63 | static int epaddr_len(const char *dptr, const char *limit, int *shift); | 56 | static int epaddr_len(const char *dptr, const char *limit, int *shift); |
64 | static int skp_digits_len(const char *dptr, const char *limit, int *shift); | 57 | static int skp_digits_len(const char *dptr, const char *limit, int *shift); |
65 | static int skp_epaddr_len(const char *dptr, const char *limit, int *shift); | 58 | static int skp_epaddr_len(const char *dptr, const char *limit, int *shift); |
66 | 59 | ||
67 | struct sip_header_nfo ct_sip_hdrs[] = { | 60 | struct sip_header_nfo { |
68 | { /* Via header */ | 61 | const char *lname; |
62 | const char *sname; | ||
63 | const char *ln_str; | ||
64 | size_t lnlen; | ||
65 | size_t snlen; | ||
66 | size_t ln_strlen; | ||
67 | int case_sensitive; | ||
68 | int (*match_len)(const char *, const char *, int *); | ||
69 | }; | ||
70 | |||
71 | static struct sip_header_nfo ct_sip_hdrs[] = { | ||
72 | [POS_REG_REQ_URI] = { /* SIP REGISTER request URI */ | ||
73 | .lname = "sip:", | ||
74 | .lnlen = sizeof("sip:") - 1, | ||
75 | .ln_str = ":", | ||
76 | .ln_strlen = sizeof(":") - 1, | ||
77 | .match_len = epaddr_len | ||
78 | }, | ||
79 | [POS_REQ_URI] = { /* SIP request URI */ | ||
80 | .lname = "sip:", | ||
81 | .lnlen = sizeof("sip:") - 1, | ||
82 | .ln_str = "@", | ||
83 | .ln_strlen = sizeof("@") - 1, | ||
84 | .match_len = epaddr_len | ||
85 | }, | ||
86 | [POS_FROM] = { /* SIP From header */ | ||
87 | .lname = "From:", | ||
88 | .lnlen = sizeof("From:") - 1, | ||
89 | .sname = "\r\nf:", | ||
90 | .snlen = sizeof("\r\nf:") - 1, | ||
91 | .ln_str = "sip:", | ||
92 | .ln_strlen = sizeof("sip:") - 1, | ||
93 | .match_len = skp_epaddr_len, | ||
94 | }, | ||
95 | [POS_TO] = { /* SIP To header */ | ||
96 | .lname = "To:", | ||
97 | .lnlen = sizeof("To:") - 1, | ||
98 | .sname = "\r\nt:", | ||
99 | .snlen = sizeof("\r\nt:") - 1, | ||
100 | .ln_str = "sip:", | ||
101 | .ln_strlen = sizeof("sip:") - 1, | ||
102 | .match_len = skp_epaddr_len, | ||
103 | }, | ||
104 | [POS_VIA] = { /* SIP Via header */ | ||
69 | .lname = "Via:", | 105 | .lname = "Via:", |
70 | .lnlen = sizeof("Via:") - 1, | 106 | .lnlen = sizeof("Via:") - 1, |
71 | .sname = "\r\nv:", | 107 | .sname = "\r\nv:", |
@@ -74,7 +110,7 @@ struct sip_header_nfo ct_sip_hdrs[] = { | |||
74 | .ln_strlen = sizeof("UDP ") - 1, | 110 | .ln_strlen = sizeof("UDP ") - 1, |
75 | .match_len = epaddr_len, | 111 | .match_len = epaddr_len, |
76 | }, | 112 | }, |
77 | { /* Contact header */ | 113 | [POS_CONTACT] = { /* SIP Contact header */ |
78 | .lname = "Contact:", | 114 | .lname = "Contact:", |
79 | .lnlen = sizeof("Contact:") - 1, | 115 | .lnlen = sizeof("Contact:") - 1, |
80 | .sname = "\r\nm:", | 116 | .sname = "\r\nm:", |
@@ -83,7 +119,7 @@ struct sip_header_nfo ct_sip_hdrs[] = { | |||
83 | .ln_strlen = sizeof("sip:") - 1, | 119 | .ln_strlen = sizeof("sip:") - 1, |
84 | .match_len = skp_epaddr_len | 120 | .match_len = skp_epaddr_len |
85 | }, | 121 | }, |
86 | { /* Content length header */ | 122 | [POS_CONTENT] = { /* SIP Content length header */ |
87 | .lname = "Content-Length:", | 123 | .lname = "Content-Length:", |
88 | .lnlen = sizeof("Content-Length:") - 1, | 124 | .lnlen = sizeof("Content-Length:") - 1, |
89 | .sname = "\r\nl:", | 125 | .sname = "\r\nl:", |
@@ -92,7 +128,8 @@ struct sip_header_nfo ct_sip_hdrs[] = { | |||
92 | .ln_strlen = sizeof(":") - 1, | 128 | .ln_strlen = sizeof(":") - 1, |
93 | .match_len = skp_digits_len | 129 | .match_len = skp_digits_len |
94 | }, | 130 | }, |
95 | { /* SDP media info */ | 131 | [POS_MEDIA] = { /* SDP media info */ |
132 | .case_sensitive = 1, | ||
96 | .lname = "\nm=", | 133 | .lname = "\nm=", |
97 | .lnlen = sizeof("\nm=") - 1, | 134 | .lnlen = sizeof("\nm=") - 1, |
98 | .sname = "\rm=", | 135 | .sname = "\rm=", |
@@ -101,7 +138,8 @@ struct sip_header_nfo ct_sip_hdrs[] = { | |||
101 | .ln_strlen = sizeof("audio ") - 1, | 138 | .ln_strlen = sizeof("audio ") - 1, |
102 | .match_len = digits_len | 139 | .match_len = digits_len |
103 | }, | 140 | }, |
104 | { /* SDP owner address */ | 141 | [POS_OWNER] = { /* SDP owner address */ |
142 | .case_sensitive = 1, | ||
105 | .lname = "\no=", | 143 | .lname = "\no=", |
106 | .lnlen = sizeof("\no=") - 1, | 144 | .lnlen = sizeof("\no=") - 1, |
107 | .sname = "\ro=", | 145 | .sname = "\ro=", |
@@ -110,7 +148,8 @@ struct sip_header_nfo ct_sip_hdrs[] = { | |||
110 | .ln_strlen = sizeof("IN IP4 ") - 1, | 148 | .ln_strlen = sizeof("IN IP4 ") - 1, |
111 | .match_len = epaddr_len | 149 | .match_len = epaddr_len |
112 | }, | 150 | }, |
113 | { /* SDP connection info */ | 151 | [POS_CONNECTION] = { /* SDP connection info */ |
152 | .case_sensitive = 1, | ||
114 | .lname = "\nc=", | 153 | .lname = "\nc=", |
115 | .lnlen = sizeof("\nc=") - 1, | 154 | .lnlen = sizeof("\nc=") - 1, |
116 | .sname = "\rc=", | 155 | .sname = "\rc=", |
@@ -119,16 +158,8 @@ struct sip_header_nfo ct_sip_hdrs[] = { | |||
119 | .ln_strlen = sizeof("IN IP4 ") - 1, | 158 | .ln_strlen = sizeof("IN IP4 ") - 1, |
120 | .match_len = epaddr_len | 159 | .match_len = epaddr_len |
121 | }, | 160 | }, |
122 | { /* Requests headers */ | 161 | [POS_SDP_HEADER] = { /* SDP version header */ |
123 | .lname = "sip:", | 162 | .case_sensitive = 1, |
124 | .lnlen = sizeof("sip:") - 1, | ||
125 | .sname = "sip:", | ||
126 | .snlen = sizeof("sip:") - 1, /* yes, i know.. ;) */ | ||
127 | .ln_str = "@", | ||
128 | .ln_strlen = sizeof("@") - 1, | ||
129 | .match_len = epaddr_len | ||
130 | }, | ||
131 | { /* SDP version header */ | ||
132 | .lname = "\nv=", | 163 | .lname = "\nv=", |
133 | .lnlen = sizeof("\nv=") - 1, | 164 | .lnlen = sizeof("\nv=") - 1, |
134 | .sname = "\rv=", | 165 | .sname = "\rv=", |
@@ -138,7 +169,6 @@ struct sip_header_nfo ct_sip_hdrs[] = { | |||
138 | .match_len = digits_len | 169 | .match_len = digits_len |
139 | } | 170 | } |
140 | }; | 171 | }; |
141 | EXPORT_SYMBOL_GPL(ct_sip_hdrs); | ||
142 | 172 | ||
143 | /* get line length until first CR or LF seen. */ | 173 | /* get line length until first CR or LF seen. */ |
144 | int ct_sip_lnlen(const char *line, const char *limit) | 174 | int ct_sip_lnlen(const char *line, const char *limit) |
@@ -159,13 +189,19 @@ EXPORT_SYMBOL_GPL(ct_sip_lnlen); | |||
159 | 189 | ||
160 | /* Linear string search, case sensitive. */ | 190 | /* Linear string search, case sensitive. */ |
161 | const char *ct_sip_search(const char *needle, const char *haystack, | 191 | const char *ct_sip_search(const char *needle, const char *haystack, |
162 | size_t needle_len, size_t haystack_len) | 192 | size_t needle_len, size_t haystack_len, |
193 | int case_sensitive) | ||
163 | { | 194 | { |
164 | const char *limit = haystack + (haystack_len - needle_len); | 195 | const char *limit = haystack + (haystack_len - needle_len); |
165 | 196 | ||
166 | while (haystack <= limit) { | 197 | while (haystack <= limit) { |
167 | if (memcmp(haystack, needle, needle_len) == 0) | 198 | if (case_sensitive) { |
168 | return haystack; | 199 | if (strncmp(haystack, needle, needle_len) == 0) |
200 | return haystack; | ||
201 | } else { | ||
202 | if (strnicmp(haystack, needle, needle_len) == 0) | ||
203 | return haystack; | ||
204 | } | ||
169 | haystack++; | 205 | haystack++; |
170 | } | 206 | } |
171 | return NULL; | 207 | return NULL; |
@@ -263,8 +299,9 @@ static int skp_epaddr_len(const char *dptr, const char *limit, int *shift) | |||
263 | int ct_sip_get_info(const char *dptr, size_t dlen, | 299 | int ct_sip_get_info(const char *dptr, size_t dlen, |
264 | unsigned int *matchoff, | 300 | unsigned int *matchoff, |
265 | unsigned int *matchlen, | 301 | unsigned int *matchlen, |
266 | struct sip_header_nfo *hnfo) | 302 | enum sip_header_pos pos) |
267 | { | 303 | { |
304 | struct sip_header_nfo *hnfo = &ct_sip_hdrs[pos]; | ||
268 | const char *limit, *aux, *k = dptr; | 305 | const char *limit, *aux, *k = dptr; |
269 | int shift = 0; | 306 | int shift = 0; |
270 | 307 | ||
@@ -272,12 +309,14 @@ int ct_sip_get_info(const char *dptr, size_t dlen, | |||
272 | 309 | ||
273 | while (dptr <= limit) { | 310 | while (dptr <= limit) { |
274 | if ((strncmp(dptr, hnfo->lname, hnfo->lnlen) != 0) && | 311 | if ((strncmp(dptr, hnfo->lname, hnfo->lnlen) != 0) && |
275 | (strncmp(dptr, hnfo->sname, hnfo->snlen) != 0)) { | 312 | (hnfo->sname == NULL || |
313 | strncmp(dptr, hnfo->sname, hnfo->snlen) != 0)) { | ||
276 | dptr++; | 314 | dptr++; |
277 | continue; | 315 | continue; |
278 | } | 316 | } |
279 | aux = ct_sip_search(hnfo->ln_str, dptr, hnfo->ln_strlen, | 317 | aux = ct_sip_search(hnfo->ln_str, dptr, hnfo->ln_strlen, |
280 | ct_sip_lnlen(dptr, limit)); | 318 | ct_sip_lnlen(dptr, limit), |
319 | hnfo->case_sensitive); | ||
281 | if (!aux) { | 320 | if (!aux) { |
282 | DEBUGP("'%s' not found in '%s'.\n", hnfo->ln_str, | 321 | DEBUGP("'%s' not found in '%s'.\n", hnfo->ln_str, |
283 | hnfo->lname); | 322 | hnfo->lname); |
@@ -298,6 +337,7 @@ int ct_sip_get_info(const char *dptr, size_t dlen, | |||
298 | DEBUGP("%s header not found.\n", hnfo->lname); | 337 | DEBUGP("%s header not found.\n", hnfo->lname); |
299 | return 0; | 338 | return 0; |
300 | } | 339 | } |
340 | EXPORT_SYMBOL_GPL(ct_sip_get_info); | ||
301 | 341 | ||
302 | static int set_expected_rtp(struct sk_buff **pskb, | 342 | static int set_expected_rtp(struct sk_buff **pskb, |
303 | struct ip_conntrack *ct, | 343 | struct ip_conntrack *ct, |
@@ -308,6 +348,7 @@ static int set_expected_rtp(struct sk_buff **pskb, | |||
308 | struct ip_conntrack_expect *exp; | 348 | struct ip_conntrack_expect *exp; |
309 | enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); | 349 | enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); |
310 | int ret; | 350 | int ret; |
351 | typeof(ip_nat_sdp_hook) ip_nat_sdp; | ||
311 | 352 | ||
312 | exp = ip_conntrack_expect_alloc(ct); | 353 | exp = ip_conntrack_expect_alloc(ct); |
313 | if (exp == NULL) | 354 | if (exp == NULL) |
@@ -328,8 +369,9 @@ static int set_expected_rtp(struct sk_buff **pskb, | |||
328 | exp->expectfn = NULL; | 369 | exp->expectfn = NULL; |
329 | exp->flags = 0; | 370 | exp->flags = 0; |
330 | 371 | ||
331 | if (ip_nat_sdp_hook) | 372 | ip_nat_sdp = rcu_dereference(ip_nat_sdp_hook); |
332 | ret = ip_nat_sdp_hook(pskb, ctinfo, exp, dptr); | 373 | if (ip_nat_sdp) |
374 | ret = ip_nat_sdp(pskb, ctinfo, exp, dptr); | ||
333 | else { | 375 | else { |
334 | if (ip_conntrack_expect_related(exp) != 0) | 376 | if (ip_conntrack_expect_related(exp) != 0) |
335 | ret = NF_DROP; | 377 | ret = NF_DROP; |
@@ -351,6 +393,7 @@ static int sip_help(struct sk_buff **pskb, | |||
351 | int matchoff, matchlen; | 393 | int matchoff, matchlen; |
352 | __be32 ipaddr; | 394 | __be32 ipaddr; |
353 | u_int16_t port; | 395 | u_int16_t port; |
396 | typeof(ip_nat_sip_hook) ip_nat_sip; | ||
354 | 397 | ||
355 | /* No Data ? */ | 398 | /* No Data ? */ |
356 | dataoff = (*pskb)->nh.iph->ihl*4 + sizeof(struct udphdr); | 399 | dataoff = (*pskb)->nh.iph->ihl*4 + sizeof(struct udphdr); |
@@ -368,8 +411,9 @@ static int sip_help(struct sk_buff **pskb, | |||
368 | goto out; | 411 | goto out; |
369 | } | 412 | } |
370 | 413 | ||
371 | if (ip_nat_sip_hook) { | 414 | ip_nat_sip = rcu_dereference(ip_nat_sip_hook); |
372 | if (!ip_nat_sip_hook(pskb, ctinfo, ct, &dptr)) { | 415 | if (ip_nat_sip) { |
416 | if (!ip_nat_sip(pskb, ctinfo, ct, &dptr)) { | ||
373 | ret = NF_DROP; | 417 | ret = NF_DROP; |
374 | goto out; | 418 | goto out; |
375 | } | 419 | } |
@@ -389,7 +433,7 @@ static int sip_help(struct sk_buff **pskb, | |||
389 | } | 433 | } |
390 | /* Get ip and port address from SDP packet. */ | 434 | /* Get ip and port address from SDP packet. */ |
391 | if (ct_sip_get_info(dptr, datalen, &matchoff, &matchlen, | 435 | if (ct_sip_get_info(dptr, datalen, &matchoff, &matchlen, |
392 | &ct_sip_hdrs[POS_CONNECTION]) > 0) { | 436 | POS_CONNECTION) > 0) { |
393 | 437 | ||
394 | /* We'll drop only if there are parse problems. */ | 438 | /* We'll drop only if there are parse problems. */ |
395 | if (parse_ipaddr(dptr + matchoff, NULL, &ipaddr, | 439 | if (parse_ipaddr(dptr + matchoff, NULL, &ipaddr, |
@@ -398,7 +442,7 @@ static int sip_help(struct sk_buff **pskb, | |||
398 | goto out; | 442 | goto out; |
399 | } | 443 | } |
400 | if (ct_sip_get_info(dptr, datalen, &matchoff, &matchlen, | 444 | if (ct_sip_get_info(dptr, datalen, &matchoff, &matchlen, |
401 | &ct_sip_hdrs[POS_MEDIA]) > 0) { | 445 | POS_MEDIA) > 0) { |
402 | 446 | ||
403 | port = simple_strtoul(dptr + matchoff, NULL, 10); | 447 | port = simple_strtoul(dptr + matchoff, NULL, 10); |
404 | if (port < 1024) { | 448 | if (port < 1024) { |
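The ip_conntrack_sip.c hunks above stop exporting the header table: ct_sip_hdrs[] becomes static, its slots are filled with designated initializers indexed by the sip_header_pos enum, and ct_sip_get_info() takes an enum value instead of a pointer into the array. ct_sip_search() also gains a case_sensitive flag so the SDP fields ("m=", "o=", "c=", "v=") are matched exactly while SIP header names stay case-insensitive. A minimal userspace sketch of the enum-plus-designated-initializer pattern; the names here are invented for illustration, not the kernel's:

#include <stdio.h>

enum hdr_pos { POS_FROM, POS_TO, POS_MAX };     /* illustrative positions only */

struct hdr_info {
        const char *lname;              /* long form of the header name */
        size_t lnlen;
        int case_sensitive;
};

/* Designated initializers keep the enum and the table slots in sync, so
 * callers pass POS_TO rather than a pointer into the array, and the struct
 * can grow new members (such as case_sensitive) without touching callers. */
static const struct hdr_info hdrs[POS_MAX] = {
        [POS_FROM] = { .lname = "From:", .lnlen = sizeof("From:") - 1 },
        [POS_TO]   = { .lname = "To:",   .lnlen = sizeof("To:") - 1 },
};

int main(void)
{
        enum hdr_pos pos = POS_TO;

        printf("%s (%zu bytes)\n", hdrs[pos].lname, hdrs[pos].lnlen);
        return 0;
}

Keeping the table private to the conntrack module is what lets the later ip_nat_sip.c hunks drop their extern declaration and reference headers purely by position.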
diff --git a/net/ipv4/netfilter/ip_conntrack_standalone.c b/net/ipv4/netfilter/ip_conntrack_standalone.c index 02135756562e..86efb5449676 100644 --- a/net/ipv4/netfilter/ip_conntrack_standalone.c +++ b/net/ipv4/netfilter/ip_conntrack_standalone.c | |||
@@ -28,9 +28,6 @@ | |||
28 | #include <net/ip.h> | 28 | #include <net/ip.h> |
29 | #include <net/route.h> | 29 | #include <net/route.h> |
30 | 30 | ||
31 | #define ASSERT_READ_LOCK(x) | ||
32 | #define ASSERT_WRITE_LOCK(x) | ||
33 | |||
34 | #include <linux/netfilter_ipv4/ip_conntrack.h> | 31 | #include <linux/netfilter_ipv4/ip_conntrack.h> |
35 | #include <linux/netfilter_ipv4/ip_conntrack_protocol.h> | 32 | #include <linux/netfilter_ipv4/ip_conntrack_protocol.h> |
36 | #include <linux/netfilter_ipv4/ip_conntrack_core.h> | 33 | #include <linux/netfilter_ipv4/ip_conntrack_core.h> |
@@ -139,7 +136,6 @@ static int ct_seq_show(struct seq_file *s, void *v) | |||
139 | const struct ip_conntrack *conntrack = tuplehash_to_ctrack(hash); | 136 | const struct ip_conntrack *conntrack = tuplehash_to_ctrack(hash); |
140 | struct ip_conntrack_protocol *proto; | 137 | struct ip_conntrack_protocol *proto; |
141 | 138 | ||
142 | ASSERT_READ_LOCK(&ip_conntrack_lock); | ||
143 | IP_NF_ASSERT(conntrack); | 139 | IP_NF_ASSERT(conntrack); |
144 | 140 | ||
145 | /* we only want to print DIR_ORIGINAL */ | 141 | /* we only want to print DIR_ORIGINAL */ |
@@ -926,7 +922,7 @@ EXPORT_SYMBOL(__ip_ct_refresh_acct); | |||
926 | EXPORT_SYMBOL(ip_conntrack_expect_alloc); | 922 | EXPORT_SYMBOL(ip_conntrack_expect_alloc); |
927 | EXPORT_SYMBOL(ip_conntrack_expect_put); | 923 | EXPORT_SYMBOL(ip_conntrack_expect_put); |
928 | EXPORT_SYMBOL_GPL(__ip_conntrack_expect_find); | 924 | EXPORT_SYMBOL_GPL(__ip_conntrack_expect_find); |
929 | EXPORT_SYMBOL_GPL(ip_conntrack_expect_find); | 925 | EXPORT_SYMBOL_GPL(ip_conntrack_expect_find_get); |
930 | EXPORT_SYMBOL(ip_conntrack_expect_related); | 926 | EXPORT_SYMBOL(ip_conntrack_expect_related); |
931 | EXPORT_SYMBOL(ip_conntrack_unexpect_related); | 927 | EXPORT_SYMBOL(ip_conntrack_unexpect_related); |
932 | EXPORT_SYMBOL_GPL(ip_conntrack_expect_list); | 928 | EXPORT_SYMBOL_GPL(ip_conntrack_expect_list); |
diff --git a/net/ipv4/netfilter/ip_conntrack_tftp.c b/net/ipv4/netfilter/ip_conntrack_tftp.c index fe0b634dd377..ef56de2eff0c 100644 --- a/net/ipv4/netfilter/ip_conntrack_tftp.c +++ b/net/ipv4/netfilter/ip_conntrack_tftp.c | |||
@@ -50,6 +50,7 @@ static int tftp_help(struct sk_buff **pskb, | |||
50 | struct tftphdr _tftph, *tfh; | 50 | struct tftphdr _tftph, *tfh; |
51 | struct ip_conntrack_expect *exp; | 51 | struct ip_conntrack_expect *exp; |
52 | unsigned int ret = NF_ACCEPT; | 52 | unsigned int ret = NF_ACCEPT; |
53 | typeof(ip_nat_tftp_hook) ip_nat_tftp; | ||
53 | 54 | ||
54 | tfh = skb_header_pointer(*pskb, | 55 | tfh = skb_header_pointer(*pskb, |
55 | (*pskb)->nh.iph->ihl*4+sizeof(struct udphdr), | 56 | (*pskb)->nh.iph->ihl*4+sizeof(struct udphdr), |
@@ -81,8 +82,9 @@ static int tftp_help(struct sk_buff **pskb, | |||
81 | DEBUGP("expect: "); | 82 | DEBUGP("expect: "); |
82 | DUMP_TUPLE(&exp->tuple); | 83 | DUMP_TUPLE(&exp->tuple); |
83 | DUMP_TUPLE(&exp->mask); | 84 | DUMP_TUPLE(&exp->mask); |
84 | if (ip_nat_tftp_hook) | 85 | ip_nat_tftp = rcu_dereference(ip_nat_tftp_hook); |
85 | ret = ip_nat_tftp_hook(pskb, ctinfo, exp); | 86 | if (ip_nat_tftp) |
87 | ret = ip_nat_tftp(pskb, ctinfo, exp); | ||
86 | else if (ip_conntrack_expect_related(exp) != 0) | 88 | else if (ip_conntrack_expect_related(exp) != 0) |
87 | ret = NF_DROP; | 89 | ret = NF_DROP; |
88 | ip_conntrack_expect_put(exp); | 90 | ip_conntrack_expect_put(exp); |
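The sip and tftp conntrack helpers above now read the NAT hook exactly once, through rcu_dereference(), into a local declared with typeof() so its type follows the hook's, and then call through that snapshot instead of testing and re-reading the global pointer. A kernel-style sketch of the caller side, using a placeholder hook name rather than a real kernel symbol:

#include <linux/netfilter.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>

/* Placeholder for a hook published by some NAT module (sketch only). */
extern unsigned int (*my_nat_hook)(struct sk_buff **pskb);

static unsigned int my_conntrack_help(struct sk_buff **pskb)
{
        typeof(my_nat_hook) nat;        /* declaration tracks the hook's type */

        /* Single load with the RCU barrier; the netfilter core already
         * holds rcu_read_lock() while running hooks, so this snapshot
         * stays valid for the duration of the call below. */
        nat = rcu_dereference(my_nat_hook);
        if (nat)
                return nat(pskb);
        return NF_ACCEPT;
}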
diff --git a/net/ipv4/netfilter/ip_nat_amanda.c b/net/ipv4/netfilter/ip_nat_amanda.c index 3a888715bbf3..85df1a9aed33 100644 --- a/net/ipv4/netfilter/ip_nat_amanda.c +++ b/net/ipv4/netfilter/ip_nat_amanda.c | |||
@@ -70,15 +70,14 @@ static unsigned int help(struct sk_buff **pskb, | |||
70 | 70 | ||
71 | static void __exit ip_nat_amanda_fini(void) | 71 | static void __exit ip_nat_amanda_fini(void) |
72 | { | 72 | { |
73 | ip_nat_amanda_hook = NULL; | 73 | rcu_assign_pointer(ip_nat_amanda_hook, NULL); |
74 | /* Make sure noone calls it, meanwhile. */ | 74 | synchronize_rcu(); |
75 | synchronize_net(); | ||
76 | } | 75 | } |
77 | 76 | ||
78 | static int __init ip_nat_amanda_init(void) | 77 | static int __init ip_nat_amanda_init(void) |
79 | { | 78 | { |
80 | BUG_ON(ip_nat_amanda_hook); | 79 | BUG_ON(rcu_dereference(ip_nat_amanda_hook)); |
81 | ip_nat_amanda_hook = help; | 80 | rcu_assign_pointer(ip_nat_amanda_hook, help); |
82 | return 0; | 81 | return 0; |
83 | } | 82 | } |
84 | 83 | ||
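The NAT helper modules are the other half of that pattern: they publish their function with rcu_assign_pointer(), which supplies the write barrier the old plain assignment lacked, and on unload they clear the hook and wait in synchronize_rcu() until no CPU can still be executing the function about to disappear. A sketch of the registration side, reusing the placeholder names from the sketch above:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>

extern unsigned int (*my_nat_hook)(struct sk_buff **pskb);

static unsigned int my_nat_help(struct sk_buff **pskb)
{
        return NF_ACCEPT;               /* a real helper mangles the payload here */
}

static int __init my_nat_init(void)
{
        BUG_ON(rcu_dereference(my_nat_hook));           /* refuse double registration */
        rcu_assign_pointer(my_nat_hook, my_nat_help);   /* publish with barrier */
        return 0;
}

static void __exit my_nat_fini(void)
{
        rcu_assign_pointer(my_nat_hook, NULL);
        synchronize_rcu();      /* no CPU may still be inside my_nat_help() */
}

module_init(my_nat_init);
module_exit(my_nat_fini);
MODULE_LICENSE("GPL");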
diff --git a/net/ipv4/netfilter/ip_nat_core.c b/net/ipv4/netfilter/ip_nat_core.c index 4b6260a97408..9d1a5175dcd4 100644 --- a/net/ipv4/netfilter/ip_nat_core.c +++ b/net/ipv4/netfilter/ip_nat_core.c | |||
@@ -362,12 +362,10 @@ manip_pkt(u_int16_t proto, | |||
362 | iph = (void *)(*pskb)->data + iphdroff; | 362 | iph = (void *)(*pskb)->data + iphdroff; |
363 | 363 | ||
364 | if (maniptype == IP_NAT_MANIP_SRC) { | 364 | if (maniptype == IP_NAT_MANIP_SRC) { |
365 | iph->check = nf_csum_update(~iph->saddr, target->src.ip, | 365 | nf_csum_replace4(&iph->check, iph->saddr, target->src.ip); |
366 | iph->check); | ||
367 | iph->saddr = target->src.ip; | 366 | iph->saddr = target->src.ip; |
368 | } else { | 367 | } else { |
369 | iph->check = nf_csum_update(~iph->daddr, target->dst.ip, | 368 | nf_csum_replace4(&iph->check, iph->daddr, target->dst.ip); |
370 | iph->check); | ||
371 | iph->daddr = target->dst.ip; | 369 | iph->daddr = target->dst.ip; |
372 | } | 370 | } |
373 | return 1; | 371 | return 1; |
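manip_pkt() above switches from nf_csum_update(~old, new, check) to nf_csum_replace4(&check, old, new), which keeps the complement-the-old-value convention inside the helper instead of at every call site. Either way the arithmetic is the RFC 1624 incremental update HC' = ~(~HC + ~m + m'), which avoids re-summing the whole header when one field changes. A standalone illustration in plain C, working in host byte order purely to show the math (the kernel helpers operate on __be32/__sum16 in network order):

#include <stdint.h>
#include <stdio.h>

/* Fold a 32-bit accumulator down to 16 bits, wrapping the carries. */
static uint16_t csum_fold32(uint32_t sum)
{
        while (sum >> 16)
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)sum;
}

/* Incrementally update a ones-complement checksum when a 32-bit field
 * changes from 'from' to 'to' (RFC 1624: HC' = ~(~HC + ~m + m')). */
static uint16_t csum_replace4(uint16_t check, uint32_t from, uint32_t to)
{
        uint32_t sum = (uint16_t)~check;

        sum += (uint16_t)~(from >> 16) + (uint16_t)~(from & 0xffff);
        sum += (to >> 16) + (to & 0xffff);
        return (uint16_t)~csum_fold32(sum);
}

int main(void)
{
        uint16_t check = 0xb1e6;        /* arbitrary starting checksum */

        /* e.g. source address 192.168.0.1 rewritten to 10.0.0.1 */
        printf("0x%04x -> 0x%04x\n", check,
               csum_replace4(check, 0xc0a80001, 0x0a000001));
        return 0;
}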
diff --git a/net/ipv4/netfilter/ip_nat_ftp.c b/net/ipv4/netfilter/ip_nat_ftp.c index a71c233d8112..913960e1380f 100644 --- a/net/ipv4/netfilter/ip_nat_ftp.c +++ b/net/ipv4/netfilter/ip_nat_ftp.c | |||
@@ -156,15 +156,14 @@ static unsigned int ip_nat_ftp(struct sk_buff **pskb, | |||
156 | 156 | ||
157 | static void __exit ip_nat_ftp_fini(void) | 157 | static void __exit ip_nat_ftp_fini(void) |
158 | { | 158 | { |
159 | ip_nat_ftp_hook = NULL; | 159 | rcu_assign_pointer(ip_nat_ftp_hook, NULL); |
160 | /* Make sure noone calls it, meanwhile. */ | 160 | synchronize_rcu(); |
161 | synchronize_net(); | ||
162 | } | 161 | } |
163 | 162 | ||
164 | static int __init ip_nat_ftp_init(void) | 163 | static int __init ip_nat_ftp_init(void) |
165 | { | 164 | { |
166 | BUG_ON(ip_nat_ftp_hook); | 165 | BUG_ON(rcu_dereference(ip_nat_ftp_hook)); |
167 | ip_nat_ftp_hook = ip_nat_ftp; | 166 | rcu_assign_pointer(ip_nat_ftp_hook, ip_nat_ftp); |
168 | return 0; | 167 | return 0; |
169 | } | 168 | } |
170 | 169 | ||
diff --git a/net/ipv4/netfilter/ip_nat_helper.c b/net/ipv4/netfilter/ip_nat_helper.c index 3bf858480558..ee80feb4b2a9 100644 --- a/net/ipv4/netfilter/ip_nat_helper.c +++ b/net/ipv4/netfilter/ip_nat_helper.c | |||
@@ -188,10 +188,8 @@ ip_nat_mangle_tcp_packet(struct sk_buff **pskb, | |||
188 | csum_partial((char *)tcph, | 188 | csum_partial((char *)tcph, |
189 | datalen, 0)); | 189 | datalen, 0)); |
190 | } else | 190 | } else |
191 | tcph->check = nf_proto_csum_update(*pskb, | 191 | nf_proto_csum_replace2(&tcph->check, *pskb, |
192 | htons(oldlen) ^ htons(0xFFFF), | 192 | htons(oldlen), htons(datalen), 1); |
193 | htons(datalen), | ||
194 | tcph->check, 1); | ||
195 | 193 | ||
196 | if (rep_len != match_len) { | 194 | if (rep_len != match_len) { |
197 | set_bit(IPS_SEQ_ADJUST_BIT, &ct->status); | 195 | set_bit(IPS_SEQ_ADJUST_BIT, &ct->status); |
@@ -264,12 +262,10 @@ ip_nat_mangle_udp_packet(struct sk_buff **pskb, | |||
264 | csum_partial((char *)udph, | 262 | csum_partial((char *)udph, |
265 | datalen, 0)); | 263 | datalen, 0)); |
266 | if (!udph->check) | 264 | if (!udph->check) |
267 | udph->check = -1; | 265 | udph->check = CSUM_MANGLED_0; |
268 | } else | 266 | } else |
269 | udph->check = nf_proto_csum_update(*pskb, | 267 | nf_proto_csum_replace2(&udph->check, *pskb, |
270 | htons(oldlen) ^ htons(0xFFFF), | 268 | htons(oldlen), htons(datalen), 1); |
271 | htons(datalen), | ||
272 | udph->check, 1); | ||
273 | return 1; | 269 | return 1; |
274 | } | 270 | } |
275 | EXPORT_SYMBOL(ip_nat_mangle_udp_packet); | 271 | EXPORT_SYMBOL(ip_nat_mangle_udp_packet); |
@@ -307,14 +303,10 @@ sack_adjust(struct sk_buff *skb, | |||
307 | ntohl(sack->start_seq), new_start_seq, | 303 | ntohl(sack->start_seq), new_start_seq, |
308 | ntohl(sack->end_seq), new_end_seq); | 304 | ntohl(sack->end_seq), new_end_seq); |
309 | 305 | ||
310 | tcph->check = nf_proto_csum_update(skb, | 306 | nf_proto_csum_replace4(&tcph->check, skb, |
311 | ~sack->start_seq, | 307 | sack->start_seq, new_start_seq, 0); |
312 | new_start_seq, | 308 | nf_proto_csum_replace4(&tcph->check, skb, |
313 | tcph->check, 0); | 309 | sack->end_seq, new_end_seq, 0); |
314 | tcph->check = nf_proto_csum_update(skb, | ||
315 | ~sack->end_seq, | ||
316 | new_end_seq, | ||
317 | tcph->check, 0); | ||
318 | sack->start_seq = new_start_seq; | 310 | sack->start_seq = new_start_seq; |
319 | sack->end_seq = new_end_seq; | 311 | sack->end_seq = new_end_seq; |
320 | sackoff += sizeof(*sack); | 312 | sackoff += sizeof(*sack); |
@@ -397,10 +389,8 @@ ip_nat_seq_adjust(struct sk_buff **pskb, | |||
397 | else | 389 | else |
398 | newack = htonl(ntohl(tcph->ack_seq) - other_way->offset_before); | 390 | newack = htonl(ntohl(tcph->ack_seq) - other_way->offset_before); |
399 | 391 | ||
400 | tcph->check = nf_proto_csum_update(*pskb, ~tcph->seq, newseq, | 392 | nf_proto_csum_replace4(&tcph->check, *pskb, tcph->seq, newseq, 0); |
401 | tcph->check, 0); | 393 | nf_proto_csum_replace4(&tcph->check, *pskb, tcph->ack_seq, newack, 0); |
402 | tcph->check = nf_proto_csum_update(*pskb, ~tcph->ack_seq, newack, | ||
403 | tcph->check, 0); | ||
404 | 394 | ||
405 | DEBUGP("Adjusting sequence number from %u->%u, ack from %u->%u\n", | 395 | DEBUGP("Adjusting sequence number from %u->%u, ack from %u->%u\n", |
406 | ntohl(tcph->seq), ntohl(newseq), ntohl(tcph->ack_seq), | 396 | ntohl(tcph->seq), ntohl(newseq), ntohl(tcph->ack_seq), |
diff --git a/net/ipv4/netfilter/ip_nat_helper_h323.c b/net/ipv4/netfilter/ip_nat_helper_h323.c index 4a7d34466ee2..bdc99ef6159e 100644 --- a/net/ipv4/netfilter/ip_nat_helper_h323.c +++ b/net/ipv4/netfilter/ip_nat_helper_h323.c | |||
@@ -563,25 +563,25 @@ static int nat_callforwarding(struct sk_buff **pskb, struct ip_conntrack *ct, | |||
563 | /****************************************************************************/ | 563 | /****************************************************************************/ |
564 | static int __init init(void) | 564 | static int __init init(void) |
565 | { | 565 | { |
566 | BUG_ON(set_h245_addr_hook != NULL); | 566 | BUG_ON(rcu_dereference(set_h245_addr_hook) != NULL); |
567 | BUG_ON(set_h225_addr_hook != NULL); | 567 | BUG_ON(rcu_dereference(set_h225_addr_hook) != NULL); |
568 | BUG_ON(set_sig_addr_hook != NULL); | 568 | BUG_ON(rcu_dereference(set_sig_addr_hook) != NULL); |
569 | BUG_ON(set_ras_addr_hook != NULL); | 569 | BUG_ON(rcu_dereference(set_ras_addr_hook) != NULL); |
570 | BUG_ON(nat_rtp_rtcp_hook != NULL); | 570 | BUG_ON(rcu_dereference(nat_rtp_rtcp_hook) != NULL); |
571 | BUG_ON(nat_t120_hook != NULL); | 571 | BUG_ON(rcu_dereference(nat_t120_hook) != NULL); |
572 | BUG_ON(nat_h245_hook != NULL); | 572 | BUG_ON(rcu_dereference(nat_h245_hook) != NULL); |
573 | BUG_ON(nat_callforwarding_hook != NULL); | 573 | BUG_ON(rcu_dereference(nat_callforwarding_hook) != NULL); |
574 | BUG_ON(nat_q931_hook != NULL); | 574 | BUG_ON(rcu_dereference(nat_q931_hook) != NULL); |
575 | 575 | ||
576 | set_h245_addr_hook = set_h245_addr; | 576 | rcu_assign_pointer(set_h245_addr_hook, set_h245_addr); |
577 | set_h225_addr_hook = set_h225_addr; | 577 | rcu_assign_pointer(set_h225_addr_hook, set_h225_addr); |
578 | set_sig_addr_hook = set_sig_addr; | 578 | rcu_assign_pointer(set_sig_addr_hook, set_sig_addr); |
579 | set_ras_addr_hook = set_ras_addr; | 579 | rcu_assign_pointer(set_ras_addr_hook, set_ras_addr); |
580 | nat_rtp_rtcp_hook = nat_rtp_rtcp; | 580 | rcu_assign_pointer(nat_rtp_rtcp_hook, nat_rtp_rtcp); |
581 | nat_t120_hook = nat_t120; | 581 | rcu_assign_pointer(nat_t120_hook, nat_t120); |
582 | nat_h245_hook = nat_h245; | 582 | rcu_assign_pointer(nat_h245_hook, nat_h245); |
583 | nat_callforwarding_hook = nat_callforwarding; | 583 | rcu_assign_pointer(nat_callforwarding_hook, nat_callforwarding); |
584 | nat_q931_hook = nat_q931; | 584 | rcu_assign_pointer(nat_q931_hook, nat_q931); |
585 | 585 | ||
586 | DEBUGP("ip_nat_h323: init success\n"); | 586 | DEBUGP("ip_nat_h323: init success\n"); |
587 | return 0; | 587 | return 0; |
@@ -590,16 +590,16 @@ static int __init init(void) | |||
590 | /****************************************************************************/ | 590 | /****************************************************************************/ |
591 | static void __exit fini(void) | 591 | static void __exit fini(void) |
592 | { | 592 | { |
593 | set_h245_addr_hook = NULL; | 593 | rcu_assign_pointer(set_h245_addr_hook, NULL); |
594 | set_h225_addr_hook = NULL; | 594 | rcu_assign_pointer(set_h225_addr_hook, NULL); |
595 | set_sig_addr_hook = NULL; | 595 | rcu_assign_pointer(set_sig_addr_hook, NULL); |
596 | set_ras_addr_hook = NULL; | 596 | rcu_assign_pointer(set_ras_addr_hook, NULL); |
597 | nat_rtp_rtcp_hook = NULL; | 597 | rcu_assign_pointer(nat_rtp_rtcp_hook, NULL); |
598 | nat_t120_hook = NULL; | 598 | rcu_assign_pointer(nat_t120_hook, NULL); |
599 | nat_h245_hook = NULL; | 599 | rcu_assign_pointer(nat_h245_hook, NULL); |
600 | nat_callforwarding_hook = NULL; | 600 | rcu_assign_pointer(nat_callforwarding_hook, NULL); |
601 | nat_q931_hook = NULL; | 601 | rcu_assign_pointer(nat_q931_hook, NULL); |
602 | synchronize_net(); | 602 | synchronize_rcu(); |
603 | } | 603 | } |
604 | 604 | ||
605 | /****************************************************************************/ | 605 | /****************************************************************************/ |
diff --git a/net/ipv4/netfilter/ip_nat_helper_pptp.c b/net/ipv4/netfilter/ip_nat_helper_pptp.c index 329fdcd7d702..ec957bbb5366 100644 --- a/net/ipv4/netfilter/ip_nat_helper_pptp.c +++ b/net/ipv4/netfilter/ip_nat_helper_pptp.c | |||
@@ -101,7 +101,7 @@ static void pptp_nat_expected(struct ip_conntrack *ct, | |||
101 | 101 | ||
102 | DEBUGP("trying to unexpect other dir: "); | 102 | DEBUGP("trying to unexpect other dir: "); |
103 | DUMP_TUPLE(&t); | 103 | DUMP_TUPLE(&t); |
104 | other_exp = ip_conntrack_expect_find(&t); | 104 | other_exp = ip_conntrack_expect_find_get(&t); |
105 | if (other_exp) { | 105 | if (other_exp) { |
106 | ip_conntrack_unexpect_related(other_exp); | 106 | ip_conntrack_unexpect_related(other_exp); |
107 | ip_conntrack_expect_put(other_exp); | 107 | ip_conntrack_expect_put(other_exp); |
@@ -315,17 +315,17 @@ static int __init ip_nat_helper_pptp_init(void) | |||
315 | if (ret < 0) | 315 | if (ret < 0) |
316 | return ret; | 316 | return ret; |
317 | 317 | ||
318 | BUG_ON(ip_nat_pptp_hook_outbound); | 318 | BUG_ON(rcu_dereference(ip_nat_pptp_hook_outbound)); |
319 | ip_nat_pptp_hook_outbound = &pptp_outbound_pkt; | 319 | rcu_assign_pointer(ip_nat_pptp_hook_outbound, pptp_outbound_pkt); |
320 | 320 | ||
321 | BUG_ON(ip_nat_pptp_hook_inbound); | 321 | BUG_ON(rcu_dereference(ip_nat_pptp_hook_inbound)); |
322 | ip_nat_pptp_hook_inbound = &pptp_inbound_pkt; | 322 | rcu_assign_pointer(ip_nat_pptp_hook_inbound, pptp_inbound_pkt); |
323 | 323 | ||
324 | BUG_ON(ip_nat_pptp_hook_exp_gre); | 324 | BUG_ON(rcu_dereference(ip_nat_pptp_hook_exp_gre)); |
325 | ip_nat_pptp_hook_exp_gre = &pptp_exp_gre; | 325 | rcu_assign_pointer(ip_nat_pptp_hook_exp_gre, pptp_exp_gre); |
326 | 326 | ||
327 | BUG_ON(ip_nat_pptp_hook_expectfn); | 327 | BUG_ON(rcu_dereference(ip_nat_pptp_hook_expectfn)); |
328 | ip_nat_pptp_hook_expectfn = &pptp_nat_expected; | 328 | rcu_assign_pointer(ip_nat_pptp_hook_expectfn, pptp_nat_expected); |
329 | 329 | ||
330 | printk("ip_nat_pptp version %s loaded\n", IP_NAT_PPTP_VERSION); | 330 | printk("ip_nat_pptp version %s loaded\n", IP_NAT_PPTP_VERSION); |
331 | return 0; | 331 | return 0; |
@@ -335,14 +335,13 @@ static void __exit ip_nat_helper_pptp_fini(void) | |||
335 | { | 335 | { |
336 | DEBUGP("cleanup_module\n" ); | 336 | DEBUGP("cleanup_module\n" ); |
337 | 337 | ||
338 | ip_nat_pptp_hook_expectfn = NULL; | 338 | rcu_assign_pointer(ip_nat_pptp_hook_expectfn, NULL); |
339 | ip_nat_pptp_hook_exp_gre = NULL; | 339 | rcu_assign_pointer(ip_nat_pptp_hook_exp_gre, NULL); |
340 | ip_nat_pptp_hook_inbound = NULL; | 340 | rcu_assign_pointer(ip_nat_pptp_hook_inbound, NULL); |
341 | ip_nat_pptp_hook_outbound = NULL; | 341 | rcu_assign_pointer(ip_nat_pptp_hook_outbound, NULL); |
342 | synchronize_rcu(); | ||
342 | 343 | ||
343 | ip_nat_proto_gre_fini(); | 344 | ip_nat_proto_gre_fini(); |
344 | /* Make sure noone calls it, meanwhile */ | ||
345 | synchronize_net(); | ||
346 | 345 | ||
347 | printk("ip_nat_pptp version %s unloaded\n", IP_NAT_PPTP_VERSION); | 346 | printk("ip_nat_pptp version %s unloaded\n", IP_NAT_PPTP_VERSION); |
348 | } | 347 | } |
diff --git a/net/ipv4/netfilter/ip_nat_irc.c b/net/ipv4/netfilter/ip_nat_irc.c index a767123e082c..feb26b48f1d5 100644 --- a/net/ipv4/netfilter/ip_nat_irc.c +++ b/net/ipv4/netfilter/ip_nat_irc.c | |||
@@ -98,15 +98,14 @@ static unsigned int help(struct sk_buff **pskb, | |||
98 | 98 | ||
99 | static void __exit ip_nat_irc_fini(void) | 99 | static void __exit ip_nat_irc_fini(void) |
100 | { | 100 | { |
101 | ip_nat_irc_hook = NULL; | 101 | rcu_assign_pointer(ip_nat_irc_hook, NULL); |
102 | /* Make sure noone calls it, meanwhile. */ | 102 | synchronize_rcu(); |
103 | synchronize_net(); | ||
104 | } | 103 | } |
105 | 104 | ||
106 | static int __init ip_nat_irc_init(void) | 105 | static int __init ip_nat_irc_init(void) |
107 | { | 106 | { |
108 | BUG_ON(ip_nat_irc_hook); | 107 | BUG_ON(rcu_dereference(ip_nat_irc_hook)); |
109 | ip_nat_irc_hook = help; | 108 | rcu_assign_pointer(ip_nat_irc_hook, help); |
110 | return 0; | 109 | return 0; |
111 | } | 110 | } |
112 | 111 | ||
diff --git a/net/ipv4/netfilter/ip_nat_proto_gre.c b/net/ipv4/netfilter/ip_nat_proto_gre.c index bf91f9312b3c..95810202d849 100644 --- a/net/ipv4/netfilter/ip_nat_proto_gre.c +++ b/net/ipv4/netfilter/ip_nat_proto_gre.c | |||
@@ -129,11 +129,9 @@ gre_manip_pkt(struct sk_buff **pskb, | |||
129 | } | 129 | } |
130 | if (greh->csum) { | 130 | if (greh->csum) { |
131 | /* FIXME: Never tested this code... */ | 131 | /* FIXME: Never tested this code... */ |
132 | *(gre_csum(greh)) = | 132 | nf_proto_csum_replace4(gre_csum(greh), *pskb, |
133 | nf_proto_csum_update(*pskb, | 133 | *(gre_key(greh)), |
134 | ~*(gre_key(greh)), | 134 | tuple->dst.u.gre.key, 0); |
135 | tuple->dst.u.gre.key, | ||
136 | *(gre_csum(greh)), 0); | ||
137 | } | 135 | } |
138 | *(gre_key(greh)) = tuple->dst.u.gre.key; | 136 | *(gre_key(greh)) = tuple->dst.u.gre.key; |
139 | break; | 137 | break; |
diff --git a/net/ipv4/netfilter/ip_nat_proto_icmp.c b/net/ipv4/netfilter/ip_nat_proto_icmp.c index 3f6efc13ac74..fb716edd5bc6 100644 --- a/net/ipv4/netfilter/ip_nat_proto_icmp.c +++ b/net/ipv4/netfilter/ip_nat_proto_icmp.c | |||
@@ -24,8 +24,8 @@ icmp_in_range(const struct ip_conntrack_tuple *tuple, | |||
24 | const union ip_conntrack_manip_proto *min, | 24 | const union ip_conntrack_manip_proto *min, |
25 | const union ip_conntrack_manip_proto *max) | 25 | const union ip_conntrack_manip_proto *max) |
26 | { | 26 | { |
27 | return (tuple->src.u.icmp.id >= min->icmp.id | 27 | return ntohs(tuple->src.u.icmp.id) >= ntohs(min->icmp.id) && |
28 | && tuple->src.u.icmp.id <= max->icmp.id); | 28 | ntohs(tuple->src.u.icmp.id) <= ntohs(max->icmp.id); |
29 | } | 29 | } |
30 | 30 | ||
31 | static int | 31 | static int |
@@ -66,10 +66,8 @@ icmp_manip_pkt(struct sk_buff **pskb, | |||
66 | return 0; | 66 | return 0; |
67 | 67 | ||
68 | hdr = (struct icmphdr *)((*pskb)->data + hdroff); | 68 | hdr = (struct icmphdr *)((*pskb)->data + hdroff); |
69 | hdr->checksum = nf_proto_csum_update(*pskb, | 69 | nf_proto_csum_replace2(&hdr->checksum, *pskb, |
70 | hdr->un.echo.id ^ htons(0xFFFF), | 70 | hdr->un.echo.id, tuple->src.u.icmp.id, 0); |
71 | tuple->src.u.icmp.id, | ||
72 | hdr->checksum, 0); | ||
73 | hdr->un.echo.id = tuple->src.u.icmp.id; | 71 | hdr->un.echo.id = tuple->src.u.icmp.id; |
74 | return 1; | 72 | return 1; |
75 | } | 73 | } |
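icmp_in_range() above converts the echo IDs with ntohs() before comparing: __be16 values compared as raw integers do not preserve numeric ordering on a little-endian host, so the old test could accept IDs far outside the configured range. A small demonstration (the "raw" line assumes a little-endian machine):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint16_t id  = htons(256);      /* 0x0100 on the wire */
        uint16_t max = htons(2);        /* 0x0002 on the wire */

        /* On little-endian, the stored values are 0x0001 and 0x0200, so the
         * raw comparison wrongly reports 256 as within a maximum of 2. */
        printf("raw comparison:   %d\n", id <= max);
        printf("ntohs comparison: %d\n", ntohs(id) <= ntohs(max));
        return 0;
}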
diff --git a/net/ipv4/netfilter/ip_nat_proto_tcp.c b/net/ipv4/netfilter/ip_nat_proto_tcp.c index 12deb13b93b1..b586d18b3fb3 100644 --- a/net/ipv4/netfilter/ip_nat_proto_tcp.c +++ b/net/ipv4/netfilter/ip_nat_proto_tcp.c | |||
@@ -129,9 +129,8 @@ tcp_manip_pkt(struct sk_buff **pskb, | |||
129 | if (hdrsize < sizeof(*hdr)) | 129 | if (hdrsize < sizeof(*hdr)) |
130 | return 1; | 130 | return 1; |
131 | 131 | ||
132 | hdr->check = nf_proto_csum_update(*pskb, ~oldip, newip, hdr->check, 1); | 132 | nf_proto_csum_replace4(&hdr->check, *pskb, oldip, newip, 1); |
133 | hdr->check = nf_proto_csum_update(*pskb, oldport ^ htons(0xFFFF), newport, | 133 | nf_proto_csum_replace2(&hdr->check, *pskb, oldport, newport, 0); |
134 | hdr->check, 0); | ||
135 | return 1; | 134 | return 1; |
136 | } | 135 | } |
137 | 136 | ||
diff --git a/net/ipv4/netfilter/ip_nat_proto_udp.c b/net/ipv4/netfilter/ip_nat_proto_udp.c index 4bbec7730d18..5ced0877b32f 100644 --- a/net/ipv4/netfilter/ip_nat_proto_udp.c +++ b/net/ipv4/netfilter/ip_nat_proto_udp.c | |||
@@ -115,13 +115,10 @@ udp_manip_pkt(struct sk_buff **pskb, | |||
115 | } | 115 | } |
116 | 116 | ||
117 | if (hdr->check || (*pskb)->ip_summed == CHECKSUM_PARTIAL) { | 117 | if (hdr->check || (*pskb)->ip_summed == CHECKSUM_PARTIAL) { |
118 | hdr->check = nf_proto_csum_update(*pskb, ~oldip, newip, | 118 | nf_proto_csum_replace4(&hdr->check, *pskb, oldip, newip, 1); |
119 | hdr->check, 1); | 119 | nf_proto_csum_replace2(&hdr->check, *pskb, *portptr, newport, 0); |
120 | hdr->check = nf_proto_csum_update(*pskb, | ||
121 | *portptr ^ htons(0xFFFF), newport, | ||
122 | hdr->check, 0); | ||
123 | if (!hdr->check) | 120 | if (!hdr->check) |
124 | hdr->check = -1; | 121 | hdr->check = CSUM_MANGLED_0; |
125 | } | 122 | } |
126 | *portptr = newport; | 123 | *portptr = newport; |
127 | return 1; | 124 | return 1; |
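The TCP and UDP manipulation hunks above switch to nf_proto_csum_replace{2,4}(), and the UDP one also replaces the open-coded hdr->check = -1 with CSUM_MANGLED_0. The rule being encoded: a UDP checksum that computes to zero must be transmitted as 0xffff, since zero on the wire means "sender did not checksum this datagram" and 0xffff represents the same ones-complement value. A minimal sketch of just that final fixup:

#include <stdint.h>
#include <stdio.h>

#define CSUM_MANGLED_0 ((uint16_t)0xffff)       /* same value as the kernel constant */

/* Never emit a computed UDP checksum of 0; 0xffff is its ones-complement
 * equivalent, so receivers still verify the datagram correctly. */
static uint16_t udp_finalize_csum(uint16_t csum)
{
        return csum ? csum : CSUM_MANGLED_0;
}

int main(void)
{
        printf("0x0000 -> 0x%04x\n", udp_finalize_csum(0x0000));
        printf("0x1234 -> 0x%04x\n", udp_finalize_csum(0x1234));
        return 0;
}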
diff --git a/net/ipv4/netfilter/ip_nat_sip.c b/net/ipv4/netfilter/ip_nat_sip.c index 71fc2730a007..6223abc924ff 100644 --- a/net/ipv4/netfilter/ip_nat_sip.c +++ b/net/ipv4/netfilter/ip_nat_sip.c | |||
@@ -29,27 +29,70 @@ MODULE_DESCRIPTION("SIP NAT helper"); | |||
29 | #define DEBUGP(format, args...) | 29 | #define DEBUGP(format, args...) |
30 | #endif | 30 | #endif |
31 | 31 | ||
32 | extern struct sip_header_nfo ct_sip_hdrs[]; | 32 | struct addr_map { |
33 | struct { | ||
34 | char src[sizeof("nnn.nnn.nnn.nnn:nnnnn")]; | ||
35 | char dst[sizeof("nnn.nnn.nnn.nnn:nnnnn")]; | ||
36 | unsigned int srclen, srciplen; | ||
37 | unsigned int dstlen, dstiplen; | ||
38 | } addr[IP_CT_DIR_MAX]; | ||
39 | }; | ||
40 | |||
41 | static void addr_map_init(struct ip_conntrack *ct, struct addr_map *map) | ||
42 | { | ||
43 | struct ip_conntrack_tuple *t; | ||
44 | enum ip_conntrack_dir dir; | ||
45 | unsigned int n; | ||
46 | |||
47 | for (dir = 0; dir < IP_CT_DIR_MAX; dir++) { | ||
48 | t = &ct->tuplehash[dir].tuple; | ||
49 | |||
50 | n = sprintf(map->addr[dir].src, "%u.%u.%u.%u", | ||
51 | NIPQUAD(t->src.ip)); | ||
52 | map->addr[dir].srciplen = n; | ||
53 | n += sprintf(map->addr[dir].src + n, ":%u", | ||
54 | ntohs(t->src.u.udp.port)); | ||
55 | map->addr[dir].srclen = n; | ||
56 | |||
57 | n = sprintf(map->addr[dir].dst, "%u.%u.%u.%u", | ||
58 | NIPQUAD(t->dst.ip)); | ||
59 | map->addr[dir].dstiplen = n; | ||
60 | n += sprintf(map->addr[dir].dst + n, ":%u", | ||
61 | ntohs(t->dst.u.udp.port)); | ||
62 | map->addr[dir].dstlen = n; | ||
63 | } | ||
64 | } | ||
33 | 65 | ||
34 | static unsigned int mangle_sip_packet(struct sk_buff **pskb, | 66 | static int map_sip_addr(struct sk_buff **pskb, enum ip_conntrack_info ctinfo, |
35 | enum ip_conntrack_info ctinfo, | 67 | struct ip_conntrack *ct, const char **dptr, size_t dlen, |
36 | struct ip_conntrack *ct, | 68 | enum sip_header_pos pos, struct addr_map *map) |
37 | const char **dptr, size_t dlen, | ||
38 | char *buffer, int bufflen, | ||
39 | struct sip_header_nfo *hnfo) | ||
40 | { | 69 | { |
41 | unsigned int matchlen, matchoff; | 70 | enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); |
71 | unsigned int matchlen, matchoff, addrlen; | ||
72 | char *addr; | ||
42 | 73 | ||
43 | if (ct_sip_get_info(*dptr, dlen, &matchoff, &matchlen, hnfo) <= 0) | 74 | if (ct_sip_get_info(*dptr, dlen, &matchoff, &matchlen, pos) <= 0) |
44 | return 0; | 75 | return 1; |
76 | |||
77 | if ((matchlen == map->addr[dir].srciplen || | ||
78 | matchlen == map->addr[dir].srclen) && | ||
79 | memcmp(*dptr + matchoff, map->addr[dir].src, matchlen) == 0) { | ||
80 | addr = map->addr[!dir].dst; | ||
81 | addrlen = map->addr[!dir].dstlen; | ||
82 | } else if ((matchlen == map->addr[dir].dstiplen || | ||
83 | matchlen == map->addr[dir].dstlen) && | ||
84 | memcmp(*dptr + matchoff, map->addr[dir].dst, matchlen) == 0) { | ||
85 | addr = map->addr[!dir].src; | ||
86 | addrlen = map->addr[!dir].srclen; | ||
87 | } else | ||
88 | return 1; | ||
45 | 89 | ||
46 | if (!ip_nat_mangle_udp_packet(pskb, ct, ctinfo, | 90 | if (!ip_nat_mangle_udp_packet(pskb, ct, ctinfo, |
47 | matchoff, matchlen, buffer, bufflen)) | 91 | matchoff, matchlen, addr, addrlen)) |
48 | return 0; | 92 | return 0; |
49 | |||
50 | /* We need to reload this. Thanks Patrick. */ | ||
51 | *dptr = (*pskb)->data + (*pskb)->nh.iph->ihl*4 + sizeof(struct udphdr); | 93 | *dptr = (*pskb)->data + (*pskb)->nh.iph->ihl*4 + sizeof(struct udphdr); |
52 | return 1; | 94 | return 1; |
95 | |||
53 | } | 96 | } |
54 | 97 | ||
55 | static unsigned int ip_nat_sip(struct sk_buff **pskb, | 98 | static unsigned int ip_nat_sip(struct sk_buff **pskb, |
@@ -57,70 +100,61 @@ static unsigned int ip_nat_sip(struct sk_buff **pskb, | |||
57 | struct ip_conntrack *ct, | 100 | struct ip_conntrack *ct, |
58 | const char **dptr) | 101 | const char **dptr) |
59 | { | 102 | { |
60 | enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); | 103 | enum sip_header_pos pos; |
61 | char buffer[sizeof("nnn.nnn.nnn.nnn:nnnnn")]; | 104 | struct addr_map map; |
62 | unsigned int bufflen, dataoff; | 105 | int dataoff, datalen; |
63 | __be32 ip; | ||
64 | __be16 port; | ||
65 | 106 | ||
66 | dataoff = (*pskb)->nh.iph->ihl*4 + sizeof(struct udphdr); | 107 | dataoff = (*pskb)->nh.iph->ihl*4 + sizeof(struct udphdr); |
108 | datalen = (*pskb)->len - dataoff; | ||
109 | if (datalen < sizeof("SIP/2.0") - 1) | ||
110 | return NF_DROP; | ||
111 | |||
112 | addr_map_init(ct, &map); | ||
113 | |||
114 | /* Basic rules: requests and responses. */ | ||
115 | if (strncmp(*dptr, "SIP/2.0", sizeof("SIP/2.0") - 1) != 0) { | ||
116 | /* 10.2: Constructing the REGISTER Request: | ||
117 | * | ||
118 | * The "userinfo" and "@" components of the SIP URI MUST NOT | ||
119 | * be present. | ||
120 | */ | ||
121 | if (datalen >= sizeof("REGISTER") - 1 && | ||
122 | strncmp(*dptr, "REGISTER", sizeof("REGISTER") - 1) == 0) | ||
123 | pos = POS_REG_REQ_URI; | ||
124 | else | ||
125 | pos = POS_REQ_URI; | ||
126 | |||
127 | if (!map_sip_addr(pskb, ctinfo, ct, dptr, datalen, pos, &map)) | ||
128 | return NF_DROP; | ||
129 | } | ||
130 | |||
131 | if (!map_sip_addr(pskb, ctinfo, ct, dptr, datalen, POS_FROM, &map) || | ||
132 | !map_sip_addr(pskb, ctinfo, ct, dptr, datalen, POS_TO, &map) || | ||
133 | !map_sip_addr(pskb, ctinfo, ct, dptr, datalen, POS_VIA, &map) || | ||
134 | !map_sip_addr(pskb, ctinfo, ct, dptr, datalen, POS_CONTACT, &map)) | ||
135 | return NF_DROP; | ||
136 | return NF_ACCEPT; | ||
137 | } | ||
67 | 138 | ||
68 | ip = ct->tuplehash[!dir].tuple.dst.ip; | 139 | static unsigned int mangle_sip_packet(struct sk_buff **pskb, |
69 | port = ct->tuplehash[!dir].tuple.dst.u.udp.port; | 140 | enum ip_conntrack_info ctinfo, |
70 | bufflen = sprintf(buffer, "%u.%u.%u.%u:%u", NIPQUAD(ip), ntohs(port)); | 141 | struct ip_conntrack *ct, |
142 | const char **dptr, size_t dlen, | ||
143 | char *buffer, int bufflen, | ||
144 | enum sip_header_pos pos) | ||
145 | { | ||
146 | unsigned int matchlen, matchoff; | ||
71 | 147 | ||
72 | /* short packet ? */ | 148 | if (ct_sip_get_info(*dptr, dlen, &matchoff, &matchlen, pos) <= 0) |
73 | if (((*pskb)->len - dataoff) < (sizeof("SIP/2.0") - 1)) | ||
74 | return 0; | 149 | return 0; |
75 | 150 | ||
76 | /* Basic rules: requests and responses. */ | 151 | if (!ip_nat_mangle_udp_packet(pskb, ct, ctinfo, |
77 | if (memcmp(*dptr, "SIP/2.0", sizeof("SIP/2.0") - 1) == 0) { | 152 | matchoff, matchlen, buffer, bufflen)) |
78 | const char *aux; | 153 | return 0; |
79 | |||
80 | if ((ctinfo) < IP_CT_IS_REPLY) { | ||
81 | mangle_sip_packet(pskb, ctinfo, ct, dptr, | ||
82 | (*pskb)->len - dataoff, | ||
83 | buffer, bufflen, | ||
84 | &ct_sip_hdrs[POS_CONTACT]); | ||
85 | return 1; | ||
86 | } | ||
87 | 154 | ||
88 | if (!mangle_sip_packet(pskb, ctinfo, ct, dptr, | 155 | /* We need to reload this. Thanks Patrick. */ |
89 | (*pskb)->len - dataoff, | 156 | *dptr = (*pskb)->data + (*pskb)->nh.iph->ihl*4 + sizeof(struct udphdr); |
90 | buffer, bufflen, &ct_sip_hdrs[POS_VIA])) | 157 | return 1; |
91 | return 0; | ||
92 | |||
93 | /* This search should ignore case, but later.. */ | ||
94 | aux = ct_sip_search("CSeq:", *dptr, sizeof("CSeq:") - 1, | ||
95 | (*pskb)->len - dataoff); | ||
96 | if (!aux) | ||
97 | return 0; | ||
98 | |||
99 | if (!ct_sip_search("REGISTER", aux, sizeof("REGISTER"), | ||
100 | ct_sip_lnlen(aux, *dptr + (*pskb)->len - dataoff))) | ||
101 | return 1; | ||
102 | |||
103 | return mangle_sip_packet(pskb, ctinfo, ct, dptr, | ||
104 | (*pskb)->len - dataoff, | ||
105 | buffer, bufflen, | ||
106 | &ct_sip_hdrs[POS_CONTACT]); | ||
107 | } | ||
108 | if ((ctinfo) < IP_CT_IS_REPLY) { | ||
109 | if (!mangle_sip_packet(pskb, ctinfo, ct, dptr, | ||
110 | (*pskb)->len - dataoff, | ||
111 | buffer, bufflen, &ct_sip_hdrs[POS_VIA])) | ||
112 | return 0; | ||
113 | |||
114 | /* Mangle Contact if exists only. - watch udp_nat_mangle()! */ | ||
115 | mangle_sip_packet(pskb, ctinfo, ct, dptr, (*pskb)->len - dataoff, | ||
116 | buffer, bufflen, &ct_sip_hdrs[POS_CONTACT]); | ||
117 | return 1; | ||
118 | } | ||
119 | /* This mangle requests headers. */ | ||
120 | return mangle_sip_packet(pskb, ctinfo, ct, dptr, | ||
121 | ct_sip_lnlen(*dptr, | ||
122 | *dptr + (*pskb)->len - dataoff), | ||
123 | buffer, bufflen, &ct_sip_hdrs[POS_REQ_HEADER]); | ||
124 | } | 158 | } |
125 | 159 | ||
126 | static int mangle_content_len(struct sk_buff **pskb, | 160 | static int mangle_content_len(struct sk_buff **pskb, |
@@ -136,7 +170,7 @@ static int mangle_content_len(struct sk_buff **pskb, | |||
136 | 170 | ||
137 | /* Get actual SDP length */ | 171 | /* Get actual SDP length */ |
138 | if (ct_sip_get_info(dptr, (*pskb)->len - dataoff, &matchoff, | 172 | if (ct_sip_get_info(dptr, (*pskb)->len - dataoff, &matchoff, |
139 | &matchlen, &ct_sip_hdrs[POS_SDP_HEADER]) > 0) { | 173 | &matchlen, POS_SDP_HEADER) > 0) { |
140 | 174 | ||
141 | /* since ct_sip_get_info() gives us a pointer past 'v=' | 175 | /* since ct_sip_get_info() gives us a pointer past 'v=' |
142 | we need to add 2 bytes in this count. */ | 176 | we need to add 2 bytes in this count. */ |
@@ -144,7 +178,7 @@ static int mangle_content_len(struct sk_buff **pskb, | |||
144 | 178 | ||
145 | /* Now, update SDP length */ | 179 | /* Now, update SDP length */ |
146 | if (ct_sip_get_info(dptr, (*pskb)->len - dataoff, &matchoff, | 180 | if (ct_sip_get_info(dptr, (*pskb)->len - dataoff, &matchoff, |
147 | &matchlen, &ct_sip_hdrs[POS_CONTENT]) > 0) { | 181 | &matchlen, POS_CONTENT) > 0) { |
148 | 182 | ||
149 | bufflen = sprintf(buffer, "%u", c_len); | 183 | bufflen = sprintf(buffer, "%u", c_len); |
150 | 184 | ||
@@ -170,17 +204,17 @@ static unsigned int mangle_sdp(struct sk_buff **pskb, | |||
170 | /* Mangle owner and contact info. */ | 204 | /* Mangle owner and contact info. */ |
171 | bufflen = sprintf(buffer, "%u.%u.%u.%u", NIPQUAD(newip)); | 205 | bufflen = sprintf(buffer, "%u.%u.%u.%u", NIPQUAD(newip)); |
172 | if (!mangle_sip_packet(pskb, ctinfo, ct, &dptr, (*pskb)->len - dataoff, | 206 | if (!mangle_sip_packet(pskb, ctinfo, ct, &dptr, (*pskb)->len - dataoff, |
173 | buffer, bufflen, &ct_sip_hdrs[POS_OWNER])) | 207 | buffer, bufflen, POS_OWNER)) |
174 | return 0; | 208 | return 0; |
175 | 209 | ||
176 | if (!mangle_sip_packet(pskb, ctinfo, ct, &dptr, (*pskb)->len - dataoff, | 210 | if (!mangle_sip_packet(pskb, ctinfo, ct, &dptr, (*pskb)->len - dataoff, |
177 | buffer, bufflen, &ct_sip_hdrs[POS_CONNECTION])) | 211 | buffer, bufflen, POS_CONNECTION)) |
178 | return 0; | 212 | return 0; |
179 | 213 | ||
180 | /* Mangle media port. */ | 214 | /* Mangle media port. */ |
181 | bufflen = sprintf(buffer, "%u", port); | 215 | bufflen = sprintf(buffer, "%u", port); |
182 | if (!mangle_sip_packet(pskb, ctinfo, ct, &dptr, (*pskb)->len - dataoff, | 216 | if (!mangle_sip_packet(pskb, ctinfo, ct, &dptr, (*pskb)->len - dataoff, |
183 | buffer, bufflen, &ct_sip_hdrs[POS_MEDIA])) | 217 | buffer, bufflen, POS_MEDIA)) |
184 | return 0; | 218 | return 0; |
185 | 219 | ||
186 | return mangle_content_len(pskb, ctinfo, ct, dptr); | 220 | return mangle_content_len(pskb, ctinfo, ct, dptr); |
@@ -230,18 +264,17 @@ static unsigned int ip_nat_sdp(struct sk_buff **pskb, | |||
230 | 264 | ||
231 | static void __exit fini(void) | 265 | static void __exit fini(void) |
232 | { | 266 | { |
233 | ip_nat_sip_hook = NULL; | 267 | rcu_assign_pointer(ip_nat_sip_hook, NULL); |
234 | ip_nat_sdp_hook = NULL; | 268 | rcu_assign_pointer(ip_nat_sdp_hook, NULL); |
235 | /* Make sure noone calls it, meanwhile. */ | 269 | synchronize_rcu(); |
236 | synchronize_net(); | ||
237 | } | 270 | } |
238 | 271 | ||
239 | static int __init init(void) | 272 | static int __init init(void) |
240 | { | 273 | { |
241 | BUG_ON(ip_nat_sip_hook); | 274 | BUG_ON(rcu_dereference(ip_nat_sip_hook)); |
242 | BUG_ON(ip_nat_sdp_hook); | 275 | BUG_ON(rcu_dereference(ip_nat_sdp_hook)); |
243 | ip_nat_sip_hook = ip_nat_sip; | 276 | rcu_assign_pointer(ip_nat_sip_hook, ip_nat_sip); |
244 | ip_nat_sdp_hook = ip_nat_sdp; | 277 | rcu_assign_pointer(ip_nat_sdp_hook, ip_nat_sdp); |
245 | return 0; | 278 | return 0; |
246 | } | 279 | } |
247 | 280 | ||
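The rewritten ip_nat_sip() no longer patches a single hard-coded "ip:port" into the Via/Contact headers. addr_map_init() renders both conntrack tuples into "ip" and "ip:port" strings for each direction, and map_sip_addr() substitutes whichever of the current direction's source or destination appears in a matched header with the corresponding string from the opposite direction, leaving unrelated addresses untouched. A userspace-only sketch of that lookup; the structures, addresses and ports below are invented for the illustration:

#include <stdio.h>
#include <string.h>

enum { DIR_ORIGINAL, DIR_REPLY, DIR_MAX };

struct addr_strings {
        char src[sizeof("nnn.nnn.nnn.nnn:nnnnn")];
        char dst[sizeof("nnn.nnn.nnn.nnn:nnnnn")];
};

/* If the matched text is this direction's source, answer with the other
 * direction's destination, and vice versa; anything else is left alone. */
static const char *pick_replacement(const struct addr_strings map[DIR_MAX],
                                    int dir, const char *match)
{
        if (strcmp(match, map[dir].src) == 0)
                return map[!dir].dst;
        if (strcmp(match, map[dir].dst) == 0)
                return map[!dir].src;
        return NULL;
}

int main(void)
{
        struct addr_strings map[DIR_MAX];

        /* Hypothetical flow: 10.0.0.2 behind NAT address 192.0.2.1,
         * talking to the SIP server 198.51.100.7. */
        snprintf(map[DIR_ORIGINAL].src, sizeof(map[DIR_ORIGINAL].src), "10.0.0.2:5060");
        snprintf(map[DIR_ORIGINAL].dst, sizeof(map[DIR_ORIGINAL].dst), "198.51.100.7:5060");
        snprintf(map[DIR_REPLY].src, sizeof(map[DIR_REPLY].src), "198.51.100.7:5060");
        snprintf(map[DIR_REPLY].dst, sizeof(map[DIR_REPLY].dst), "192.0.2.1:5060");

        /* The private address in an outgoing header becomes the NAT address. */
        printf("%s -> %s\n", map[DIR_ORIGINAL].src,
               pick_replacement(map, DIR_ORIGINAL, "10.0.0.2:5060"));
        return 0;
}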
diff --git a/net/ipv4/netfilter/ip_nat_snmp_basic.c b/net/ipv4/netfilter/ip_nat_snmp_basic.c index 168f45fa1898..c3d9f3b090c4 100644 --- a/net/ipv4/netfilter/ip_nat_snmp_basic.c +++ b/net/ipv4/netfilter/ip_nat_snmp_basic.c | |||
@@ -64,7 +64,7 @@ MODULE_DESCRIPTION("Basic SNMP Application Layer Gateway"); | |||
64 | 64 | ||
65 | #define SNMP_PORT 161 | 65 | #define SNMP_PORT 161 |
66 | #define SNMP_TRAP_PORT 162 | 66 | #define SNMP_TRAP_PORT 162 |
67 | #define NOCT1(n) (u_int8_t )((n) & 0xff) | 67 | #define NOCT1(n) (*(u8 *)n) |
68 | 68 | ||
69 | static int debug; | 69 | static int debug; |
70 | static DEFINE_SPINLOCK(snmp_lock); | 70 | static DEFINE_SPINLOCK(snmp_lock); |
@@ -613,7 +613,7 @@ struct snmp_v1_trap | |||
613 | static inline void mangle_address(unsigned char *begin, | 613 | static inline void mangle_address(unsigned char *begin, |
614 | unsigned char *addr, | 614 | unsigned char *addr, |
615 | const struct oct1_map *map, | 615 | const struct oct1_map *map, |
616 | u_int16_t *check); | 616 | __sum16 *check); |
617 | struct snmp_cnv | 617 | struct snmp_cnv |
618 | { | 618 | { |
619 | unsigned int class; | 619 | unsigned int class; |
@@ -873,38 +873,24 @@ static unsigned char snmp_request_decode(struct asn1_ctx *ctx, | |||
873 | * Fast checksum update for possibly oddly-aligned UDP byte, from the | 873 | * Fast checksum update for possibly oddly-aligned UDP byte, from the |
874 | * code example in the draft. | 874 | * code example in the draft. |
875 | */ | 875 | */ |
876 | static void fast_csum(unsigned char *csum, | 876 | static void fast_csum(__sum16 *csum, |
877 | const unsigned char *optr, | 877 | const unsigned char *optr, |
878 | const unsigned char *nptr, | 878 | const unsigned char *nptr, |
879 | int odd) | 879 | int offset) |
880 | { | 880 | { |
881 | long x, old, new; | 881 | unsigned char s[4]; |
882 | 882 | ||
883 | x = csum[0] * 256 + csum[1]; | 883 | if (offset & 1) { |
884 | 884 | s[0] = s[2] = 0; | |
885 | x =~ x & 0xFFFF; | 885 | s[1] = ~*optr; |
886 | 886 | s[3] = *nptr; | |
887 | if (odd) old = optr[0] * 256; | 887 | } else { |
888 | else old = optr[0]; | 888 | s[1] = s[3] = 0; |
889 | 889 | s[0] = ~*optr; | |
890 | x -= old & 0xFFFF; | 890 | s[2] = *nptr; |
891 | if (x <= 0) { | ||
892 | x--; | ||
893 | x &= 0xFFFF; | ||
894 | } | ||
895 | |||
896 | if (odd) new = nptr[0] * 256; | ||
897 | else new = nptr[0]; | ||
898 | |||
899 | x += new & 0xFFFF; | ||
900 | if (x & 0x10000) { | ||
901 | x++; | ||
902 | x &= 0xFFFF; | ||
903 | } | 891 | } |
904 | 892 | ||
905 | x =~ x & 0xFFFF; | 893 | *csum = csum_fold(csum_partial(s, 4, ~csum_unfold(*csum))); |
906 | csum[0] = x / 256; | ||
907 | csum[1] = x & 0xFF; | ||
908 | } | 894 | } |
909 | 895 | ||
910 | /* | 896 | /* |
@@ -915,9 +901,9 @@ static void fast_csum(unsigned char *csum, | |||
915 | static inline void mangle_address(unsigned char *begin, | 901 | static inline void mangle_address(unsigned char *begin, |
916 | unsigned char *addr, | 902 | unsigned char *addr, |
917 | const struct oct1_map *map, | 903 | const struct oct1_map *map, |
918 | u_int16_t *check) | 904 | __sum16 *check) |
919 | { | 905 | { |
920 | if (map->from == NOCT1(*addr)) { | 906 | if (map->from == NOCT1(addr)) { |
921 | u_int32_t old; | 907 | u_int32_t old; |
922 | 908 | ||
923 | if (debug) | 909 | if (debug) |
@@ -927,11 +913,8 @@ static inline void mangle_address(unsigned char *begin, | |||
927 | 913 | ||
928 | /* Update UDP checksum if being used */ | 914 | /* Update UDP checksum if being used */ |
929 | if (*check) { | 915 | if (*check) { |
930 | unsigned char odd = !((addr - begin) % 2); | 916 | fast_csum(check, |
931 | 917 | &map->from, &map->to, addr - begin); | |
932 | fast_csum((unsigned char *)check, | ||
933 | &map->from, &map->to, odd); | ||
934 | |||
935 | } | 918 | } |
936 | 919 | ||
937 | if (debug) | 920 | if (debug) |
@@ -943,7 +926,7 @@ static inline void mangle_address(unsigned char *begin, | |||
943 | static unsigned char snmp_trap_decode(struct asn1_ctx *ctx, | 926 | static unsigned char snmp_trap_decode(struct asn1_ctx *ctx, |
944 | struct snmp_v1_trap *trap, | 927 | struct snmp_v1_trap *trap, |
945 | const struct oct1_map *map, | 928 | const struct oct1_map *map, |
946 | u_int16_t *check) | 929 | __sum16 *check) |
947 | { | 930 | { |
948 | unsigned int cls, con, tag, len; | 931 | unsigned int cls, con, tag, len; |
949 | unsigned char *end; | 932 | unsigned char *end; |
@@ -1037,7 +1020,7 @@ static void hex_dump(unsigned char *buf, size_t len) | |||
1037 | static int snmp_parse_mangle(unsigned char *msg, | 1020 | static int snmp_parse_mangle(unsigned char *msg, |
1038 | u_int16_t len, | 1021 | u_int16_t len, |
1039 | const struct oct1_map *map, | 1022 | const struct oct1_map *map, |
1040 | u_int16_t *check) | 1023 | __sum16 *check) |
1041 | { | 1024 | { |
1042 | unsigned char *eoc, *end; | 1025 | unsigned char *eoc, *end; |
1043 | unsigned int cls, con, tag, vers, pdutype; | 1026 | unsigned int cls, con, tag, vers, pdutype; |
@@ -1223,12 +1206,12 @@ static int snmp_translate(struct ip_conntrack *ct, | |||
1223 | */ | 1206 | */ |
1224 | if (dir == IP_CT_DIR_ORIGINAL) { | 1207 | if (dir == IP_CT_DIR_ORIGINAL) { |
1225 | /* SNAT traps */ | 1208 | /* SNAT traps */ |
1226 | map.from = NOCT1(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip); | 1209 | map.from = NOCT1(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip); |
1227 | map.to = NOCT1(ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.ip); | 1210 | map.to = NOCT1(&ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.ip); |
1228 | } else { | 1211 | } else { |
1229 | /* DNAT replies */ | 1212 | /* DNAT replies */ |
1230 | map.from = NOCT1(ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.ip); | 1213 | map.from = NOCT1(&ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.ip); |
1231 | map.to = NOCT1(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.ip); | 1214 | map.to = NOCT1(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.ip); |
1232 | } | 1215 | } |
1233 | 1216 | ||
1234 | if (map.from == map.to) | 1217 | if (map.from == map.to) |
@@ -1294,11 +1277,11 @@ static struct ip_conntrack_helper snmp_helper = { | |||
1294 | .help = help, | 1277 | .help = help, |
1295 | .name = "snmp", | 1278 | .name = "snmp", |
1296 | 1279 | ||
1297 | .tuple = { .src = { .u = { __constant_htons(SNMP_PORT) } }, | 1280 | .tuple = {.src = {.u = {.udp = {.port = __constant_htons(SNMP_PORT)}}}, |
1298 | .dst = { .protonum = IPPROTO_UDP }, | 1281 | .dst = {.protonum = IPPROTO_UDP}, |
1299 | }, | 1282 | }, |
1300 | .mask = { .src = { .u = { 0xFFFF } }, | 1283 | .mask = {.src = {.u = {0xFFFF}}, |
1301 | .dst = { .protonum = 0xFF }, | 1284 | .dst = {.protonum = 0xFF}, |
1302 | }, | 1285 | }, |
1303 | }; | 1286 | }; |
1304 | 1287 | ||
@@ -1309,11 +1292,11 @@ static struct ip_conntrack_helper snmp_trap_helper = { | |||
1309 | .help = help, | 1292 | .help = help, |
1310 | .name = "snmp_trap", | 1293 | .name = "snmp_trap", |
1311 | 1294 | ||
1312 | .tuple = { .src = { .u = { __constant_htons(SNMP_TRAP_PORT) } }, | 1295 | .tuple = {.src = {.u = {.udp = {.port = __constant_htons(SNMP_TRAP_PORT)}}}, |
1313 | .dst = { .protonum = IPPROTO_UDP }, | 1296 | .dst = {.protonum = IPPROTO_UDP}, |
1314 | }, | 1297 | }, |
1315 | .mask = { .src = { .u = { 0xFFFF } }, | 1298 | .mask = {.src = {.u = {0xFFFF}}, |
1316 | .dst = { .protonum = 0xFF }, | 1299 | .dst = {.protonum = 0xFF}, |
1317 | }, | 1300 | }, |
1318 | }; | 1301 | }; |
1319 | 1302 | ||
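The new fast_csum() stops doing open-coded 16-bit arithmetic: it stages the complemented old byte and the new byte at the correct parity inside a four-byte scratch block and hands it to csum_partial()/csum_fold(), with the byte's offset (rather than a precomputed "odd" flag) deciding whether it is the high or the low half of its 16-bit checksum word. The same one-byte update written out directly, without the kernel primitives; this is a sketch and the starting checksum below is arbitrary:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint16_t csum_fold32(uint32_t sum)
{
        while (sum >> 16)
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)sum;
}

/* Update a ones-complement checksum after changing one byte of the data.
 * An even offset is the high byte of its 16-bit word, an odd offset the
 * low byte; the unchanged neighbour byte cancels out of ~m + m', which is
 * why the kernel version can leave its scratch bytes zero. */
static uint16_t csum_replace1(uint16_t check, uint8_t from, uint8_t to,
                              size_t offset)
{
        uint16_t m_old = (offset & 1) ? from : (uint16_t)(from << 8);
        uint16_t m_new = (offset & 1) ? to   : (uint16_t)(to << 8);
        uint32_t sum = (uint16_t)~check;

        sum += (uint16_t)~m_old;
        sum += m_new;
        return (uint16_t)~csum_fold32(sum);
}

int main(void)
{
        /* Hypothetical: the first octet of an embedded address changes
         * from 10 to 192 at even offset 24 of the checksummed data. */
        uint16_t check = 0x1c46;

        printf("0x%04x -> 0x%04x\n", check, csum_replace1(check, 10, 192, 24));
        return 0;
}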
diff --git a/net/ipv4/netfilter/ip_nat_standalone.c b/net/ipv4/netfilter/ip_nat_standalone.c index d85d2de50449..ad66328baa5d 100644 --- a/net/ipv4/netfilter/ip_nat_standalone.c +++ b/net/ipv4/netfilter/ip_nat_standalone.c | |||
@@ -44,12 +44,6 @@ | |||
44 | #define DEBUGP(format, args...) | 44 | #define DEBUGP(format, args...) |
45 | #endif | 45 | #endif |
46 | 46 | ||
47 | #define HOOKNAME(hooknum) ((hooknum) == NF_IP_POST_ROUTING ? "POST_ROUTING" \ | ||
48 | : ((hooknum) == NF_IP_PRE_ROUTING ? "PRE_ROUTING" \ | ||
49 | : ((hooknum) == NF_IP_LOCAL_OUT ? "LOCAL_OUT" \ | ||
50 | : ((hooknum) == NF_IP_LOCAL_IN ? "LOCAL_IN" \ | ||
51 | : "*ERROR*"))) | ||
52 | |||
53 | #ifdef CONFIG_XFRM | 47 | #ifdef CONFIG_XFRM |
54 | static void nat_decode_session(struct sk_buff *skb, struct flowi *fl) | 48 | static void nat_decode_session(struct sk_buff *skb, struct flowi *fl) |
55 | { | 49 | { |
diff --git a/net/ipv4/netfilter/ip_nat_tftp.c b/net/ipv4/netfilter/ip_nat_tftp.c index 94a78015451c..604793536fc1 100644 --- a/net/ipv4/netfilter/ip_nat_tftp.c +++ b/net/ipv4/netfilter/ip_nat_tftp.c | |||
@@ -55,15 +55,14 @@ static unsigned int help(struct sk_buff **pskb, | |||
55 | 55 | ||
56 | static void __exit ip_nat_tftp_fini(void) | 56 | static void __exit ip_nat_tftp_fini(void) |
57 | { | 57 | { |
58 | ip_nat_tftp_hook = NULL; | 58 | rcu_assign_pointer(ip_nat_tftp_hook, NULL); |
59 | /* Make sure noone calls it, meanwhile. */ | 59 | synchronize_rcu(); |
60 | synchronize_net(); | ||
61 | } | 60 | } |
62 | 61 | ||
63 | static int __init ip_nat_tftp_init(void) | 62 | static int __init ip_nat_tftp_init(void) |
64 | { | 63 | { |
65 | BUG_ON(ip_nat_tftp_hook); | 64 | BUG_ON(rcu_dereference(ip_nat_tftp_hook)); |
66 | ip_nat_tftp_hook = help; | 65 | rcu_assign_pointer(ip_nat_tftp_hook, help); |
67 | return 0; | 66 | return 0; |
68 | } | 67 | } |
69 | 68 | ||
diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c index 97556cc2e4e0..cd520df4dcf4 100644 --- a/net/ipv4/netfilter/ip_queue.c +++ b/net/ipv4/netfilter/ip_queue.c | |||
@@ -243,7 +243,7 @@ ipq_build_packet_message(struct ipq_queue_entry *entry, int *errp) | |||
243 | pmsg->data_len = data_len; | 243 | pmsg->data_len = data_len; |
244 | pmsg->timestamp_sec = entry->skb->tstamp.off_sec; | 244 | pmsg->timestamp_sec = entry->skb->tstamp.off_sec; |
245 | pmsg->timestamp_usec = entry->skb->tstamp.off_usec; | 245 | pmsg->timestamp_usec = entry->skb->tstamp.off_usec; |
246 | pmsg->mark = entry->skb->nfmark; | 246 | pmsg->mark = entry->skb->mark; |
247 | pmsg->hook = entry->info->hook; | 247 | pmsg->hook = entry->info->hook; |
248 | pmsg->hw_protocol = entry->skb->protocol; | 248 | pmsg->hw_protocol = entry->skb->protocol; |
249 | 249 | ||
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c index 7a29d6e7baa7..098365062234 100644 --- a/net/ipv4/netfilter/ipt_CLUSTERIP.c +++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c | |||
@@ -40,8 +40,6 @@ | |||
40 | #define DEBUGP | 40 | #define DEBUGP |
41 | #endif | 41 | #endif |
42 | 42 | ||
43 | #define ASSERT_READ_LOCK(x) | ||
44 | |||
45 | MODULE_LICENSE("GPL"); | 43 | MODULE_LICENSE("GPL"); |
46 | MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>"); | 44 | MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>"); |
47 | MODULE_DESCRIPTION("iptables target for CLUSTERIP"); | 45 | MODULE_DESCRIPTION("iptables target for CLUSTERIP"); |
@@ -123,7 +121,6 @@ __clusterip_config_find(__be32 clusterip) | |||
123 | { | 121 | { |
124 | struct list_head *pos; | 122 | struct list_head *pos; |
125 | 123 | ||
126 | ASSERT_READ_LOCK(&clusterip_lock); | ||
127 | list_for_each(pos, &clusterip_configs) { | 124 | list_for_each(pos, &clusterip_configs) { |
128 | struct clusterip_config *c = list_entry(pos, | 125 | struct clusterip_config *c = list_entry(pos, |
129 | struct clusterip_config, list); | 126 | struct clusterip_config, list); |
@@ -170,7 +167,6 @@ clusterip_config_init(struct ipt_clusterip_tgt_info *i, __be32 ip, | |||
170 | struct net_device *dev) | 167 | struct net_device *dev) |
171 | { | 168 | { |
172 | struct clusterip_config *c; | 169 | struct clusterip_config *c; |
173 | char buffer[16]; | ||
174 | 170 | ||
175 | c = kzalloc(sizeof(*c), GFP_ATOMIC); | 171 | c = kzalloc(sizeof(*c), GFP_ATOMIC); |
176 | if (!c) | 172 | if (!c) |
@@ -187,12 +183,17 @@ clusterip_config_init(struct ipt_clusterip_tgt_info *i, __be32 ip, | |||
187 | atomic_set(&c->entries, 1); | 183 | atomic_set(&c->entries, 1); |
188 | 184 | ||
189 | #ifdef CONFIG_PROC_FS | 185 | #ifdef CONFIG_PROC_FS |
190 | /* create proc dir entry */ | 186 | { |
191 | sprintf(buffer, "%u.%u.%u.%u", NIPQUAD(ip)); | 187 | char buffer[16]; |
192 | c->pde = create_proc_entry(buffer, S_IWUSR|S_IRUSR, clusterip_procdir); | 188 | |
193 | if (!c->pde) { | 189 | /* create proc dir entry */ |
194 | kfree(c); | 190 | sprintf(buffer, "%u.%u.%u.%u", NIPQUAD(ip)); |
195 | return NULL; | 191 | c->pde = create_proc_entry(buffer, S_IWUSR|S_IRUSR, |
192 | clusterip_procdir); | ||
193 | if (!c->pde) { | ||
194 | kfree(c); | ||
195 | return NULL; | ||
196 | } | ||
196 | } | 197 | } |
197 | c->pde->proc_fops = &clusterip_proc_fops; | 198 | c->pde->proc_fops = &clusterip_proc_fops; |
198 | c->pde->data = c; | 199 | c->pde->data = c; |
@@ -205,6 +206,7 @@ clusterip_config_init(struct ipt_clusterip_tgt_info *i, __be32 ip, | |||
205 | return c; | 206 | return c; |
206 | } | 207 | } |
207 | 208 | ||
209 | #ifdef CONFIG_PROC_FS | ||
208 | static int | 210 | static int |
209 | clusterip_add_node(struct clusterip_config *c, u_int16_t nodenum) | 211 | clusterip_add_node(struct clusterip_config *c, u_int16_t nodenum) |
210 | { | 212 | { |
@@ -232,6 +234,7 @@ clusterip_del_node(struct clusterip_config *c, u_int16_t nodenum) | |||
232 | 234 | ||
233 | return 1; | 235 | return 1; |
234 | } | 236 | } |
237 | #endif | ||
235 | 238 | ||
236 | static inline u_int32_t | 239 | static inline u_int32_t |
237 | clusterip_hashfn(struct sk_buff *skb, struct clusterip_config *config) | 240 | clusterip_hashfn(struct sk_buff *skb, struct clusterip_config *config) |
@@ -737,8 +740,10 @@ static int __init ipt_clusterip_init(void) | |||
737 | CLUSTERIP_VERSION); | 740 | CLUSTERIP_VERSION); |
738 | return 0; | 741 | return 0; |
739 | 742 | ||
743 | #ifdef CONFIG_PROC_FS | ||
740 | cleanup_hook: | 744 | cleanup_hook: |
741 | nf_unregister_hook(&cip_arp_ops); | 745 | nf_unregister_hook(&cip_arp_ops); |
746 | #endif /* CONFIG_PROC_FS */ | ||
742 | cleanup_target: | 747 | cleanup_target: |
743 | ipt_unregister_target(&clusterip_tgt); | 748 | ipt_unregister_target(&clusterip_tgt); |
744 | return ret; | 749 | return ret; |
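As a brief aside on the CLUSTERIP hunk above: the patch scopes the 16-byte name buffer to the CONFIG_PROC_FS block that actually uses it. That size fits the widest dotted quad, "255.255.255.255" (15 characters plus the terminating NUL), and NIPQUAD() is the classic byte-splitting macro of this era. A hedged userspace sketch of the same formatting, with the macro reproduced from memory rather than copied from the tree:

#include <stdio.h>

/* Classic NIPQUAD() byte-splitting macro; the in-tree definition of this
 * era is essentially the same, minus the extra parentheses added here. */
#define NIPQUAD(addr) \
	((unsigned char *)&(addr))[0], \
	((unsigned char *)&(addr))[1], \
	((unsigned char *)&(addr))[2], \
	((unsigned char *)&(addr))[3]

int main(void)
{
	unsigned int ip = 0x0100007f;	/* stand-in for the __be32 address */
	char buffer[16];		/* "255.255.255.255" + NUL */

	sprintf(buffer, "%u.%u.%u.%u", NIPQUAD(ip));
	puts(buffer);			/* prints 127.0.0.1 on little-endian */
	return 0;
}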
diff --git a/net/ipv4/netfilter/ipt_ECN.c b/net/ipv4/netfilter/ipt_ECN.c index 1aa4517fbcdb..b55d670a24df 100644 --- a/net/ipv4/netfilter/ipt_ECN.c +++ b/net/ipv4/netfilter/ipt_ECN.c | |||
@@ -28,17 +28,16 @@ static inline int | |||
28 | set_ect_ip(struct sk_buff **pskb, const struct ipt_ECN_info *einfo) | 28 | set_ect_ip(struct sk_buff **pskb, const struct ipt_ECN_info *einfo) |
29 | { | 29 | { |
30 | struct iphdr *iph = (*pskb)->nh.iph; | 30 | struct iphdr *iph = (*pskb)->nh.iph; |
31 | u_int16_t oldtos; | ||
32 | 31 | ||
33 | if ((iph->tos & IPT_ECN_IP_MASK) != (einfo->ip_ect & IPT_ECN_IP_MASK)) { | 32 | if ((iph->tos & IPT_ECN_IP_MASK) != (einfo->ip_ect & IPT_ECN_IP_MASK)) { |
33 | __u8 oldtos; | ||
34 | if (!skb_make_writable(pskb, sizeof(struct iphdr))) | 34 | if (!skb_make_writable(pskb, sizeof(struct iphdr))) |
35 | return 0; | 35 | return 0; |
36 | iph = (*pskb)->nh.iph; | 36 | iph = (*pskb)->nh.iph; |
37 | oldtos = iph->tos; | 37 | oldtos = iph->tos; |
38 | iph->tos &= ~IPT_ECN_IP_MASK; | 38 | iph->tos &= ~IPT_ECN_IP_MASK; |
39 | iph->tos |= (einfo->ip_ect & IPT_ECN_IP_MASK); | 39 | iph->tos |= (einfo->ip_ect & IPT_ECN_IP_MASK); |
40 | iph->check = nf_csum_update(htons(oldtos) ^ htons(0xFFFF), | 40 | nf_csum_replace2(&iph->check, htons(oldtos), htons(iph->tos)); |
41 | htons(iph->tos), iph->check); | ||
42 | } | 41 | } |
43 | return 1; | 42 | return 1; |
44 | } | 43 | } |
@@ -72,10 +71,8 @@ set_ect_tcp(struct sk_buff **pskb, const struct ipt_ECN_info *einfo) | |||
72 | if (einfo->operation & IPT_ECN_OP_SET_CWR) | 71 | if (einfo->operation & IPT_ECN_OP_SET_CWR) |
73 | tcph->cwr = einfo->proto.tcp.cwr; | 72 | tcph->cwr = einfo->proto.tcp.cwr; |
74 | 73 | ||
75 | tcph->check = nf_proto_csum_update((*pskb), | 74 | nf_proto_csum_replace2(&tcph->check, *pskb, |
76 | oldval ^ htons(0xFFFF), | 75 | oldval, ((__be16 *)tcph)[6], 0); |
77 | ((__be16 *)tcph)[6], | ||
78 | tcph->check, 0); | ||
79 | return 1; | 76 | return 1; |
80 | } | 77 | } |
81 | 78 | ||
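The ECN conversion above, like the TOS, TTL and TCPMSS hunks further down, replaces open-coded nf_csum_update() arithmetic with the nf_csum_replace2()/nf_proto_csum_replace2() helpers. These perform the incremental checksum update of RFC 1624: subtract the old 16-bit value from the one's-complement sum and add the new one. A minimal userspace sketch of that arithmetic, not the kernel implementation (byte order only needs to be consistent across the three inputs):

#include <stdint.h>

/* Incremental update of a 16-bit one's-complement checksum when one
 * 16-bit field changes (RFC 1624, eqn. 3: HC' = ~(~HC + ~m + m')). */
static uint16_t csum_replace2(uint16_t check, uint16_t old_val, uint16_t new_val)
{
	uint32_t sum = (uint16_t)~check;

	sum += (uint16_t)~old_val;
	sum += new_val;
	sum = (sum & 0xffff) + (sum >> 16);	/* fold the carries ...    */
	sum = (sum & 0xffff) + (sum >> 16);	/* ... twice is enough here */
	return (uint16_t)~sum;
}

The TTL hunk, for example, passes htons(old_ttl << 8) and htons(new_ttl << 8) through the helper because the TTL occupies the high-order byte of a 16-bit word in the IPv4 header.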
diff --git a/net/ipv4/netfilter/ipt_LOG.c b/net/ipv4/netfilter/ipt_LOG.c index 7dc820df8bc5..c96de16fefae 100644 --- a/net/ipv4/netfilter/ipt_LOG.c +++ b/net/ipv4/netfilter/ipt_LOG.c | |||
@@ -171,11 +171,15 @@ static void dump_packet(const struct nf_loginfo *info, | |||
171 | } | 171 | } |
172 | break; | 172 | break; |
173 | } | 173 | } |
174 | case IPPROTO_UDP: { | 174 | case IPPROTO_UDP: |
175 | case IPPROTO_UDPLITE: { | ||
175 | struct udphdr _udph, *uh; | 176 | struct udphdr _udph, *uh; |
176 | 177 | ||
177 | /* Max length: 10 "PROTO=UDP " */ | 178 | if (ih->protocol == IPPROTO_UDP) |
178 | printk("PROTO=UDP "); | 179 | /* Max length: 10 "PROTO=UDP " */ |
180 | printk("PROTO=UDP " ); | ||
181 | else /* Max length: 14 "PROTO=UDPLITE " */ | ||
182 | printk("PROTO=UDPLITE "); | ||
179 | 183 | ||
180 | if (ntohs(ih->frag_off) & IP_OFFSET) | 184 | if (ntohs(ih->frag_off) & IP_OFFSET) |
181 | break; | 185 | break; |
@@ -341,6 +345,7 @@ static void dump_packet(const struct nf_loginfo *info, | |||
341 | /* IP: 40+46+6+11+127 = 230 */ | 345 | /* IP: 40+46+6+11+127 = 230 */ |
342 | /* TCP: 10+max(25,20+30+13+9+32+11+127) = 252 */ | 346 | /* TCP: 10+max(25,20+30+13+9+32+11+127) = 252 */ |
343 | /* UDP: 10+max(25,20) = 35 */ | 347 | /* UDP: 10+max(25,20) = 35 */ |
348 | /* UDPLITE: 14+max(25,20) = 39 */ | ||
344 | /* ICMP: 11+max(25, 18+25+max(19,14,24+3+n+10,3+n+10)) = 91+n */ | 349 | /* ICMP: 11+max(25, 18+25+max(19,14,24+3+n+10,3+n+10)) = 91+n */ |
345 | /* ESP: 10+max(25)+15 = 50 */ | 350 | /* ESP: 10+max(25)+15 = 50 */ |
346 | /* AH: 9+max(25)+15 = 49 */ | 351 | /* AH: 9+max(25)+15 = 49 */ |
@@ -425,13 +430,8 @@ ipt_log_target(struct sk_buff **pskb, | |||
425 | li.u.log.level = loginfo->level; | 430 | li.u.log.level = loginfo->level; |
426 | li.u.log.logflags = loginfo->logflags; | 431 | li.u.log.logflags = loginfo->logflags; |
427 | 432 | ||
428 | if (loginfo->logflags & IPT_LOG_NFLOG) | 433 | ipt_log_packet(PF_INET, hooknum, *pskb, in, out, &li, |
429 | nf_log_packet(PF_INET, hooknum, *pskb, in, out, &li, | 434 | loginfo->prefix); |
430 | "%s", loginfo->prefix); | ||
431 | else | ||
432 | ipt_log_packet(PF_INET, hooknum, *pskb, in, out, &li, | ||
433 | loginfo->prefix); | ||
434 | |||
435 | return IPT_CONTINUE; | 435 | return IPT_CONTINUE; |
436 | } | 436 | } |
437 | 437 | ||
diff --git a/net/ipv4/netfilter/ipt_MASQUERADE.c b/net/ipv4/netfilter/ipt_MASQUERADE.c index 3dbfcfac8a84..28b9233956b5 100644 --- a/net/ipv4/netfilter/ipt_MASQUERADE.c +++ b/net/ipv4/netfilter/ipt_MASQUERADE.c | |||
@@ -2,7 +2,7 @@ | |||
2 | (depending on route). */ | 2 | (depending on route). */ |
3 | 3 | ||
4 | /* (C) 1999-2001 Paul `Rusty' Russell | 4 | /* (C) 1999-2001 Paul `Rusty' Russell |
5 | * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org> | 5 | * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org> |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
8 | * it under the terms of the GNU General Public License version 2 as | 8 | * it under the terms of the GNU General Public License version 2 as |
@@ -20,7 +20,11 @@ | |||
20 | #include <net/checksum.h> | 20 | #include <net/checksum.h> |
21 | #include <net/route.h> | 21 | #include <net/route.h> |
22 | #include <linux/netfilter_ipv4.h> | 22 | #include <linux/netfilter_ipv4.h> |
23 | #ifdef CONFIG_NF_NAT_NEEDED | ||
24 | #include <net/netfilter/nf_nat_rule.h> | ||
25 | #else | ||
23 | #include <linux/netfilter_ipv4/ip_nat_rule.h> | 26 | #include <linux/netfilter_ipv4/ip_nat_rule.h> |
27 | #endif | ||
24 | #include <linux/netfilter_ipv4/ip_tables.h> | 28 | #include <linux/netfilter_ipv4/ip_tables.h> |
25 | 29 | ||
26 | MODULE_LICENSE("GPL"); | 30 | MODULE_LICENSE("GPL"); |
@@ -65,23 +69,33 @@ masquerade_target(struct sk_buff **pskb, | |||
65 | const struct xt_target *target, | 69 | const struct xt_target *target, |
66 | const void *targinfo) | 70 | const void *targinfo) |
67 | { | 71 | { |
72 | #ifdef CONFIG_NF_NAT_NEEDED | ||
73 | struct nf_conn_nat *nat; | ||
74 | #endif | ||
68 | struct ip_conntrack *ct; | 75 | struct ip_conntrack *ct; |
69 | enum ip_conntrack_info ctinfo; | 76 | enum ip_conntrack_info ctinfo; |
70 | const struct ip_nat_multi_range_compat *mr; | ||
71 | struct ip_nat_range newrange; | 77 | struct ip_nat_range newrange; |
78 | const struct ip_nat_multi_range_compat *mr; | ||
72 | struct rtable *rt; | 79 | struct rtable *rt; |
73 | __be32 newsrc; | 80 | __be32 newsrc; |
74 | 81 | ||
75 | IP_NF_ASSERT(hooknum == NF_IP_POST_ROUTING); | 82 | IP_NF_ASSERT(hooknum == NF_IP_POST_ROUTING); |
76 | 83 | ||
77 | ct = ip_conntrack_get(*pskb, &ctinfo); | 84 | ct = ip_conntrack_get(*pskb, &ctinfo); |
85 | #ifdef CONFIG_NF_NAT_NEEDED | ||
86 | nat = nfct_nat(ct); | ||
87 | #endif | ||
78 | IP_NF_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED | 88 | IP_NF_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED |
79 | || ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY)); | 89 | || ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY)); |
80 | 90 | ||
81 | /* Source address is 0.0.0.0 - locally generated packet that is | 91 | /* Source address is 0.0.0.0 - locally generated packet that is |
82 | * probably not supposed to be masqueraded. | 92 | * probably not supposed to be masqueraded. |
83 | */ | 93 | */ |
94 | #ifdef CONFIG_NF_NAT_NEEDED | ||
95 | if (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip == 0) | ||
96 | #else | ||
84 | if (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip == 0) | 97 | if (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip == 0) |
98 | #endif | ||
85 | return NF_ACCEPT; | 99 | return NF_ACCEPT; |
86 | 100 | ||
87 | mr = targinfo; | 101 | mr = targinfo; |
@@ -93,7 +107,11 @@ masquerade_target(struct sk_buff **pskb, | |||
93 | } | 107 | } |
94 | 108 | ||
95 | write_lock_bh(&masq_lock); | 109 | write_lock_bh(&masq_lock); |
110 | #ifdef CONFIG_NF_NAT_NEEDED | ||
111 | nat->masq_index = out->ifindex; | ||
112 | #else | ||
96 | ct->nat.masq_index = out->ifindex; | 113 | ct->nat.masq_index = out->ifindex; |
114 | #endif | ||
97 | write_unlock_bh(&masq_lock); | 115 | write_unlock_bh(&masq_lock); |
98 | 116 | ||
99 | /* Transfer from original range. */ | 117 | /* Transfer from original range. */ |
@@ -109,10 +127,17 @@ masquerade_target(struct sk_buff **pskb, | |||
109 | static inline int | 127 | static inline int |
110 | device_cmp(struct ip_conntrack *i, void *ifindex) | 128 | device_cmp(struct ip_conntrack *i, void *ifindex) |
111 | { | 129 | { |
130 | #ifdef CONFIG_NF_NAT_NEEDED | ||
131 | struct nf_conn_nat *nat = nfct_nat(i); | ||
132 | #endif | ||
112 | int ret; | 133 | int ret; |
113 | 134 | ||
114 | read_lock_bh(&masq_lock); | 135 | read_lock_bh(&masq_lock); |
136 | #ifdef CONFIG_NF_NAT_NEEDED | ||
137 | ret = (nat->masq_index == (int)(long)ifindex); | ||
138 | #else | ||
115 | ret = (i->nat.masq_index == (int)(long)ifindex); | 139 | ret = (i->nat.masq_index == (int)(long)ifindex); |
140 | #endif | ||
116 | read_unlock_bh(&masq_lock); | 141 | read_unlock_bh(&masq_lock); |
117 | 142 | ||
118 | return ret; | 143 | return ret; |
diff --git a/net/ipv4/netfilter/ipt_NETMAP.c b/net/ipv4/netfilter/ipt_NETMAP.c index 58a88f227108..9390e90f2b25 100644 --- a/net/ipv4/netfilter/ipt_NETMAP.c +++ b/net/ipv4/netfilter/ipt_NETMAP.c | |||
@@ -15,7 +15,11 @@ | |||
15 | #include <linux/netdevice.h> | 15 | #include <linux/netdevice.h> |
16 | #include <linux/netfilter.h> | 16 | #include <linux/netfilter.h> |
17 | #include <linux/netfilter_ipv4.h> | 17 | #include <linux/netfilter_ipv4.h> |
18 | #ifdef CONFIG_NF_NAT_NEEDED | ||
19 | #include <net/netfilter/nf_nat_rule.h> | ||
20 | #else | ||
18 | #include <linux/netfilter_ipv4/ip_nat_rule.h> | 21 | #include <linux/netfilter_ipv4/ip_nat_rule.h> |
22 | #endif | ||
19 | 23 | ||
20 | #define MODULENAME "NETMAP" | 24 | #define MODULENAME "NETMAP" |
21 | MODULE_LICENSE("GPL"); | 25 | MODULE_LICENSE("GPL"); |
diff --git a/net/ipv4/netfilter/ipt_REDIRECT.c b/net/ipv4/netfilter/ipt_REDIRECT.c index c0dcfe9d610c..462eceb3a1b1 100644 --- a/net/ipv4/netfilter/ipt_REDIRECT.c +++ b/net/ipv4/netfilter/ipt_REDIRECT.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* Redirect. Simple mapping which alters dst to a local IP address. */ | 1 | /* Redirect. Simple mapping which alters dst to a local IP address. */ |
2 | /* (C) 1999-2001 Paul `Rusty' Russell | 2 | /* (C) 1999-2001 Paul `Rusty' Russell |
3 | * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org> | 3 | * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org> |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or modify | 5 | * This program is free software; you can redistribute it and/or modify |
6 | * it under the terms of the GNU General Public License version 2 as | 6 | * it under the terms of the GNU General Public License version 2 as |
@@ -18,7 +18,11 @@ | |||
18 | #include <net/protocol.h> | 18 | #include <net/protocol.h> |
19 | #include <net/checksum.h> | 19 | #include <net/checksum.h> |
20 | #include <linux/netfilter_ipv4.h> | 20 | #include <linux/netfilter_ipv4.h> |
21 | #ifdef CONFIG_NF_NAT_NEEDED | ||
22 | #include <net/netfilter/nf_nat_rule.h> | ||
23 | #else | ||
21 | #include <linux/netfilter_ipv4/ip_nat_rule.h> | 24 | #include <linux/netfilter_ipv4/ip_nat_rule.h> |
25 | #endif | ||
22 | 26 | ||
23 | MODULE_LICENSE("GPL"); | 27 | MODULE_LICENSE("GPL"); |
24 | MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); | 28 | MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); |
diff --git a/net/ipv4/netfilter/ipt_REJECT.c b/net/ipv4/netfilter/ipt_REJECT.c index ad0312d0e4fd..f0319e5ee437 100644 --- a/net/ipv4/netfilter/ipt_REJECT.c +++ b/net/ipv4/netfilter/ipt_REJECT.c | |||
@@ -76,7 +76,7 @@ static void send_reset(struct sk_buff *oldskb, int hook) | |||
76 | 76 | ||
77 | /* This packet will not be the same as the other: clear nf fields */ | 77 | /* This packet will not be the same as the other: clear nf fields */ |
78 | nf_reset(nskb); | 78 | nf_reset(nskb); |
79 | nskb->nfmark = 0; | 79 | nskb->mark = 0; |
80 | skb_init_secmark(nskb); | 80 | skb_init_secmark(nskb); |
81 | 81 | ||
82 | tcph = (struct tcphdr *)((u_int32_t*)nskb->nh.iph + nskb->nh.iph->ihl); | 82 | tcph = (struct tcphdr *)((u_int32_t*)nskb->nh.iph + nskb->nh.iph->ihl); |
@@ -114,6 +114,14 @@ static void send_reset(struct sk_buff *oldskb, int hook) | |||
114 | tcph->window = 0; | 114 | tcph->window = 0; |
115 | tcph->urg_ptr = 0; | 115 | tcph->urg_ptr = 0; |
116 | 116 | ||
117 | /* Adjust TCP checksum */ | ||
118 | tcph->check = 0; | ||
119 | tcph->check = tcp_v4_check(tcph, sizeof(struct tcphdr), | ||
120 | nskb->nh.iph->saddr, | ||
121 | nskb->nh.iph->daddr, | ||
122 | csum_partial((char *)tcph, | ||
123 | sizeof(struct tcphdr), 0)); | ||
124 | |||
117 | /* Set DF, id = 0 */ | 125 | /* Set DF, id = 0 */ |
118 | nskb->nh.iph->frag_off = htons(IP_DF); | 126 | nskb->nh.iph->frag_off = htons(IP_DF); |
119 | nskb->nh.iph->id = 0; | 127 | nskb->nh.iph->id = 0; |
@@ -129,14 +137,8 @@ static void send_reset(struct sk_buff *oldskb, int hook) | |||
129 | if (ip_route_me_harder(&nskb, addr_type)) | 137 | if (ip_route_me_harder(&nskb, addr_type)) |
130 | goto free_nskb; | 138 | goto free_nskb; |
131 | 139 | ||
132 | /* Adjust TCP checksum */ | ||
133 | nskb->ip_summed = CHECKSUM_NONE; | 140 | nskb->ip_summed = CHECKSUM_NONE; |
134 | tcph->check = 0; | 141 | |
135 | tcph->check = tcp_v4_check(tcph, sizeof(struct tcphdr), | ||
136 | nskb->nh.iph->saddr, | ||
137 | nskb->nh.iph->daddr, | ||
138 | csum_partial((char *)tcph, | ||
139 | sizeof(struct tcphdr), 0)); | ||
140 | /* Adjust IP TTL */ | 142 | /* Adjust IP TTL */ |
141 | nskb->nh.iph->ttl = dst_metric(nskb->dst, RTAX_HOPLIMIT); | 143 | nskb->nh.iph->ttl = dst_metric(nskb->dst, RTAX_HOPLIMIT); |
142 | 144 | ||
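In the REJECT hunks above, the full TCP checksum of the generated RST is now computed before the ip_route_me_harder() call; only the ip_summed reset stays in the later block. The checksum itself is the standard one's-complement sum over the IPv4 pseudo-header and the TCP header, taken with the check field zeroed first. A self-contained userspace sketch of that computation (the helper names below are invented for illustration and are not the kernel's csum_partial()/tcp_v4_check()):

#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* One's-complement sum of big-endian 16-bit words; an odd trailing byte
 * is padded with zero, as in RFC 1071. */
static uint32_t sum16(const uint8_t *p, size_t len, uint32_t sum)
{
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum += (uint32_t)((p[i] << 8) | p[i + 1]);
	if (len & 1)
		sum += (uint32_t)(p[len - 1] << 8);
	return sum;
}

/* saddr/daddr are the four address bytes exactly as they sit in the IP
 * header; tcph must have its checksum field zeroed before the call.  The
 * returned value is written back into the header high byte first. */
static uint16_t tcp_checksum(const uint8_t saddr[4], const uint8_t daddr[4],
			     const uint8_t *tcph, size_t tcplen)
{
	uint8_t pseudo[12];
	uint32_t sum;

	memcpy(pseudo, saddr, 4);
	memcpy(pseudo + 4, daddr, 4);
	pseudo[8]  = 0;
	pseudo[9]  = 6;			/* IPPROTO_TCP */
	pseudo[10] = (uint8_t)(tcplen >> 8);
	pseudo[11] = (uint8_t)tcplen;

	sum = sum16(pseudo, sizeof(pseudo), 0);
	sum = sum16(tcph, tcplen, sum);
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

For the header-only RST built here, tcplen is simply sizeof(struct tcphdr), i.e. 20 bytes.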
diff --git a/net/ipv4/netfilter/ipt_SAME.c b/net/ipv4/netfilter/ipt_SAME.c index b38b13328d73..3dcf29411337 100644 --- a/net/ipv4/netfilter/ipt_SAME.c +++ b/net/ipv4/netfilter/ipt_SAME.c | |||
@@ -34,7 +34,11 @@ | |||
34 | #include <net/protocol.h> | 34 | #include <net/protocol.h> |
35 | #include <net/checksum.h> | 35 | #include <net/checksum.h> |
36 | #include <linux/netfilter_ipv4.h> | 36 | #include <linux/netfilter_ipv4.h> |
37 | #ifdef CONFIG_NF_NAT_NEEDED | ||
38 | #include <net/netfilter/nf_nat_rule.h> | ||
39 | #else | ||
37 | #include <linux/netfilter_ipv4/ip_nat_rule.h> | 40 | #include <linux/netfilter_ipv4/ip_nat_rule.h> |
41 | #endif | ||
38 | #include <linux/netfilter_ipv4/ipt_SAME.h> | 42 | #include <linux/netfilter_ipv4/ipt_SAME.h> |
39 | 43 | ||
40 | MODULE_LICENSE("GPL"); | 44 | MODULE_LICENSE("GPL"); |
@@ -152,11 +156,17 @@ same_target(struct sk_buff **pskb, | |||
152 | Here we calculate the index in same->iparray which | 156 | Here we calculate the index in same->iparray which |
153 | holds the ipaddress we should use */ | 157 | holds the ipaddress we should use */ |
154 | 158 | ||
159 | #ifdef CONFIG_NF_NAT_NEEDED | ||
160 | tmpip = ntohl(t->src.u3.ip); | ||
161 | |||
162 | if (!(same->info & IPT_SAME_NODST)) | ||
163 | tmpip += ntohl(t->dst.u3.ip); | ||
164 | #else | ||
155 | tmpip = ntohl(t->src.ip); | 165 | tmpip = ntohl(t->src.ip); |
156 | 166 | ||
157 | if (!(same->info & IPT_SAME_NODST)) | 167 | if (!(same->info & IPT_SAME_NODST)) |
158 | tmpip += ntohl(t->dst.ip); | 168 | tmpip += ntohl(t->dst.ip); |
159 | 169 | #endif | |
160 | aindex = tmpip % same->ipnum; | 170 | aindex = tmpip % same->ipnum; |
161 | 171 | ||
162 | new_ip = htonl(same->iparray[aindex]); | 172 | new_ip = htonl(same->iparray[aindex]); |
diff --git a/net/ipv4/netfilter/ipt_TCPMSS.c b/net/ipv4/netfilter/ipt_TCPMSS.c index 108b6b76311f..93eb5c3c1884 100644 --- a/net/ipv4/netfilter/ipt_TCPMSS.c +++ b/net/ipv4/netfilter/ipt_TCPMSS.c | |||
@@ -97,10 +97,8 @@ ipt_tcpmss_target(struct sk_buff **pskb, | |||
97 | opt[i+2] = (newmss & 0xff00) >> 8; | 97 | opt[i+2] = (newmss & 0xff00) >> 8; |
98 | opt[i+3] = (newmss & 0x00ff); | 98 | opt[i+3] = (newmss & 0x00ff); |
99 | 99 | ||
100 | tcph->check = nf_proto_csum_update(*pskb, | 100 | nf_proto_csum_replace2(&tcph->check, *pskb, |
101 | htons(oldmss)^htons(0xFFFF), | 101 | htons(oldmss), htons(newmss), 0); |
102 | htons(newmss), | ||
103 | tcph->check, 0); | ||
104 | return IPT_CONTINUE; | 102 | return IPT_CONTINUE; |
105 | } | 103 | } |
106 | } | 104 | } |
@@ -126,28 +124,22 @@ ipt_tcpmss_target(struct sk_buff **pskb, | |||
126 | opt = (u_int8_t *)tcph + sizeof(struct tcphdr); | 124 | opt = (u_int8_t *)tcph + sizeof(struct tcphdr); |
127 | memmove(opt + TCPOLEN_MSS, opt, tcplen - sizeof(struct tcphdr)); | 125 | memmove(opt + TCPOLEN_MSS, opt, tcplen - sizeof(struct tcphdr)); |
128 | 126 | ||
129 | tcph->check = nf_proto_csum_update(*pskb, | 127 | nf_proto_csum_replace2(&tcph->check, *pskb, |
130 | htons(tcplen) ^ htons(0xFFFF), | 128 | htons(tcplen), htons(tcplen + TCPOLEN_MSS), 1); |
131 | htons(tcplen + TCPOLEN_MSS), | ||
132 | tcph->check, 1); | ||
133 | opt[0] = TCPOPT_MSS; | 129 | opt[0] = TCPOPT_MSS; |
134 | opt[1] = TCPOLEN_MSS; | 130 | opt[1] = TCPOLEN_MSS; |
135 | opt[2] = (newmss & 0xff00) >> 8; | 131 | opt[2] = (newmss & 0xff00) >> 8; |
136 | opt[3] = (newmss & 0x00ff); | 132 | opt[3] = (newmss & 0x00ff); |
137 | 133 | ||
138 | tcph->check = nf_proto_csum_update(*pskb, htonl(~0), *((__be32 *)opt), | 134 | nf_proto_csum_replace4(&tcph->check, *pskb, 0, *((__be32 *)opt), 0); |
139 | tcph->check, 0); | ||
140 | 135 | ||
141 | oldval = ((__be16 *)tcph)[6]; | 136 | oldval = ((__be16 *)tcph)[6]; |
142 | tcph->doff += TCPOLEN_MSS/4; | 137 | tcph->doff += TCPOLEN_MSS/4; |
143 | tcph->check = nf_proto_csum_update(*pskb, | 138 | nf_proto_csum_replace2(&tcph->check, *pskb, |
144 | oldval ^ htons(0xFFFF), | 139 | oldval, ((__be16 *)tcph)[6], 0); |
145 | ((__be16 *)tcph)[6], | ||
146 | tcph->check, 0); | ||
147 | 140 | ||
148 | newtotlen = htons(ntohs(iph->tot_len) + TCPOLEN_MSS); | 141 | newtotlen = htons(ntohs(iph->tot_len) + TCPOLEN_MSS); |
149 | iph->check = nf_csum_update(iph->tot_len ^ htons(0xFFFF), | 142 | nf_csum_replace2(&iph->check, iph->tot_len, newtotlen); |
150 | newtotlen, iph->check); | ||
151 | iph->tot_len = newtotlen; | 143 | iph->tot_len = newtotlen; |
152 | return IPT_CONTINUE; | 144 | return IPT_CONTINUE; |
153 | } | 145 | } |
diff --git a/net/ipv4/netfilter/ipt_TOS.c b/net/ipv4/netfilter/ipt_TOS.c index 83b80b3a5d2f..18e74ac4d425 100644 --- a/net/ipv4/netfilter/ipt_TOS.c +++ b/net/ipv4/netfilter/ipt_TOS.c | |||
@@ -30,16 +30,15 @@ target(struct sk_buff **pskb, | |||
30 | { | 30 | { |
31 | const struct ipt_tos_target_info *tosinfo = targinfo; | 31 | const struct ipt_tos_target_info *tosinfo = targinfo; |
32 | struct iphdr *iph = (*pskb)->nh.iph; | 32 | struct iphdr *iph = (*pskb)->nh.iph; |
33 | u_int16_t oldtos; | ||
34 | 33 | ||
35 | if ((iph->tos & IPTOS_TOS_MASK) != tosinfo->tos) { | 34 | if ((iph->tos & IPTOS_TOS_MASK) != tosinfo->tos) { |
35 | __u8 oldtos; | ||
36 | if (!skb_make_writable(pskb, sizeof(struct iphdr))) | 36 | if (!skb_make_writable(pskb, sizeof(struct iphdr))) |
37 | return NF_DROP; | 37 | return NF_DROP; |
38 | iph = (*pskb)->nh.iph; | 38 | iph = (*pskb)->nh.iph; |
39 | oldtos = iph->tos; | 39 | oldtos = iph->tos; |
40 | iph->tos = (iph->tos & IPTOS_PREC_MASK) | tosinfo->tos; | 40 | iph->tos = (iph->tos & IPTOS_PREC_MASK) | tosinfo->tos; |
41 | iph->check = nf_csum_update(htons(oldtos) ^ htons(0xFFFF), | 41 | nf_csum_replace2(&iph->check, htons(oldtos), htons(iph->tos)); |
42 | htons(iph->tos), iph->check); | ||
43 | } | 42 | } |
44 | return IPT_CONTINUE; | 43 | return IPT_CONTINUE; |
45 | } | 44 | } |
diff --git a/net/ipv4/netfilter/ipt_TTL.c b/net/ipv4/netfilter/ipt_TTL.c index ac9517d62af0..fffe5ca82e91 100644 --- a/net/ipv4/netfilter/ipt_TTL.c +++ b/net/ipv4/netfilter/ipt_TTL.c | |||
@@ -54,9 +54,8 @@ ipt_ttl_target(struct sk_buff **pskb, | |||
54 | } | 54 | } |
55 | 55 | ||
56 | if (new_ttl != iph->ttl) { | 56 | if (new_ttl != iph->ttl) { |
57 | iph->check = nf_csum_update(htons((iph->ttl << 8)) ^ htons(0xFFFF), | 57 | nf_csum_replace2(&iph->check, htons(iph->ttl << 8), |
58 | htons(new_ttl << 8), | 58 | htons(new_ttl << 8)); |
59 | iph->check); | ||
60 | iph->ttl = new_ttl; | 59 | iph->ttl = new_ttl; |
61 | } | 60 | } |
62 | 61 | ||
diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c index 2b104ea54f48..dbd34783a64d 100644 --- a/net/ipv4/netfilter/ipt_ULOG.c +++ b/net/ipv4/netfilter/ipt_ULOG.c | |||
@@ -239,7 +239,7 @@ static void ipt_ulog_packet(unsigned int hooknum, | |||
239 | pm->data_len = copy_len; | 239 | pm->data_len = copy_len; |
240 | pm->timestamp_sec = skb->tstamp.off_sec; | 240 | pm->timestamp_sec = skb->tstamp.off_sec; |
241 | pm->timestamp_usec = skb->tstamp.off_usec; | 241 | pm->timestamp_usec = skb->tstamp.off_usec; |
242 | pm->mark = skb->nfmark; | 242 | pm->mark = skb->mark; |
243 | pm->hook = hooknum; | 243 | pm->hook = hooknum; |
244 | if (prefix != NULL) | 244 | if (prefix != NULL) |
245 | strncpy(pm->prefix, prefix, sizeof(pm->prefix)); | 245 | strncpy(pm->prefix, prefix, sizeof(pm->prefix)); |
diff --git a/net/ipv4/netfilter/iptable_mangle.c b/net/ipv4/netfilter/iptable_mangle.c index b91f3582359b..af2939889444 100644 --- a/net/ipv4/netfilter/iptable_mangle.c +++ b/net/ipv4/netfilter/iptable_mangle.c | |||
@@ -132,7 +132,7 @@ ipt_local_hook(unsigned int hook, | |||
132 | unsigned int ret; | 132 | unsigned int ret; |
133 | u_int8_t tos; | 133 | u_int8_t tos; |
134 | __be32 saddr, daddr; | 134 | __be32 saddr, daddr; |
135 | unsigned long nfmark; | 135 | u_int32_t mark; |
136 | 136 | ||
137 | /* root is playing with raw sockets. */ | 137 | /* root is playing with raw sockets. */ |
138 | if ((*pskb)->len < sizeof(struct iphdr) | 138 | if ((*pskb)->len < sizeof(struct iphdr) |
@@ -143,7 +143,7 @@ ipt_local_hook(unsigned int hook, | |||
143 | } | 143 | } |
144 | 144 | ||
145 | /* Save things which could affect route */ | 145 | /* Save things which could affect route */ |
146 | nfmark = (*pskb)->nfmark; | 146 | mark = (*pskb)->mark; |
147 | saddr = (*pskb)->nh.iph->saddr; | 147 | saddr = (*pskb)->nh.iph->saddr; |
148 | daddr = (*pskb)->nh.iph->daddr; | 148 | daddr = (*pskb)->nh.iph->daddr; |
149 | tos = (*pskb)->nh.iph->tos; | 149 | tos = (*pskb)->nh.iph->tos; |
@@ -153,9 +153,7 @@ ipt_local_hook(unsigned int hook, | |||
153 | if (ret != NF_DROP && ret != NF_STOLEN && ret != NF_QUEUE | 153 | if (ret != NF_DROP && ret != NF_STOLEN && ret != NF_QUEUE |
154 | && ((*pskb)->nh.iph->saddr != saddr | 154 | && ((*pskb)->nh.iph->saddr != saddr |
155 | || (*pskb)->nh.iph->daddr != daddr | 155 | || (*pskb)->nh.iph->daddr != daddr |
156 | #ifdef CONFIG_IP_ROUTE_FWMARK | 156 | || (*pskb)->mark != mark |
157 | || (*pskb)->nfmark != nfmark | ||
158 | #endif | ||
159 | || (*pskb)->nh.iph->tos != tos)) | 157 | || (*pskb)->nh.iph->tos != tos)) |
160 | if (ip_route_me_harder(pskb, RTN_UNSPEC)) | 158 | if (ip_route_me_harder(pskb, RTN_UNSPEC)) |
161 | ret = NF_DROP; | 159 | ret = NF_DROP; |
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c index 0af803df82b0..471b638cedec 100644 --- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c +++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c | |||
@@ -27,7 +27,7 @@ | |||
27 | #include <linux/netfilter_ipv4.h> | 27 | #include <linux/netfilter_ipv4.h> |
28 | #include <net/netfilter/nf_conntrack.h> | 28 | #include <net/netfilter/nf_conntrack.h> |
29 | #include <net/netfilter/nf_conntrack_helper.h> | 29 | #include <net/netfilter/nf_conntrack_helper.h> |
30 | #include <net/netfilter/nf_conntrack_protocol.h> | 30 | #include <net/netfilter/nf_conntrack_l4proto.h> |
31 | #include <net/netfilter/nf_conntrack_l3proto.h> | 31 | #include <net/netfilter/nf_conntrack_l3proto.h> |
32 | #include <net/netfilter/nf_conntrack_core.h> | 32 | #include <net/netfilter/nf_conntrack_core.h> |
33 | #include <net/netfilter/ipv4/nf_conntrack_ipv4.h> | 33 | #include <net/netfilter/ipv4/nf_conntrack_ipv4.h> |
@@ -38,12 +38,10 @@ | |||
38 | #define DEBUGP(format, args...) | 38 | #define DEBUGP(format, args...) |
39 | #endif | 39 | #endif |
40 | 40 | ||
41 | DECLARE_PER_CPU(struct nf_conntrack_stat, nf_conntrack_stat); | ||
42 | |||
43 | static int ipv4_pkt_to_tuple(const struct sk_buff *skb, unsigned int nhoff, | 41 | static int ipv4_pkt_to_tuple(const struct sk_buff *skb, unsigned int nhoff, |
44 | struct nf_conntrack_tuple *tuple) | 42 | struct nf_conntrack_tuple *tuple) |
45 | { | 43 | { |
46 | u_int32_t _addrs[2], *ap; | 44 | __be32 _addrs[2], *ap; |
47 | ap = skb_header_pointer(skb, nhoff + offsetof(struct iphdr, saddr), | 45 | ap = skb_header_pointer(skb, nhoff + offsetof(struct iphdr, saddr), |
48 | sizeof(u_int32_t) * 2, _addrs); | 46 | sizeof(u_int32_t) * 2, _addrs); |
49 | if (ap == NULL) | 47 | if (ap == NULL) |
@@ -113,10 +111,12 @@ ipv4_prepare(struct sk_buff **pskb, unsigned int hooknum, unsigned int *dataoff, | |||
113 | return NF_ACCEPT; | 111 | return NF_ACCEPT; |
114 | } | 112 | } |
115 | 113 | ||
116 | int nat_module_is_loaded = 0; | 114 | int nf_nat_module_is_loaded = 0; |
115 | EXPORT_SYMBOL_GPL(nf_nat_module_is_loaded); | ||
116 | |||
117 | static u_int32_t ipv4_get_features(const struct nf_conntrack_tuple *tuple) | 117 | static u_int32_t ipv4_get_features(const struct nf_conntrack_tuple *tuple) |
118 | { | 118 | { |
119 | if (nat_module_is_loaded) | 119 | if (nf_nat_module_is_loaded) |
120 | return NF_CT_F_NAT; | 120 | return NF_CT_F_NAT; |
121 | 121 | ||
122 | return NF_CT_F_BASIC; | 122 | return NF_CT_F_BASIC; |
@@ -268,43 +268,59 @@ static struct nf_hook_ops ipv4_conntrack_ops[] = { | |||
268 | }, | 268 | }, |
269 | }; | 269 | }; |
270 | 270 | ||
271 | #ifdef CONFIG_SYSCTL | 271 | #if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT) |
272 | /* From nf_conntrack_proto_icmp.c */ | 272 | static int log_invalid_proto_min = 0; |
273 | extern unsigned int nf_ct_icmp_timeout; | 273 | static int log_invalid_proto_max = 255; |
274 | static struct ctl_table_header *nf_ct_ipv4_sysctl_header; | ||
275 | 274 | ||
276 | static ctl_table nf_ct_sysctl_table[] = { | 275 | static ctl_table ip_ct_sysctl_table[] = { |
277 | { | 276 | { |
278 | .ctl_name = NET_NF_CONNTRACK_ICMP_TIMEOUT, | 277 | .ctl_name = NET_IPV4_NF_CONNTRACK_MAX, |
279 | .procname = "nf_conntrack_icmp_timeout", | 278 | .procname = "ip_conntrack_max", |
280 | .data = &nf_ct_icmp_timeout, | 279 | .data = &nf_conntrack_max, |
281 | .maxlen = sizeof(unsigned int), | 280 | .maxlen = sizeof(int), |
282 | .mode = 0644, | 281 | .mode = 0644, |
283 | .proc_handler = &proc_dointvec_jiffies, | 282 | .proc_handler = &proc_dointvec, |
284 | }, | 283 | }, |
285 | { .ctl_name = 0 } | ||
286 | }; | ||
287 | |||
288 | static ctl_table nf_ct_netfilter_table[] = { | ||
289 | { | 284 | { |
290 | .ctl_name = NET_NETFILTER, | 285 | .ctl_name = NET_IPV4_NF_CONNTRACK_COUNT, |
291 | .procname = "netfilter", | 286 | .procname = "ip_conntrack_count", |
292 | .mode = 0555, | 287 | .data = &nf_conntrack_count, |
293 | .child = nf_ct_sysctl_table, | 288 | .maxlen = sizeof(int), |
289 | .mode = 0444, | ||
290 | .proc_handler = &proc_dointvec, | ||
291 | }, | ||
292 | { | ||
293 | .ctl_name = NET_IPV4_NF_CONNTRACK_BUCKETS, | ||
294 | .procname = "ip_conntrack_buckets", | ||
295 | .data = &nf_conntrack_htable_size, | ||
296 | .maxlen = sizeof(unsigned int), | ||
297 | .mode = 0444, | ||
298 | .proc_handler = &proc_dointvec, | ||
299 | }, | ||
300 | { | ||
301 | .ctl_name = NET_IPV4_NF_CONNTRACK_CHECKSUM, | ||
302 | .procname = "ip_conntrack_checksum", | ||
303 | .data = &nf_conntrack_checksum, | ||
304 | .maxlen = sizeof(int), | ||
305 | .mode = 0644, | ||
306 | .proc_handler = &proc_dointvec, | ||
294 | }, | 307 | }, |
295 | { .ctl_name = 0 } | ||
296 | }; | ||
297 | |||
298 | static ctl_table nf_ct_net_table[] = { | ||
299 | { | 308 | { |
300 | .ctl_name = CTL_NET, | 309 | .ctl_name = NET_IPV4_NF_CONNTRACK_LOG_INVALID, |
301 | .procname = "net", | 310 | .procname = "ip_conntrack_log_invalid", |
302 | .mode = 0555, | 311 | .data = &nf_ct_log_invalid, |
303 | .child = nf_ct_netfilter_table, | 312 | .maxlen = sizeof(unsigned int), |
313 | .mode = 0644, | ||
314 | .proc_handler = &proc_dointvec_minmax, | ||
315 | .strategy = &sysctl_intvec, | ||
316 | .extra1 = &log_invalid_proto_min, | ||
317 | .extra2 = &log_invalid_proto_max, | ||
304 | }, | 318 | }, |
305 | { .ctl_name = 0 } | 319 | { |
320 | .ctl_name = 0 | ||
321 | } | ||
306 | }; | 322 | }; |
307 | #endif | 323 | #endif /* CONFIG_SYSCTL && CONFIG_NF_CONNTRACK_PROC_COMPAT */ |
308 | 324 | ||
309 | /* Fast function for those who don't want to parse /proc (and I don't | 325 | /* Fast function for those who don't want to parse /proc (and I don't |
310 | blame them). */ | 326 | blame them). */ |
@@ -396,10 +412,8 @@ static int ipv4_nfattr_to_tuple(struct nfattr *tb[], | |||
396 | if (nfattr_bad_size(tb, CTA_IP_MAX, cta_min_ip)) | 412 | if (nfattr_bad_size(tb, CTA_IP_MAX, cta_min_ip)) |
397 | return -EINVAL; | 413 | return -EINVAL; |
398 | 414 | ||
399 | t->src.u3.ip = | 415 | t->src.u3.ip = *(__be32 *)NFA_DATA(tb[CTA_IP_V4_SRC-1]); |
400 | *(u_int32_t *)NFA_DATA(tb[CTA_IP_V4_SRC-1]); | 416 | t->dst.u3.ip = *(__be32 *)NFA_DATA(tb[CTA_IP_V4_DST-1]); |
401 | t->dst.u3.ip = | ||
402 | *(u_int32_t *)NFA_DATA(tb[CTA_IP_V4_DST-1]); | ||
403 | 417 | ||
404 | return 0; | 418 | return 0; |
405 | } | 419 | } |
@@ -426,14 +440,15 @@ struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4 = { | |||
426 | .tuple_to_nfattr = ipv4_tuple_to_nfattr, | 440 | .tuple_to_nfattr = ipv4_tuple_to_nfattr, |
427 | .nfattr_to_tuple = ipv4_nfattr_to_tuple, | 441 | .nfattr_to_tuple = ipv4_nfattr_to_tuple, |
428 | #endif | 442 | #endif |
443 | #if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT) | ||
444 | .ctl_table_path = nf_net_ipv4_netfilter_sysctl_path, | ||
445 | .ctl_table = ip_ct_sysctl_table, | ||
446 | #endif | ||
429 | .me = THIS_MODULE, | 447 | .me = THIS_MODULE, |
430 | }; | 448 | }; |
431 | 449 | ||
432 | extern struct nf_conntrack_protocol nf_conntrack_protocol_tcp4; | ||
433 | extern struct nf_conntrack_protocol nf_conntrack_protocol_udp4; | ||
434 | extern struct nf_conntrack_protocol nf_conntrack_protocol_icmp; | ||
435 | |||
436 | MODULE_ALIAS("nf_conntrack-" __stringify(AF_INET)); | 450 | MODULE_ALIAS("nf_conntrack-" __stringify(AF_INET)); |
451 | MODULE_ALIAS("ip_conntrack"); | ||
437 | MODULE_LICENSE("GPL"); | 452 | MODULE_LICENSE("GPL"); |
438 | 453 | ||
439 | static int __init nf_conntrack_l3proto_ipv4_init(void) | 454 | static int __init nf_conntrack_l3proto_ipv4_init(void) |
@@ -448,19 +463,19 @@ static int __init nf_conntrack_l3proto_ipv4_init(void) | |||
448 | return ret; | 463 | return ret; |
449 | } | 464 | } |
450 | 465 | ||
451 | ret = nf_conntrack_protocol_register(&nf_conntrack_protocol_tcp4); | 466 | ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_tcp4); |
452 | if (ret < 0) { | 467 | if (ret < 0) { |
453 | printk("nf_conntrack_ipv4: can't register tcp.\n"); | 468 | printk("nf_conntrack_ipv4: can't register tcp.\n"); |
454 | goto cleanup_sockopt; | 469 | goto cleanup_sockopt; |
455 | } | 470 | } |
456 | 471 | ||
457 | ret = nf_conntrack_protocol_register(&nf_conntrack_protocol_udp4); | 472 | ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_udp4); |
458 | if (ret < 0) { | 473 | if (ret < 0) { |
459 | printk("nf_conntrack_ipv4: can't register udp.\n"); | 474 | printk("nf_conntrack_ipv4: can't register udp.\n"); |
460 | goto cleanup_tcp; | 475 | goto cleanup_tcp; |
461 | } | 476 | } |
462 | 477 | ||
463 | ret = nf_conntrack_protocol_register(&nf_conntrack_protocol_icmp); | 478 | ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_icmp); |
464 | if (ret < 0) { | 479 | if (ret < 0) { |
465 | printk("nf_conntrack_ipv4: can't register icmp.\n"); | 480 | printk("nf_conntrack_ipv4: can't register icmp.\n"); |
466 | goto cleanup_udp; | 481 | goto cleanup_udp; |
@@ -478,28 +493,24 @@ static int __init nf_conntrack_l3proto_ipv4_init(void) | |||
478 | printk("nf_conntrack_ipv4: can't register hooks.\n"); | 493 | printk("nf_conntrack_ipv4: can't register hooks.\n"); |
479 | goto cleanup_ipv4; | 494 | goto cleanup_ipv4; |
480 | } | 495 | } |
481 | #ifdef CONFIG_SYSCTL | 496 | #if defined(CONFIG_PROC_FS) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT) |
482 | nf_ct_ipv4_sysctl_header = register_sysctl_table(nf_ct_net_table, 0); | 497 | ret = nf_conntrack_ipv4_compat_init(); |
483 | if (nf_ct_ipv4_sysctl_header == NULL) { | 498 | if (ret < 0) |
484 | printk("nf_conntrack: can't register to sysctl.\n"); | ||
485 | ret = -ENOMEM; | ||
486 | goto cleanup_hooks; | 499 | goto cleanup_hooks; |
487 | } | ||
488 | #endif | 500 | #endif |
489 | return ret; | 501 | return ret; |
490 | 502 | #if defined(CONFIG_PROC_FS) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT) | |
491 | #ifdef CONFIG_SYSCTL | ||
492 | cleanup_hooks: | 503 | cleanup_hooks: |
493 | nf_unregister_hooks(ipv4_conntrack_ops, ARRAY_SIZE(ipv4_conntrack_ops)); | 504 | nf_unregister_hooks(ipv4_conntrack_ops, ARRAY_SIZE(ipv4_conntrack_ops)); |
494 | #endif | 505 | #endif |
495 | cleanup_ipv4: | 506 | cleanup_ipv4: |
496 | nf_conntrack_l3proto_unregister(&nf_conntrack_l3proto_ipv4); | 507 | nf_conntrack_l3proto_unregister(&nf_conntrack_l3proto_ipv4); |
497 | cleanup_icmp: | 508 | cleanup_icmp: |
498 | nf_conntrack_protocol_unregister(&nf_conntrack_protocol_icmp); | 509 | nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_icmp); |
499 | cleanup_udp: | 510 | cleanup_udp: |
500 | nf_conntrack_protocol_unregister(&nf_conntrack_protocol_udp4); | 511 | nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_udp4); |
501 | cleanup_tcp: | 512 | cleanup_tcp: |
502 | nf_conntrack_protocol_unregister(&nf_conntrack_protocol_tcp4); | 513 | nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_tcp4); |
503 | cleanup_sockopt: | 514 | cleanup_sockopt: |
504 | nf_unregister_sockopt(&so_getorigdst); | 515 | nf_unregister_sockopt(&so_getorigdst); |
505 | return ret; | 516 | return ret; |
@@ -508,18 +519,16 @@ static int __init nf_conntrack_l3proto_ipv4_init(void) | |||
508 | static void __exit nf_conntrack_l3proto_ipv4_fini(void) | 519 | static void __exit nf_conntrack_l3proto_ipv4_fini(void) |
509 | { | 520 | { |
510 | synchronize_net(); | 521 | synchronize_net(); |
511 | #ifdef CONFIG_SYSCTL | 522 | #if defined(CONFIG_PROC_FS) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT) |
512 | unregister_sysctl_table(nf_ct_ipv4_sysctl_header); | 523 | nf_conntrack_ipv4_compat_fini(); |
513 | #endif | 524 | #endif |
514 | nf_unregister_hooks(ipv4_conntrack_ops, ARRAY_SIZE(ipv4_conntrack_ops)); | 525 | nf_unregister_hooks(ipv4_conntrack_ops, ARRAY_SIZE(ipv4_conntrack_ops)); |
515 | nf_conntrack_l3proto_unregister(&nf_conntrack_l3proto_ipv4); | 526 | nf_conntrack_l3proto_unregister(&nf_conntrack_l3proto_ipv4); |
516 | nf_conntrack_protocol_unregister(&nf_conntrack_protocol_icmp); | 527 | nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_icmp); |
517 | nf_conntrack_protocol_unregister(&nf_conntrack_protocol_udp4); | 528 | nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_udp4); |
518 | nf_conntrack_protocol_unregister(&nf_conntrack_protocol_tcp4); | 529 | nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_tcp4); |
519 | nf_unregister_sockopt(&so_getorigdst); | 530 | nf_unregister_sockopt(&so_getorigdst); |
520 | } | 531 | } |
521 | 532 | ||
522 | module_init(nf_conntrack_l3proto_ipv4_init); | 533 | module_init(nf_conntrack_l3proto_ipv4_init); |
523 | module_exit(nf_conntrack_l3proto_ipv4_fini); | 534 | module_exit(nf_conntrack_l3proto_ipv4_fini); |
524 | |||
525 | EXPORT_SYMBOL(nf_ct_ipv4_gather_frags); | ||
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c new file mode 100644 index 000000000000..3b31bc649608 --- /dev/null +++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c | |||
@@ -0,0 +1,412 @@ | |||
1 | /* ip_conntrack proc compat - based on ip_conntrack_standalone.c | ||
2 | * | ||
3 | * (C) 1999-2001 Paul `Rusty' Russell | ||
4 | * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | #include <linux/types.h> | ||
11 | #include <linux/proc_fs.h> | ||
12 | #include <linux/seq_file.h> | ||
13 | #include <linux/percpu.h> | ||
14 | |||
15 | #include <linux/netfilter.h> | ||
16 | #include <net/netfilter/nf_conntrack_core.h> | ||
17 | #include <net/netfilter/nf_conntrack_l3proto.h> | ||
18 | #include <net/netfilter/nf_conntrack_l4proto.h> | ||
19 | #include <net/netfilter/nf_conntrack_expect.h> | ||
20 | |||
21 | #if 0 | ||
22 | #define DEBUGP printk | ||
23 | #else | ||
24 | #define DEBUGP(format, args...) | ||
25 | #endif | ||
26 | |||
27 | #ifdef CONFIG_NF_CT_ACCT | ||
28 | static unsigned int | ||
29 | seq_print_counters(struct seq_file *s, | ||
30 | const struct ip_conntrack_counter *counter) | ||
31 | { | ||
32 | return seq_printf(s, "packets=%llu bytes=%llu ", | ||
33 | (unsigned long long)counter->packets, | ||
34 | (unsigned long long)counter->bytes); | ||
35 | } | ||
36 | #else | ||
37 | #define seq_print_counters(x, y) 0 | ||
38 | #endif | ||
39 | |||
40 | struct ct_iter_state { | ||
41 | unsigned int bucket; | ||
42 | }; | ||
43 | |||
44 | static struct list_head *ct_get_first(struct seq_file *seq) | ||
45 | { | ||
46 | struct ct_iter_state *st = seq->private; | ||
47 | |||
48 | for (st->bucket = 0; | ||
49 | st->bucket < nf_conntrack_htable_size; | ||
50 | st->bucket++) { | ||
51 | if (!list_empty(&nf_conntrack_hash[st->bucket])) | ||
52 | return nf_conntrack_hash[st->bucket].next; | ||
53 | } | ||
54 | return NULL; | ||
55 | } | ||
56 | |||
57 | static struct list_head *ct_get_next(struct seq_file *seq, struct list_head *head) | ||
58 | { | ||
59 | struct ct_iter_state *st = seq->private; | ||
60 | |||
61 | head = head->next; | ||
62 | while (head == &nf_conntrack_hash[st->bucket]) { | ||
63 | if (++st->bucket >= nf_conntrack_htable_size) | ||
64 | return NULL; | ||
65 | head = nf_conntrack_hash[st->bucket].next; | ||
66 | } | ||
67 | return head; | ||
68 | } | ||
69 | |||
70 | static struct list_head *ct_get_idx(struct seq_file *seq, loff_t pos) | ||
71 | { | ||
72 | struct list_head *head = ct_get_first(seq); | ||
73 | |||
74 | if (head) | ||
75 | while (pos && (head = ct_get_next(seq, head))) | ||
76 | pos--; | ||
77 | return pos ? NULL : head; | ||
78 | } | ||
79 | |||
80 | static void *ct_seq_start(struct seq_file *seq, loff_t *pos) | ||
81 | { | ||
82 | read_lock_bh(&nf_conntrack_lock); | ||
83 | return ct_get_idx(seq, *pos); | ||
84 | } | ||
85 | |||
86 | static void *ct_seq_next(struct seq_file *s, void *v, loff_t *pos) | ||
87 | { | ||
88 | (*pos)++; | ||
89 | return ct_get_next(s, v); | ||
90 | } | ||
91 | |||
92 | static void ct_seq_stop(struct seq_file *s, void *v) | ||
93 | { | ||
94 | read_unlock_bh(&nf_conntrack_lock); | ||
95 | } | ||
96 | |||
97 | static int ct_seq_show(struct seq_file *s, void *v) | ||
98 | { | ||
99 | const struct nf_conntrack_tuple_hash *hash = v; | ||
100 | const struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(hash); | ||
101 | struct nf_conntrack_l3proto *l3proto; | ||
102 | struct nf_conntrack_l4proto *l4proto; | ||
103 | |||
104 | NF_CT_ASSERT(ct); | ||
105 | |||
106 | /* we only want to print DIR_ORIGINAL */ | ||
107 | if (NF_CT_DIRECTION(hash)) | ||
108 | return 0; | ||
109 | if (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num != AF_INET) | ||
110 | return 0; | ||
111 | |||
112 | l3proto = __nf_ct_l3proto_find(ct->tuplehash[IP_CT_DIR_ORIGINAL] | ||
113 | .tuple.src.l3num); | ||
114 | NF_CT_ASSERT(l3proto); | ||
115 | l4proto = __nf_ct_l4proto_find(ct->tuplehash[IP_CT_DIR_ORIGINAL] | ||
116 | .tuple.src.l3num, | ||
117 | ct->tuplehash[IP_CT_DIR_ORIGINAL] | ||
118 | .tuple.dst.protonum); | ||
119 | NF_CT_ASSERT(l4proto); | ||
120 | |||
121 | if (seq_printf(s, "%-8s %u %ld ", | ||
122 | l4proto->name, | ||
123 | ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum, | ||
124 | timer_pending(&ct->timeout) | ||
125 | ? (long)(ct->timeout.expires - jiffies)/HZ : 0) != 0) | ||
126 | return -ENOSPC; | ||
127 | |||
128 | if (l3proto->print_conntrack(s, ct)) | ||
129 | return -ENOSPC; | ||
130 | |||
131 | if (l4proto->print_conntrack(s, ct)) | ||
132 | return -ENOSPC; | ||
133 | |||
134 | if (print_tuple(s, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, | ||
135 | l3proto, l4proto)) | ||
136 | return -ENOSPC; | ||
137 | |||
138 | if (seq_print_counters(s, &ct->counters[IP_CT_DIR_ORIGINAL])) | ||
139 | return -ENOSPC; | ||
140 | |||
141 | if (!(test_bit(IPS_SEEN_REPLY_BIT, &ct->status))) | ||
142 | if (seq_printf(s, "[UNREPLIED] ")) | ||
143 | return -ENOSPC; | ||
144 | |||
145 | if (print_tuple(s, &ct->tuplehash[IP_CT_DIR_REPLY].tuple, | ||
146 | l3proto, l4proto)) | ||
147 | return -ENOSPC; | ||
148 | |||
149 | if (seq_print_counters(s, &ct->counters[IP_CT_DIR_REPLY])) | ||
150 | return -ENOSPC; | ||
151 | |||
152 | if (test_bit(IPS_ASSURED_BIT, &ct->status)) | ||
153 | if (seq_printf(s, "[ASSURED] ")) | ||
154 | return -ENOSPC; | ||
155 | |||
156 | #ifdef CONFIG_NF_CONNTRACK_MARK | ||
157 | if (seq_printf(s, "mark=%u ", ct->mark)) | ||
158 | return -ENOSPC; | ||
159 | #endif | ||
160 | |||
161 | #ifdef CONFIG_NF_CONNTRACK_SECMARK | ||
162 | if (seq_printf(s, "secmark=%u ", ct->secmark)) | ||
163 | return -ENOSPC; | ||
164 | #endif | ||
165 | |||
166 | if (seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use))) | ||
167 | return -ENOSPC; | ||
168 | |||
169 | return 0; | ||
170 | } | ||
171 | |||
172 | static struct seq_operations ct_seq_ops = { | ||
173 | .start = ct_seq_start, | ||
174 | .next = ct_seq_next, | ||
175 | .stop = ct_seq_stop, | ||
176 | .show = ct_seq_show | ||
177 | }; | ||
178 | |||
179 | static int ct_open(struct inode *inode, struct file *file) | ||
180 | { | ||
181 | struct seq_file *seq; | ||
182 | struct ct_iter_state *st; | ||
183 | int ret; | ||
184 | |||
185 | st = kmalloc(sizeof(struct ct_iter_state), GFP_KERNEL); | ||
186 | if (st == NULL) | ||
187 | return -ENOMEM; | ||
188 | ret = seq_open(file, &ct_seq_ops); | ||
189 | if (ret) | ||
190 | goto out_free; | ||
191 | seq = file->private_data; | ||
192 | seq->private = st; | ||
193 | memset(st, 0, sizeof(struct ct_iter_state)); | ||
194 | return ret; | ||
195 | out_free: | ||
196 | kfree(st); | ||
197 | return ret; | ||
198 | } | ||
199 | |||
200 | static struct file_operations ct_file_ops = { | ||
201 | .owner = THIS_MODULE, | ||
202 | .open = ct_open, | ||
203 | .read = seq_read, | ||
204 | .llseek = seq_lseek, | ||
205 | .release = seq_release_private, | ||
206 | }; | ||
207 | |||
208 | /* expects */ | ||
209 | static void *exp_seq_start(struct seq_file *s, loff_t *pos) | ||
210 | { | ||
211 | struct list_head *e = &nf_conntrack_expect_list; | ||
212 | loff_t i; | ||
213 | |||
214 | /* strange seq_file api calls stop even if we fail, | ||
215 | * thus we need to grab lock since stop unlocks */ | ||
216 | read_lock_bh(&nf_conntrack_lock); | ||
217 | |||
218 | if (list_empty(e)) | ||
219 | return NULL; | ||
220 | |||
221 | for (i = 0; i <= *pos; i++) { | ||
222 | e = e->next; | ||
223 | if (e == &nf_conntrack_expect_list) | ||
224 | return NULL; | ||
225 | } | ||
226 | return e; | ||
227 | } | ||
228 | |||
229 | static void *exp_seq_next(struct seq_file *s, void *v, loff_t *pos) | ||
230 | { | ||
231 | struct list_head *e = v; | ||
232 | |||
233 | ++*pos; | ||
234 | e = e->next; | ||
235 | |||
236 | if (e == &nf_conntrack_expect_list) | ||
237 | return NULL; | ||
238 | |||
239 | return e; | ||
240 | } | ||
241 | |||
242 | static void exp_seq_stop(struct seq_file *s, void *v) | ||
243 | { | ||
244 | read_unlock_bh(&nf_conntrack_lock); | ||
245 | } | ||
246 | |||
247 | static int exp_seq_show(struct seq_file *s, void *v) | ||
248 | { | ||
249 | struct nf_conntrack_expect *exp = v; | ||
250 | |||
251 | if (exp->tuple.src.l3num != AF_INET) | ||
252 | return 0; | ||
253 | |||
254 | if (exp->timeout.function) | ||
255 | seq_printf(s, "%ld ", timer_pending(&exp->timeout) | ||
256 | ? (long)(exp->timeout.expires - jiffies)/HZ : 0); | ||
257 | else | ||
258 | seq_printf(s, "- "); | ||
259 | |||
260 | seq_printf(s, "proto=%u ", exp->tuple.dst.protonum); | ||
261 | |||
262 | print_tuple(s, &exp->tuple, | ||
263 | __nf_ct_l3proto_find(exp->tuple.src.l3num), | ||
264 | __nf_ct_l4proto_find(exp->tuple.src.l3num, | ||
265 | exp->tuple.dst.protonum)); | ||
266 | return seq_putc(s, '\n'); | ||
267 | } | ||
268 | |||
269 | static struct seq_operations exp_seq_ops = { | ||
270 | .start = exp_seq_start, | ||
271 | .next = exp_seq_next, | ||
272 | .stop = exp_seq_stop, | ||
273 | .show = exp_seq_show | ||
274 | }; | ||
275 | |||
276 | static int exp_open(struct inode *inode, struct file *file) | ||
277 | { | ||
278 | return seq_open(file, &exp_seq_ops); | ||
279 | } | ||
280 | |||
281 | static struct file_operations ip_exp_file_ops = { | ||
282 | .owner = THIS_MODULE, | ||
283 | .open = exp_open, | ||
284 | .read = seq_read, | ||
285 | .llseek = seq_lseek, | ||
286 | .release = seq_release | ||
287 | }; | ||
288 | |||
289 | static void *ct_cpu_seq_start(struct seq_file *seq, loff_t *pos) | ||
290 | { | ||
291 | int cpu; | ||
292 | |||
293 | if (*pos == 0) | ||
294 | return SEQ_START_TOKEN; | ||
295 | |||
296 | for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) { | ||
297 | if (!cpu_possible(cpu)) | ||
298 | continue; | ||
299 | *pos = cpu+1; | ||
300 | return &per_cpu(nf_conntrack_stat, cpu); | ||
301 | } | ||
302 | |||
303 | return NULL; | ||
304 | } | ||
305 | |||
306 | static void *ct_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos) | ||
307 | { | ||
308 | int cpu; | ||
309 | |||
310 | for (cpu = *pos; cpu < NR_CPUS; ++cpu) { | ||
311 | if (!cpu_possible(cpu)) | ||
312 | continue; | ||
313 | *pos = cpu+1; | ||
314 | return &per_cpu(nf_conntrack_stat, cpu); | ||
315 | } | ||
316 | |||
317 | return NULL; | ||
318 | } | ||
319 | |||
320 | static void ct_cpu_seq_stop(struct seq_file *seq, void *v) | ||
321 | { | ||
322 | } | ||
323 | |||
324 | static int ct_cpu_seq_show(struct seq_file *seq, void *v) | ||
325 | { | ||
326 | unsigned int nr_conntracks = atomic_read(&nf_conntrack_count); | ||
327 | struct ip_conntrack_stat *st = v; | ||
328 | |||
329 | if (v == SEQ_START_TOKEN) { | ||
330 | seq_printf(seq, "entries searched found new invalid ignore delete delete_list insert insert_failed drop early_drop icmp_error expect_new expect_create expect_delete\n"); | ||
331 | return 0; | ||
332 | } | ||
333 | |||
334 | seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x " | ||
335 | "%08x %08x %08x %08x %08x %08x %08x %08x \n", | ||
336 | nr_conntracks, | ||
337 | st->searched, | ||
338 | st->found, | ||
339 | st->new, | ||
340 | st->invalid, | ||
341 | st->ignore, | ||
342 | st->delete, | ||
343 | st->delete_list, | ||
344 | st->insert, | ||
345 | st->insert_failed, | ||
346 | st->drop, | ||
347 | st->early_drop, | ||
348 | st->error, | ||
349 | |||
350 | st->expect_new, | ||
351 | st->expect_create, | ||
352 | st->expect_delete | ||
353 | ); | ||
354 | return 0; | ||
355 | } | ||
356 | |||
357 | static struct seq_operations ct_cpu_seq_ops = { | ||
358 | .start = ct_cpu_seq_start, | ||
359 | .next = ct_cpu_seq_next, | ||
360 | .stop = ct_cpu_seq_stop, | ||
361 | .show = ct_cpu_seq_show, | ||
362 | }; | ||
363 | |||
364 | static int ct_cpu_seq_open(struct inode *inode, struct file *file) | ||
365 | { | ||
366 | return seq_open(file, &ct_cpu_seq_ops); | ||
367 | } | ||
368 | |||
369 | static struct file_operations ct_cpu_seq_fops = { | ||
370 | .owner = THIS_MODULE, | ||
371 | .open = ct_cpu_seq_open, | ||
372 | .read = seq_read, | ||
373 | .llseek = seq_lseek, | ||
374 | .release = seq_release_private, | ||
375 | }; | ||
376 | |||
377 | int __init nf_conntrack_ipv4_compat_init(void) | ||
378 | { | ||
379 | struct proc_dir_entry *proc, *proc_exp, *proc_stat; | ||
380 | |||
381 | proc = proc_net_fops_create("ip_conntrack", 0440, &ct_file_ops); | ||
382 | if (!proc) | ||
383 | goto err1; | ||
384 | |||
385 | proc_exp = proc_net_fops_create("ip_conntrack_expect", 0440, | ||
386 | &ip_exp_file_ops); | ||
387 | if (!proc_exp) | ||
388 | goto err2; | ||
389 | |||
390 | proc_stat = create_proc_entry("ip_conntrack", S_IRUGO, proc_net_stat); | ||
391 | if (!proc_stat) | ||
392 | goto err3; | ||
393 | |||
394 | proc_stat->proc_fops = &ct_cpu_seq_fops; | ||
395 | proc_stat->owner = THIS_MODULE; | ||
396 | |||
397 | return 0; | ||
398 | |||
399 | err3: | ||
400 | proc_net_remove("ip_conntrack_expect"); | ||
401 | err2: | ||
402 | proc_net_remove("ip_conntrack"); | ||
403 | err1: | ||
404 | return -ENOMEM; | ||
405 | } | ||
406 | |||
407 | void __exit nf_conntrack_ipv4_compat_fini(void) | ||
408 | { | ||
409 | remove_proc_entry("ip_conntrack", proc_net_stat); | ||
410 | proc_net_remove("ip_conntrack_expect"); | ||
411 | proc_net_remove("ip_conntrack"); | ||
412 | } | ||
diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c index 790f00d500c3..db9e7c45d3b4 100644 --- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c +++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c | |||
@@ -22,10 +22,10 @@ | |||
22 | #include <net/checksum.h> | 22 | #include <net/checksum.h> |
23 | #include <linux/netfilter_ipv4.h> | 23 | #include <linux/netfilter_ipv4.h> |
24 | #include <net/netfilter/nf_conntrack_tuple.h> | 24 | #include <net/netfilter/nf_conntrack_tuple.h> |
25 | #include <net/netfilter/nf_conntrack_protocol.h> | 25 | #include <net/netfilter/nf_conntrack_l4proto.h> |
26 | #include <net/netfilter/nf_conntrack_core.h> | 26 | #include <net/netfilter/nf_conntrack_core.h> |
27 | 27 | ||
28 | unsigned long nf_ct_icmp_timeout __read_mostly = 30*HZ; | 28 | static unsigned long nf_ct_icmp_timeout __read_mostly = 30*HZ; |
29 | 29 | ||
30 | #if 0 | 30 | #if 0 |
31 | #define DEBUGP printk | 31 | #define DEBUGP printk |
@@ -152,7 +152,7 @@ icmp_error_message(struct sk_buff *skb, | |||
152 | struct icmphdr icmp; | 152 | struct icmphdr icmp; |
153 | struct iphdr ip; | 153 | struct iphdr ip; |
154 | } _in, *inside; | 154 | } _in, *inside; |
155 | struct nf_conntrack_protocol *innerproto; | 155 | struct nf_conntrack_l4proto *innerproto; |
156 | struct nf_conntrack_tuple_hash *h; | 156 | struct nf_conntrack_tuple_hash *h; |
157 | int dataoff; | 157 | int dataoff; |
158 | 158 | ||
@@ -170,7 +170,7 @@ icmp_error_message(struct sk_buff *skb, | |||
170 | return -NF_ACCEPT; | 170 | return -NF_ACCEPT; |
171 | } | 171 | } |
172 | 172 | ||
173 | innerproto = __nf_ct_proto_find(PF_INET, inside->ip.protocol); | 173 | innerproto = __nf_ct_l4proto_find(PF_INET, inside->ip.protocol); |
174 | dataoff = skb->nh.iph->ihl*4 + sizeof(inside->icmp); | 174 | dataoff = skb->nh.iph->ihl*4 + sizeof(inside->icmp); |
175 | /* Are they talking about one of our connections? */ | 175 | /* Are they talking about one of our connections? */ |
176 | if (!nf_ct_get_tuple(skb, dataoff, dataoff + inside->ip.ihl*4, PF_INET, | 176 | if (!nf_ct_get_tuple(skb, dataoff, dataoff + inside->ip.ihl*4, PF_INET, |
@@ -311,7 +311,7 @@ static int icmp_nfattr_to_tuple(struct nfattr *tb[], | |||
311 | tuple->dst.u.icmp.code = | 311 | tuple->dst.u.icmp.code = |
312 | *(u_int8_t *)NFA_DATA(tb[CTA_PROTO_ICMP_CODE-1]); | 312 | *(u_int8_t *)NFA_DATA(tb[CTA_PROTO_ICMP_CODE-1]); |
313 | tuple->src.u.icmp.id = | 313 | tuple->src.u.icmp.id = |
314 | *(u_int16_t *)NFA_DATA(tb[CTA_PROTO_ICMP_ID-1]); | 314 | *(__be16 *)NFA_DATA(tb[CTA_PROTO_ICMP_ID-1]); |
315 | 315 | ||
316 | if (tuple->dst.u.icmp.type >= sizeof(invmap) | 316 | if (tuple->dst.u.icmp.type >= sizeof(invmap) |
317 | || !invmap[tuple->dst.u.icmp.type]) | 317 | || !invmap[tuple->dst.u.icmp.type]) |
@@ -321,11 +321,42 @@ static int icmp_nfattr_to_tuple(struct nfattr *tb[], | |||
321 | } | 321 | } |
322 | #endif | 322 | #endif |
323 | 323 | ||
324 | struct nf_conntrack_protocol nf_conntrack_protocol_icmp = | 324 | #ifdef CONFIG_SYSCTL |
325 | static struct ctl_table_header *icmp_sysctl_header; | ||
326 | static struct ctl_table icmp_sysctl_table[] = { | ||
327 | { | ||
328 | .ctl_name = NET_NF_CONNTRACK_ICMP_TIMEOUT, | ||
329 | .procname = "nf_conntrack_icmp_timeout", | ||
330 | .data = &nf_ct_icmp_timeout, | ||
331 | .maxlen = sizeof(unsigned int), | ||
332 | .mode = 0644, | ||
333 | .proc_handler = &proc_dointvec_jiffies, | ||
334 | }, | ||
335 | { | ||
336 | .ctl_name = 0 | ||
337 | } | ||
338 | }; | ||
339 | #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT | ||
340 | static struct ctl_table icmp_compat_sysctl_table[] = { | ||
341 | { | ||
342 | .ctl_name = NET_IPV4_NF_CONNTRACK_ICMP_TIMEOUT, | ||
343 | .procname = "ip_conntrack_icmp_timeout", | ||
344 | .data = &nf_ct_icmp_timeout, | ||
345 | .maxlen = sizeof(unsigned int), | ||
346 | .mode = 0644, | ||
347 | .proc_handler = &proc_dointvec_jiffies, | ||
348 | }, | ||
349 | { | ||
350 | .ctl_name = 0 | ||
351 | } | ||
352 | }; | ||
353 | #endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */ | ||
354 | #endif /* CONFIG_SYSCTL */ | ||
355 | |||
356 | struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp = | ||
325 | { | 357 | { |
326 | .list = { NULL, NULL }, | ||
327 | .l3proto = PF_INET, | 358 | .l3proto = PF_INET, |
328 | .proto = IPPROTO_ICMP, | 359 | .l4proto = IPPROTO_ICMP, |
329 | .name = "icmp", | 360 | .name = "icmp", |
330 | .pkt_to_tuple = icmp_pkt_to_tuple, | 361 | .pkt_to_tuple = icmp_pkt_to_tuple, |
331 | .invert_tuple = icmp_invert_tuple, | 362 | .invert_tuple = icmp_invert_tuple, |
@@ -341,6 +372,12 @@ struct nf_conntrack_protocol nf_conntrack_protocol_icmp = | |||
341 | .tuple_to_nfattr = icmp_tuple_to_nfattr, | 372 | .tuple_to_nfattr = icmp_tuple_to_nfattr, |
342 | .nfattr_to_tuple = icmp_nfattr_to_tuple, | 373 | .nfattr_to_tuple = icmp_nfattr_to_tuple, |
343 | #endif | 374 | #endif |
375 | #ifdef CONFIG_SYSCTL | ||
376 | .ctl_table_header = &icmp_sysctl_header, | ||
377 | .ctl_table = icmp_sysctl_table, | ||
378 | #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT | ||
379 | .ctl_compat_table = icmp_compat_sysctl_table, | ||
380 | #endif | ||
381 | #endif | ||
344 | }; | 382 | }; |
345 | 383 | EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_icmp); | |
346 | EXPORT_SYMBOL(nf_conntrack_protocol_icmp); | ||
diff --git a/net/ipv4/netfilter/nf_nat_amanda.c b/net/ipv4/netfilter/nf_nat_amanda.c new file mode 100644 index 000000000000..0f17098917bc --- /dev/null +++ b/net/ipv4/netfilter/nf_nat_amanda.c | |||
@@ -0,0 +1,78 @@ | |||
1 | /* Amanda extension for TCP NAT alteration. | ||
2 | * (C) 2002 by Brian J. Murrell <netfilter@interlinx.bc.ca> | ||
3 | * based on a copy of HW's ip_nat_irc.c as well as other modules | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License | ||
7 | * as published by the Free Software Foundation; either version | ||
8 | * 2 of the License, or (at your option) any later version. | ||
9 | */ | ||
10 | |||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/module.h> | ||
13 | #include <linux/skbuff.h> | ||
14 | #include <linux/udp.h> | ||
15 | |||
16 | #include <net/netfilter/nf_nat_helper.h> | ||
17 | #include <net/netfilter/nf_nat_rule.h> | ||
18 | #include <net/netfilter/nf_conntrack_helper.h> | ||
19 | #include <net/netfilter/nf_conntrack_expect.h> | ||
20 | #include <linux/netfilter/nf_conntrack_amanda.h> | ||
21 | |||
22 | MODULE_AUTHOR("Brian J. Murrell <netfilter@interlinx.bc.ca>"); | ||
23 | MODULE_DESCRIPTION("Amanda NAT helper"); | ||
24 | MODULE_LICENSE("GPL"); | ||
25 | MODULE_ALIAS("ip_nat_amanda"); | ||
26 | |||
27 | static unsigned int help(struct sk_buff **pskb, | ||
28 | enum ip_conntrack_info ctinfo, | ||
29 | unsigned int matchoff, | ||
30 | unsigned int matchlen, | ||
31 | struct nf_conntrack_expect *exp) | ||
32 | { | ||
33 | char buffer[sizeof("65535")]; | ||
34 | u_int16_t port; | ||
35 | unsigned int ret; | ||
36 | |||
37 | /* Connection comes from client. */ | ||
38 | exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port; | ||
39 | exp->dir = IP_CT_DIR_ORIGINAL; | ||
40 | |||
41 | /* When you see the packet, we need to NAT it the same as | ||
42 | * this one (ie. same IP: it will be TCP and master is UDP). */ | ||
43 | exp->expectfn = nf_nat_follow_master; | ||
44 | |||
45 | /* Try to get same port: if not, try to change it. */ | ||
46 | for (port = ntohs(exp->saved_proto.tcp.port); port != 0; port++) { | ||
47 | exp->tuple.dst.u.tcp.port = htons(port); | ||
48 | if (nf_conntrack_expect_related(exp) == 0) | ||
49 | break; | ||
50 | } | ||
51 | |||
52 | if (port == 0) | ||
53 | return NF_DROP; | ||
54 | |||
55 | sprintf(buffer, "%u", port); | ||
56 | ret = nf_nat_mangle_udp_packet(pskb, exp->master, ctinfo, | ||
57 | matchoff, matchlen, | ||
58 | buffer, strlen(buffer)); | ||
59 | if (ret != NF_ACCEPT) | ||
60 | nf_conntrack_unexpect_related(exp); | ||
61 | return ret; | ||
62 | } | ||
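
As an illustration of the port-search loop above (not part of the patch): the loop relies on 16-bit wraparound to terminate, so if no port from the requested one up to 65535 can be reserved, the counter wraps to 0 and the helper returns NF_DROP. A minimal user-space sketch of that pattern, with try_reserve() as a made-up stand-in for nf_conntrack_expect_related():

#include <stdint.h>
#include <stdio.h>

/* Stand-in for nf_conntrack_expect_related(): pretend everything
 * below 65534 is already taken, so only the last ports are free. */
static int try_reserve(uint16_t port)
{
        return port >= 65534 ? 0 : -1;  /* 0 means success */
}

int main(void)
{
        uint16_t port;

        /* Same shape as the helper's loop: start at the port the client
         * asked for, count upward, stop when the 16-bit value wraps to 0. */
        for (port = 65530; port != 0; port++) {
                if (try_reserve(port) == 0)
                        break;
        }

        if (port == 0)
                printf("no port available, packet would be dropped\n");
        else
                printf("reserved port %u\n", port);
        return 0;
}
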
63 | |||
64 | static void __exit nf_nat_amanda_fini(void) | ||
65 | { | ||
66 | rcu_assign_pointer(nf_nat_amanda_hook, NULL); | ||
67 | synchronize_rcu(); | ||
68 | } | ||
69 | |||
70 | static int __init nf_nat_amanda_init(void) | ||
71 | { | ||
72 | BUG_ON(rcu_dereference(nf_nat_amanda_hook)); | ||
73 | rcu_assign_pointer(nf_nat_amanda_hook, help); | ||
74 | return 0; | ||
75 | } | ||
76 | |||
77 | module_init(nf_nat_amanda_init); | ||
78 | module_exit(nf_nat_amanda_fini); | ||
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c new file mode 100644 index 000000000000..86a92272b053 --- /dev/null +++ b/net/ipv4/netfilter/nf_nat_core.c | |||
@@ -0,0 +1,647 @@ | |||
1 | /* NAT for netfilter; shared with compatibility layer. */ | ||
2 | |||
3 | /* (C) 1999-2001 Paul `Rusty' Russell | ||
4 | * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | |||
11 | #include <linux/module.h> | ||
12 | #include <linux/types.h> | ||
13 | #include <linux/timer.h> | ||
14 | #include <linux/skbuff.h> | ||
15 | #include <linux/vmalloc.h> | ||
16 | #include <net/checksum.h> | ||
17 | #include <net/icmp.h> | ||
18 | #include <net/ip.h> | ||
19 | #include <net/tcp.h> /* For tcp_prot in getorigdst */ | ||
20 | #include <linux/icmp.h> | ||
21 | #include <linux/udp.h> | ||
22 | #include <linux/jhash.h> | ||
23 | |||
24 | #include <linux/netfilter_ipv4.h> | ||
25 | #include <net/netfilter/nf_conntrack.h> | ||
26 | #include <net/netfilter/nf_conntrack_core.h> | ||
27 | #include <net/netfilter/nf_nat.h> | ||
28 | #include <net/netfilter/nf_nat_protocol.h> | ||
29 | #include <net/netfilter/nf_nat_core.h> | ||
30 | #include <net/netfilter/nf_nat_helper.h> | ||
31 | #include <net/netfilter/nf_conntrack_helper.h> | ||
32 | #include <net/netfilter/nf_conntrack_l3proto.h> | ||
33 | #include <net/netfilter/nf_conntrack_l4proto.h> | ||
34 | |||
35 | #if 0 | ||
36 | #define DEBUGP printk | ||
37 | #else | ||
38 | #define DEBUGP(format, args...) | ||
39 | #endif | ||
40 | |||
41 | static DEFINE_RWLOCK(nf_nat_lock); | ||
42 | |||
43 | static struct nf_conntrack_l3proto *l3proto = NULL; | ||
44 | |||
45 | /* Calculated at init based on memory size */ | ||
46 | static unsigned int nf_nat_htable_size; | ||
47 | |||
48 | static struct list_head *bysource; | ||
49 | |||
50 | #define MAX_IP_NAT_PROTO 256 | ||
51 | static struct nf_nat_protocol *nf_nat_protos[MAX_IP_NAT_PROTO]; | ||
52 | |||
53 | static inline struct nf_nat_protocol * | ||
54 | __nf_nat_proto_find(u_int8_t protonum) | ||
55 | { | ||
56 | return nf_nat_protos[protonum]; | ||
57 | } | ||
58 | |||
59 | struct nf_nat_protocol * | ||
60 | nf_nat_proto_find_get(u_int8_t protonum) | ||
61 | { | ||
62 | struct nf_nat_protocol *p; | ||
63 | |||
64 | /* we need to disable preemption to make sure 'p' doesn't get | ||
65 | * removed until we've grabbed the reference */ | ||
66 | preempt_disable(); | ||
67 | p = __nf_nat_proto_find(protonum); | ||
68 | if (!try_module_get(p->me)) | ||
69 | p = &nf_nat_unknown_protocol; | ||
70 | preempt_enable(); | ||
71 | |||
72 | return p; | ||
73 | } | ||
74 | EXPORT_SYMBOL_GPL(nf_nat_proto_find_get); | ||
75 | |||
76 | void | ||
77 | nf_nat_proto_put(struct nf_nat_protocol *p) | ||
78 | { | ||
79 | module_put(p->me); | ||
80 | } | ||
81 | EXPORT_SYMBOL_GPL(nf_nat_proto_put); | ||
82 | |||
83 | /* We keep an extra hash for each conntrack, for fast searching. */ | ||
84 | static inline unsigned int | ||
85 | hash_by_src(const struct nf_conntrack_tuple *tuple) | ||
86 | { | ||
87 | /* Original src, to ensure we map it consistently if poss. */ | ||
88 | return jhash_3words((__force u32)tuple->src.u3.ip, tuple->src.u.all, | ||
89 | tuple->dst.protonum, 0) % nf_nat_htable_size; | ||
90 | } | ||
91 | |||
92 | /* No one is using the conntrack by the time this is called. */ | ||
93 | static void nf_nat_cleanup_conntrack(struct nf_conn *conn) | ||
94 | { | ||
95 | struct nf_conn_nat *nat; | ||
96 | if (!(conn->status & IPS_NAT_DONE_MASK)) | ||
97 | return; | ||
98 | |||
99 | nat = nfct_nat(conn); | ||
100 | write_lock_bh(&nf_nat_lock); | ||
101 | list_del(&nat->info.bysource); | ||
102 | write_unlock_bh(&nf_nat_lock); | ||
103 | } | ||
104 | |||
105 | /* Is this tuple already taken? (not by us) */ | ||
106 | int | ||
107 | nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple, | ||
108 | const struct nf_conn *ignored_conntrack) | ||
109 | { | ||
110 | /* Conntrack tracking doesn't keep track of outgoing tuples; only | ||
111 | incoming ones. NAT means they don't have a fixed mapping, | ||
112 | so we invert the tuple and look for the incoming reply. | ||
113 | |||
114 | We could keep a separate hash if this proves too slow. */ | ||
115 | struct nf_conntrack_tuple reply; | ||
116 | |||
117 | nf_ct_invert_tuplepr(&reply, tuple); | ||
118 | return nf_conntrack_tuple_taken(&reply, ignored_conntrack); | ||
119 | } | ||
120 | EXPORT_SYMBOL(nf_nat_used_tuple); | ||
121 | |||
122 | /* If we source map this tuple so reply looks like reply_tuple, will | ||
123 | * that meet the constraints of range. */ | ||
124 | static int | ||
125 | in_range(const struct nf_conntrack_tuple *tuple, | ||
126 | const struct nf_nat_range *range) | ||
127 | { | ||
128 | struct nf_nat_protocol *proto; | ||
129 | |||
130 | proto = __nf_nat_proto_find(tuple->dst.protonum); | ||
131 | /* If we are supposed to map IPs, then we must be in the | ||
132 | range specified, otherwise let this drag us onto a new src IP. */ | ||
133 | if (range->flags & IP_NAT_RANGE_MAP_IPS) { | ||
134 | if (ntohl(tuple->src.u3.ip) < ntohl(range->min_ip) || | ||
135 | ntohl(tuple->src.u3.ip) > ntohl(range->max_ip)) | ||
136 | return 0; | ||
137 | } | ||
138 | |||
139 | if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED) || | ||
140 | proto->in_range(tuple, IP_NAT_MANIP_SRC, | ||
141 | &range->min, &range->max)) | ||
142 | return 1; | ||
143 | |||
144 | return 0; | ||
145 | } | ||
146 | |||
147 | static inline int | ||
148 | same_src(const struct nf_conn *ct, | ||
149 | const struct nf_conntrack_tuple *tuple) | ||
150 | { | ||
151 | const struct nf_conntrack_tuple *t; | ||
152 | |||
153 | t = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple; | ||
154 | return (t->dst.protonum == tuple->dst.protonum && | ||
155 | t->src.u3.ip == tuple->src.u3.ip && | ||
156 | t->src.u.all == tuple->src.u.all); | ||
157 | } | ||
158 | |||
159 | /* Only called for SRC manip */ | ||
160 | static int | ||
161 | find_appropriate_src(const struct nf_conntrack_tuple *tuple, | ||
162 | struct nf_conntrack_tuple *result, | ||
163 | const struct nf_nat_range *range) | ||
164 | { | ||
165 | unsigned int h = hash_by_src(tuple); | ||
166 | struct nf_conn_nat *nat; | ||
167 | struct nf_conn *ct; | ||
168 | |||
169 | read_lock_bh(&nf_nat_lock); | ||
170 | list_for_each_entry(nat, &bysource[h], info.bysource) { | ||
171 | ct = (struct nf_conn *)((char *)nat - offsetof(struct nf_conn, data)); | ||
172 | if (same_src(ct, tuple)) { | ||
173 | /* Copy source part from reply tuple. */ | ||
174 | nf_ct_invert_tuplepr(result, | ||
175 | &ct->tuplehash[IP_CT_DIR_REPLY].tuple); | ||
176 | result->dst = tuple->dst; | ||
177 | |||
178 | if (in_range(result, range)) { | ||
179 | read_unlock_bh(&nf_nat_lock); | ||
180 | return 1; | ||
181 | } | ||
182 | } | ||
183 | } | ||
184 | read_unlock_bh(&nf_nat_lock); | ||
185 | return 0; | ||
186 | } | ||
187 | |||
188 | /* For [FUTURE] fragmentation handling, we want the least-used | ||
189 | src-ip/dst-ip/proto triple. Fairness doesn't come into it. Thus | ||
190 | if the range specifies 1.2.3.4 ports 10000-10005 and 1.2.3.5 ports | ||
191 | 1-65535, we don't do pro-rata allocation based on ports; we choose | ||
192 | the ip with the lowest src-ip/dst-ip/proto usage. | ||
193 | */ | ||
194 | static void | ||
195 | find_best_ips_proto(struct nf_conntrack_tuple *tuple, | ||
196 | const struct nf_nat_range *range, | ||
197 | const struct nf_conn *ct, | ||
198 | enum nf_nat_manip_type maniptype) | ||
199 | { | ||
200 | __be32 *var_ipp; | ||
201 | /* Host order */ | ||
202 | u_int32_t minip, maxip, j; | ||
203 | |||
204 | /* No IP mapping? Do nothing. */ | ||
205 | if (!(range->flags & IP_NAT_RANGE_MAP_IPS)) | ||
206 | return; | ||
207 | |||
208 | if (maniptype == IP_NAT_MANIP_SRC) | ||
209 | var_ipp = &tuple->src.u3.ip; | ||
210 | else | ||
211 | var_ipp = &tuple->dst.u3.ip; | ||
212 | |||
213 | /* Fast path: only one choice. */ | ||
214 | if (range->min_ip == range->max_ip) { | ||
215 | *var_ipp = range->min_ip; | ||
216 | return; | ||
217 | } | ||
218 | |||
219 | /* Hashing source and destination IPs gives a fairly even | ||
220 | * spread in practice (if there are a small number of IPs | ||
221 | * involved, there usually aren't that many connections | ||
222 | * anyway). The consistency means that servers see the same | ||
223 | * client coming from the same IP (some Internet Banking sites | ||
224 | * like this), even across reboots. */ | ||
225 | minip = ntohl(range->min_ip); | ||
226 | maxip = ntohl(range->max_ip); | ||
227 | j = jhash_2words((__force u32)tuple->src.u3.ip, | ||
228 | (__force u32)tuple->dst.u3.ip, 0); | ||
229 | *var_ipp = htonl(minip + j % (maxip - minip + 1)); | ||
230 | } | ||
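
A user-space sketch of the address-selection arithmetic above (not part of the patch): the mapped address is a pure function of the flow's endpoints, which is what keeps one client on the same NAT address across packets and reboots. mix2() is a stand-in for the kernel's jhash_2words(); the addresses are illustrative.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for jhash_2words(); any deterministic mixer shows the idea. */
static uint32_t mix2(uint32_t a, uint32_t b)
{
        uint32_t h = a * 2654435761u ^ b * 40503u;
        return h ^ (h >> 16);
}

/* Pick one address (host order) from [minip, maxip], keyed by the flow. */
static uint32_t pick_snat_ip(uint32_t minip, uint32_t maxip,
                             uint32_t saddr, uint32_t daddr)
{
        uint32_t j = mix2(saddr, daddr);

        return minip + j % (maxip - minip + 1);
}

int main(void)
{
        uint32_t min = 0xc0a80a01;      /* 192.168.10.1 */
        uint32_t max = 0xc0a80a04;      /* 192.168.10.4 */

        /* Same flow twice -> same mapped address; a different client
         * may land on a different address in the range. */
        printf("%08x\n", (unsigned)pick_snat_ip(min, max, 0x0a000001, 0x08080808));
        printf("%08x\n", (unsigned)pick_snat_ip(min, max, 0x0a000001, 0x08080808));
        printf("%08x\n", (unsigned)pick_snat_ip(min, max, 0x0a000002, 0x08080808));
        return 0;
}
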
231 | |||
232 | /* Manipulate the tuple into the range given. For NF_IP_POST_ROUTING, | ||
233 | * we change the source to map into the range. For NF_IP_PRE_ROUTING | ||
234 | * and NF_IP_LOCAL_OUT, we change the destination to map into the | ||
235 | * range. It might not be possible to get a unique tuple, but we try. | ||
236 | * At worst (or if we race), we will end up with a final duplicate in | ||
237 | * __ip_conntrack_confirm and drop the packet. */ | ||
238 | static void | ||
239 | get_unique_tuple(struct nf_conntrack_tuple *tuple, | ||
240 | const struct nf_conntrack_tuple *orig_tuple, | ||
241 | const struct nf_nat_range *range, | ||
242 | struct nf_conn *ct, | ||
243 | enum nf_nat_manip_type maniptype) | ||
244 | { | ||
245 | struct nf_nat_protocol *proto; | ||
246 | |||
247 | /* 1) If this srcip/proto/src-proto-part is currently mapped, | ||
248 | and that same mapping gives a unique tuple within the given | ||
249 | range, use that. | ||
250 | |||
251 | This is only required for source (ie. NAT/masq) mappings. | ||
252 | So far, we don't do local source mappings, so multiple | ||
253 | manips are not an issue. */ | ||
254 | if (maniptype == IP_NAT_MANIP_SRC) { | ||
255 | if (find_appropriate_src(orig_tuple, tuple, range)) { | ||
256 | DEBUGP("get_unique_tuple: Found current src map\n"); | ||
257 | if (!nf_nat_used_tuple(tuple, ct)) | ||
258 | return; | ||
259 | } | ||
260 | } | ||
261 | |||
262 | /* 2) Select the least-used IP/proto combination in the given | ||
263 | range. */ | ||
264 | *tuple = *orig_tuple; | ||
265 | find_best_ips_proto(tuple, range, ct, maniptype); | ||
266 | |||
267 | /* 3) The per-protocol part of the manip is made to map into | ||
268 | the range to make a unique tuple. */ | ||
269 | |||
270 | proto = nf_nat_proto_find_get(orig_tuple->dst.protonum); | ||
271 | |||
272 | /* Only bother mapping if it's not already in range and unique */ | ||
273 | if ((!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED) || | ||
274 | proto->in_range(tuple, maniptype, &range->min, &range->max)) && | ||
275 | !nf_nat_used_tuple(tuple, ct)) { | ||
276 | nf_nat_proto_put(proto); | ||
277 | return; | ||
278 | } | ||
279 | |||
280 | /* Last chance: get protocol to try to obtain a unique tuple. */ | ||
281 | proto->unique_tuple(tuple, range, maniptype, ct); | ||
282 | |||
283 | nf_nat_proto_put(proto); | ||
284 | } | ||
285 | |||
286 | unsigned int | ||
287 | nf_nat_setup_info(struct nf_conn *ct, | ||
288 | const struct nf_nat_range *range, | ||
289 | unsigned int hooknum) | ||
290 | { | ||
291 | struct nf_conntrack_tuple curr_tuple, new_tuple; | ||
292 | struct nf_conn_nat *nat = nfct_nat(ct); | ||
293 | struct nf_nat_info *info = &nat->info; | ||
294 | int have_to_hash = !(ct->status & IPS_NAT_DONE_MASK); | ||
295 | enum nf_nat_manip_type maniptype = HOOK2MANIP(hooknum); | ||
296 | |||
297 | NF_CT_ASSERT(hooknum == NF_IP_PRE_ROUTING || | ||
298 | hooknum == NF_IP_POST_ROUTING || | ||
299 | hooknum == NF_IP_LOCAL_IN || | ||
300 | hooknum == NF_IP_LOCAL_OUT); | ||
301 | BUG_ON(nf_nat_initialized(ct, maniptype)); | ||
302 | |||
303 | /* What we've got will look like inverse of reply. Normally | ||
304 | this is what is in the conntrack, except for prior | ||
305 | manipulations (future optimization: if num_manips == 0, | ||
306 | orig_tp = | ||
307 | conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple) */ | ||
308 | nf_ct_invert_tuplepr(&curr_tuple, | ||
309 | &ct->tuplehash[IP_CT_DIR_REPLY].tuple); | ||
310 | |||
311 | get_unique_tuple(&new_tuple, &curr_tuple, range, ct, maniptype); | ||
312 | |||
313 | if (!nf_ct_tuple_equal(&new_tuple, &curr_tuple)) { | ||
314 | struct nf_conntrack_tuple reply; | ||
315 | |||
316 | /* Alter conntrack table so will recognize replies. */ | ||
317 | nf_ct_invert_tuplepr(&reply, &new_tuple); | ||
318 | nf_conntrack_alter_reply(ct, &reply); | ||
319 | |||
320 | /* Non-atomic: we own this at the moment. */ | ||
321 | if (maniptype == IP_NAT_MANIP_SRC) | ||
322 | ct->status |= IPS_SRC_NAT; | ||
323 | else | ||
324 | ct->status |= IPS_DST_NAT; | ||
325 | } | ||
326 | |||
327 | /* Place in source hash if this is the first time. */ | ||
328 | if (have_to_hash) { | ||
329 | unsigned int srchash; | ||
330 | |||
331 | srchash = hash_by_src(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); | ||
332 | write_lock_bh(&nf_nat_lock); | ||
333 | list_add(&info->bysource, &bysource[srchash]); | ||
334 | write_unlock_bh(&nf_nat_lock); | ||
335 | } | ||
336 | |||
337 | /* It's done. */ | ||
338 | if (maniptype == IP_NAT_MANIP_DST) | ||
339 | set_bit(IPS_DST_NAT_DONE_BIT, &ct->status); | ||
340 | else | ||
341 | set_bit(IPS_SRC_NAT_DONE_BIT, &ct->status); | ||
342 | |||
343 | return NF_ACCEPT; | ||
344 | } | ||
345 | EXPORT_SYMBOL(nf_nat_setup_info); | ||
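
For context (not part of the patch), a minimal sketch of how a caller such as a SNAT target or an expectation callback would drive nf_nat_setup_info(): fill an nf_nat_range and pick the hook whose HOOK2MANIP() value gives the desired manip type. Kernel context is assumed; the helper name is illustrative.

#include <linux/netfilter_ipv4.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_core.h>

/* Map everything from this conntrack to a single source address.
 * NF_IP_POST_ROUTING makes HOOK2MANIP() pick IP_NAT_MANIP_SRC. */
static unsigned int setup_snat_to(struct nf_conn *ct, __be32 addr)
{
        struct nf_nat_range range = {
                .flags  = IP_NAT_RANGE_MAP_IPS,
                .min_ip = addr,
                .max_ip = addr,
        };

        return nf_nat_setup_info(ct, &range, NF_IP_POST_ROUTING);
}
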
346 | |||
347 | /* Returns true if succeeded. */ | ||
348 | static int | ||
349 | manip_pkt(u_int16_t proto, | ||
350 | struct sk_buff **pskb, | ||
351 | unsigned int iphdroff, | ||
352 | const struct nf_conntrack_tuple *target, | ||
353 | enum nf_nat_manip_type maniptype) | ||
354 | { | ||
355 | struct iphdr *iph; | ||
356 | struct nf_nat_protocol *p; | ||
357 | |||
358 | if (!skb_make_writable(pskb, iphdroff + sizeof(*iph))) | ||
359 | return 0; | ||
360 | |||
361 | iph = (void *)(*pskb)->data + iphdroff; | ||
362 | |||
363 | /* Manipulate protocol part. */ | ||
364 | p = nf_nat_proto_find_get(proto); | ||
365 | if (!p->manip_pkt(pskb, iphdroff, target, maniptype)) { | ||
366 | nf_nat_proto_put(p); | ||
367 | return 0; | ||
368 | } | ||
369 | nf_nat_proto_put(p); | ||
370 | |||
371 | iph = (void *)(*pskb)->data + iphdroff; | ||
372 | |||
373 | if (maniptype == IP_NAT_MANIP_SRC) { | ||
374 | nf_csum_replace4(&iph->check, iph->saddr, target->src.u3.ip); | ||
375 | iph->saddr = target->src.u3.ip; | ||
376 | } else { | ||
377 | nf_csum_replace4(&iph->check, iph->daddr, target->dst.u3.ip); | ||
378 | iph->daddr = target->dst.u3.ip; | ||
379 | } | ||
380 | return 1; | ||
381 | } | ||
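
A user-space model of the incremental checksum fix that nf_csum_replace4() performs above (not part of the patch; function names are stand-ins): when a 32-bit field such as an address changes, the header checksum can be patched from the old and new values alone, RFC 1624 style, instead of being recomputed over the whole header. The main() below cross-checks the incremental result against a full recomputation.

#include <stdint.h>
#include <stdio.h>

/* Fold a wide sum into 16 bits, ones-complement style. */
static uint16_t csum_fold32(uint32_t sum)
{
        while (sum >> 16)
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)sum;
}

/* Incremental update when one 32-bit field changes, in the spirit of
 * RFC 1624:  HC' = ~(~HC + ~m + m'), computed on 16-bit halves. */
static uint16_t csum_replace4(uint16_t check, uint32_t from, uint32_t to)
{
        uint32_t sum = (uint16_t)~check;

        sum += (uint16_t)~(from >> 16);
        sum += (uint16_t)~(from & 0xffff);
        sum += to >> 16;
        sum += to & 0xffff;

        return (uint16_t)~csum_fold32(sum);
}

/* Full recomputation, for checking the incremental result. */
static uint16_t csum_full(const uint16_t *words, int n)
{
        uint32_t sum = 0;
        int i;

        for (i = 0; i < n; i++)
                sum += words[i];
        return (uint16_t)~csum_fold32(sum);
}

int main(void)
{
        uint16_t hdr[10] = { 0x4500, 0x0054, 0x1c46, 0x4000, 0x4001,
                             0x0000, 0xc0a8, 0x0001, 0xc0a8, 0x00c7 };
        uint32_t old_saddr = 0xc0a80001, new_saddr = 0x0a000001;
        uint16_t check, incr;

        check = csum_full(hdr, 10);             /* checksum field is zero */
        hdr[5] = check;

        /* Rewrite the source address and patch the checksum in place. */
        incr = csum_replace4(check, old_saddr, new_saddr);
        hdr[6] = (uint16_t)(new_saddr >> 16);
        hdr[7] = (uint16_t)(new_saddr & 0xffff);
        hdr[5] = 0;
        printf("incremental %04x, recomputed %04x\n",   /* the two match */
               (unsigned)incr, (unsigned)csum_full(hdr, 10));
        return 0;
}
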
382 | |||
383 | /* Do packet manipulations according to nf_nat_setup_info. */ | ||
384 | unsigned int nf_nat_packet(struct nf_conn *ct, | ||
385 | enum ip_conntrack_info ctinfo, | ||
386 | unsigned int hooknum, | ||
387 | struct sk_buff **pskb) | ||
388 | { | ||
389 | enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); | ||
390 | unsigned long statusbit; | ||
391 | enum nf_nat_manip_type mtype = HOOK2MANIP(hooknum); | ||
392 | |||
393 | if (mtype == IP_NAT_MANIP_SRC) | ||
394 | statusbit = IPS_SRC_NAT; | ||
395 | else | ||
396 | statusbit = IPS_DST_NAT; | ||
397 | |||
398 | /* Invert if this is reply dir. */ | ||
399 | if (dir == IP_CT_DIR_REPLY) | ||
400 | statusbit ^= IPS_NAT_MASK; | ||
401 | |||
402 | /* Non-atomic: these bits don't change. */ | ||
403 | if (ct->status & statusbit) { | ||
404 | struct nf_conntrack_tuple target; | ||
405 | |||
406 | /* We are aiming to look like inverse of other direction. */ | ||
407 | nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple); | ||
408 | |||
409 | if (!manip_pkt(target.dst.protonum, pskb, 0, &target, mtype)) | ||
410 | return NF_DROP; | ||
411 | } | ||
412 | return NF_ACCEPT; | ||
413 | } | ||
414 | EXPORT_SYMBOL_GPL(nf_nat_packet); | ||
415 | |||
416 | /* Dir is direction ICMP is coming from (opposite to packet it contains) */ | ||
417 | int nf_nat_icmp_reply_translation(struct nf_conn *ct, | ||
418 | enum ip_conntrack_info ctinfo, | ||
419 | unsigned int hooknum, | ||
420 | struct sk_buff **pskb) | ||
421 | { | ||
422 | struct { | ||
423 | struct icmphdr icmp; | ||
424 | struct iphdr ip; | ||
425 | } *inside; | ||
426 | struct nf_conntrack_tuple inner, target; | ||
427 | int hdrlen = (*pskb)->nh.iph->ihl * 4; | ||
428 | enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); | ||
429 | unsigned long statusbit; | ||
430 | enum nf_nat_manip_type manip = HOOK2MANIP(hooknum); | ||
431 | |||
432 | if (!skb_make_writable(pskb, hdrlen + sizeof(*inside))) | ||
433 | return 0; | ||
434 | |||
435 | inside = (void *)(*pskb)->data + (*pskb)->nh.iph->ihl*4; | ||
436 | |||
437 | /* We're actually going to mangle it beyond trivial checksum | ||
438 | adjustment, so make sure the current checksum is correct. */ | ||
439 | if (nf_ip_checksum(*pskb, hooknum, hdrlen, 0)) | ||
440 | return 0; | ||
441 | |||
442 | /* Must be RELATED */ | ||
443 | NF_CT_ASSERT((*pskb)->nfctinfo == IP_CT_RELATED || | ||
444 | (*pskb)->nfctinfo == IP_CT_RELATED+IP_CT_IS_REPLY); | ||
445 | |||
446 | /* Redirects on non-null nats must be dropped, else they'll | ||
447 | start talking to each other without our translation, and be | ||
448 | confused... --RR */ | ||
449 | if (inside->icmp.type == ICMP_REDIRECT) { | ||
450 | /* If NAT isn't finished, assume it and drop. */ | ||
451 | if ((ct->status & IPS_NAT_DONE_MASK) != IPS_NAT_DONE_MASK) | ||
452 | return 0; | ||
453 | |||
454 | if (ct->status & IPS_NAT_MASK) | ||
455 | return 0; | ||
456 | } | ||
457 | |||
458 | DEBUGP("icmp_reply_translation: translating error %p manip %u dir %s\n", | ||
459 | *pskb, manip, dir == IP_CT_DIR_ORIGINAL ? "ORIG" : "REPLY"); | ||
460 | |||
461 | if (!nf_ct_get_tuple(*pskb, | ||
462 | (*pskb)->nh.iph->ihl*4 + sizeof(struct icmphdr), | ||
463 | (*pskb)->nh.iph->ihl*4 + | ||
464 | sizeof(struct icmphdr) + inside->ip.ihl*4, | ||
465 | (u_int16_t)AF_INET, | ||
466 | inside->ip.protocol, | ||
467 | &inner, | ||
468 | l3proto, | ||
469 | __nf_ct_l4proto_find((u_int16_t)PF_INET, | ||
470 | inside->ip.protocol))) | ||
471 | return 0; | ||
472 | |||
473 | /* Change inner back to look like incoming packet. We do the | ||
474 | opposite manip on this hook to normal, because it might not | ||
475 | pass all hooks (locally-generated ICMP). Consider incoming | ||
476 | packet: PREROUTING (DST manip), routing produces ICMP, goes | ||
477 | through POSTROUTING (which must correct the DST manip). */ | ||
478 | if (!manip_pkt(inside->ip.protocol, pskb, | ||
479 | (*pskb)->nh.iph->ihl*4 + sizeof(inside->icmp), | ||
480 | &ct->tuplehash[!dir].tuple, | ||
481 | !manip)) | ||
482 | return 0; | ||
483 | |||
484 | if ((*pskb)->ip_summed != CHECKSUM_PARTIAL) { | ||
485 | /* Reloading "inside" here since manip_pkt may have moved the data. */ | ||
486 | inside = (void *)(*pskb)->data + (*pskb)->nh.iph->ihl*4; | ||
487 | inside->icmp.checksum = 0; | ||
488 | inside->icmp.checksum = | ||
489 | csum_fold(skb_checksum(*pskb, hdrlen, | ||
490 | (*pskb)->len - hdrlen, 0)); | ||
491 | } | ||
492 | |||
493 | /* Change outer to look the reply to an incoming packet | ||
494 | * (proto 0 means don't invert per-proto part). */ | ||
495 | if (manip == IP_NAT_MANIP_SRC) | ||
496 | statusbit = IPS_SRC_NAT; | ||
497 | else | ||
498 | statusbit = IPS_DST_NAT; | ||
499 | |||
500 | /* Invert if this is reply dir. */ | ||
501 | if (dir == IP_CT_DIR_REPLY) | ||
502 | statusbit ^= IPS_NAT_MASK; | ||
503 | |||
504 | if (ct->status & statusbit) { | ||
505 | nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple); | ||
506 | if (!manip_pkt(0, pskb, 0, &target, manip)) | ||
507 | return 0; | ||
508 | } | ||
509 | |||
510 | return 1; | ||
511 | } | ||
512 | EXPORT_SYMBOL_GPL(nf_nat_icmp_reply_translation); | ||
513 | |||
514 | /* Protocol registration. */ | ||
515 | int nf_nat_protocol_register(struct nf_nat_protocol *proto) | ||
516 | { | ||
517 | int ret = 0; | ||
518 | |||
519 | write_lock_bh(&nf_nat_lock); | ||
520 | if (nf_nat_protos[proto->protonum] != &nf_nat_unknown_protocol) { | ||
521 | ret = -EBUSY; | ||
522 | goto out; | ||
523 | } | ||
524 | nf_nat_protos[proto->protonum] = proto; | ||
525 | out: | ||
526 | write_unlock_bh(&nf_nat_lock); | ||
527 | return ret; | ||
528 | } | ||
529 | EXPORT_SYMBOL(nf_nat_protocol_register); | ||
530 | |||
531 | /* No one stores the protocol anywhere; simply delete it. */ | ||
532 | void nf_nat_protocol_unregister(struct nf_nat_protocol *proto) | ||
533 | { | ||
534 | write_lock_bh(&nf_nat_lock); | ||
535 | nf_nat_protos[proto->protonum] = &nf_nat_unknown_protocol; | ||
536 | write_unlock_bh(&nf_nat_lock); | ||
537 | |||
538 | /* Someone could be still looking at the proto in a bh. */ | ||
539 | synchronize_net(); | ||
540 | } | ||
541 | EXPORT_SYMBOL(nf_nat_protocol_unregister); | ||
542 | |||
543 | #if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \ | ||
544 | defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE) | ||
545 | int | ||
546 | nf_nat_port_range_to_nfattr(struct sk_buff *skb, | ||
547 | const struct nf_nat_range *range) | ||
548 | { | ||
549 | NFA_PUT(skb, CTA_PROTONAT_PORT_MIN, sizeof(__be16), | ||
550 | &range->min.tcp.port); | ||
551 | NFA_PUT(skb, CTA_PROTONAT_PORT_MAX, sizeof(__be16), | ||
552 | &range->max.tcp.port); | ||
553 | |||
554 | return 0; | ||
555 | |||
556 | nfattr_failure: | ||
557 | return -1; | ||
558 | } | ||
559 | EXPORT_SYMBOL_GPL(nf_nat_port_range_to_nfattr); | ||
560 | |||
561 | int | ||
562 | nf_nat_port_nfattr_to_range(struct nfattr *tb[], struct nf_nat_range *range) | ||
563 | { | ||
564 | int ret = 0; | ||
565 | |||
566 | /* we have to return whether we actually parsed something or not */ | ||
567 | |||
568 | if (tb[CTA_PROTONAT_PORT_MIN-1]) { | ||
569 | ret = 1; | ||
570 | range->min.tcp.port = | ||
571 | *(__be16 *)NFA_DATA(tb[CTA_PROTONAT_PORT_MIN-1]); | ||
572 | } | ||
573 | |||
574 | if (!tb[CTA_PROTONAT_PORT_MAX-1]) { | ||
575 | if (ret) | ||
576 | range->max.tcp.port = range->min.tcp.port; | ||
577 | } else { | ||
578 | ret = 1; | ||
579 | range->max.tcp.port = | ||
580 | *(__be16 *)NFA_DATA(tb[CTA_PROTONAT_PORT_MAX-1]); | ||
581 | } | ||
582 | |||
583 | return ret; | ||
584 | } | ||
585 | EXPORT_SYMBOL_GPL(nf_nat_port_nfattr_to_range); | ||
586 | #endif | ||
587 | |||
588 | static int __init nf_nat_init(void) | ||
589 | { | ||
590 | size_t i; | ||
591 | |||
592 | /* Leave them the same for the moment. */ | ||
593 | nf_nat_htable_size = nf_conntrack_htable_size; | ||
594 | |||
595 | /* One vmalloc for the single bysource hash table */ | ||
596 | bysource = vmalloc(sizeof(struct list_head) * nf_nat_htable_size); | ||
597 | if (!bysource) | ||
598 | return -ENOMEM; | ||
599 | |||
600 | /* Sew in builtin protocols. */ | ||
601 | write_lock_bh(&nf_nat_lock); | ||
602 | for (i = 0; i < MAX_IP_NAT_PROTO; i++) | ||
603 | nf_nat_protos[i] = &nf_nat_unknown_protocol; | ||
604 | nf_nat_protos[IPPROTO_TCP] = &nf_nat_protocol_tcp; | ||
605 | nf_nat_protos[IPPROTO_UDP] = &nf_nat_protocol_udp; | ||
606 | nf_nat_protos[IPPROTO_ICMP] = &nf_nat_protocol_icmp; | ||
607 | write_unlock_bh(&nf_nat_lock); | ||
608 | |||
609 | for (i = 0; i < nf_nat_htable_size; i++) { | ||
610 | INIT_LIST_HEAD(&bysource[i]); | ||
611 | } | ||
612 | |||
613 | /* FIXME: Man, this is a hack. <SIGH> */ | ||
614 | NF_CT_ASSERT(nf_conntrack_destroyed == NULL); | ||
615 | nf_conntrack_destroyed = &nf_nat_cleanup_conntrack; | ||
616 | |||
617 | /* Initialize fake conntrack so that NAT will skip it */ | ||
618 | nf_conntrack_untracked.status |= IPS_NAT_DONE_MASK; | ||
619 | |||
620 | l3proto = nf_ct_l3proto_find_get((u_int16_t)AF_INET); | ||
621 | return 0; | ||
622 | } | ||
623 | |||
624 | /* Clear NAT section of all conntracks, in case we're loaded again. */ | ||
625 | static int clean_nat(struct nf_conn *i, void *data) | ||
626 | { | ||
627 | struct nf_conn_nat *nat = nfct_nat(i); | ||
628 | |||
629 | if (!nat) | ||
630 | return 0; | ||
631 | memset(nat, 0, sizeof(*nat)); | ||
632 | i->status &= ~(IPS_NAT_MASK | IPS_NAT_DONE_MASK | IPS_SEQ_ADJUST); | ||
633 | return 0; | ||
634 | } | ||
635 | |||
636 | static void __exit nf_nat_cleanup(void) | ||
637 | { | ||
638 | nf_ct_iterate_cleanup(&clean_nat, NULL); | ||
639 | nf_conntrack_destroyed = NULL; | ||
640 | vfree(bysource); | ||
641 | nf_ct_l3proto_put(l3proto); | ||
642 | } | ||
643 | |||
644 | MODULE_LICENSE("GPL"); | ||
645 | |||
646 | module_init(nf_nat_init); | ||
647 | module_exit(nf_nat_cleanup); | ||
diff --git a/net/ipv4/netfilter/nf_nat_ftp.c b/net/ipv4/netfilter/nf_nat_ftp.c new file mode 100644 index 000000000000..751b59801755 --- /dev/null +++ b/net/ipv4/netfilter/nf_nat_ftp.c | |||
@@ -0,0 +1,179 @@ | |||
1 | /* FTP extension for TCP NAT alteration. */ | ||
2 | |||
3 | /* (C) 1999-2001 Paul `Rusty' Russell | ||
4 | * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | |||
11 | #include <linux/module.h> | ||
12 | #include <linux/moduleparam.h> | ||
13 | #include <linux/ip.h> | ||
14 | #include <linux/tcp.h> | ||
15 | #include <linux/netfilter_ipv4.h> | ||
16 | #include <net/netfilter/nf_nat.h> | ||
17 | #include <net/netfilter/nf_nat_helper.h> | ||
18 | #include <net/netfilter/nf_nat_rule.h> | ||
19 | #include <net/netfilter/nf_conntrack_helper.h> | ||
20 | #include <net/netfilter/nf_conntrack_expect.h> | ||
21 | #include <linux/netfilter/nf_conntrack_ftp.h> | ||
22 | |||
23 | MODULE_LICENSE("GPL"); | ||
24 | MODULE_AUTHOR("Rusty Russell <rusty@rustcorp.com.au>"); | ||
25 | MODULE_DESCRIPTION("ftp NAT helper"); | ||
26 | MODULE_ALIAS("ip_nat_ftp"); | ||
27 | |||
28 | #if 0 | ||
29 | #define DEBUGP printk | ||
30 | #else | ||
31 | #define DEBUGP(format, args...) | ||
32 | #endif | ||
33 | |||
34 | /* FIXME: Time out? --RR */ | ||
35 | |||
36 | static int | ||
37 | mangle_rfc959_packet(struct sk_buff **pskb, | ||
38 | __be32 newip, | ||
39 | u_int16_t port, | ||
40 | unsigned int matchoff, | ||
41 | unsigned int matchlen, | ||
42 | struct nf_conn *ct, | ||
43 | enum ip_conntrack_info ctinfo, | ||
44 | u32 *seq) | ||
45 | { | ||
46 | char buffer[sizeof("nnn,nnn,nnn,nnn,nnn,nnn")]; | ||
47 | |||
48 | sprintf(buffer, "%u,%u,%u,%u,%u,%u", | ||
49 | NIPQUAD(newip), port>>8, port&0xFF); | ||
50 | |||
51 | DEBUGP("calling nf_nat_mangle_tcp_packet\n"); | ||
52 | |||
53 | *seq += strlen(buffer) - matchlen; | ||
54 | return nf_nat_mangle_tcp_packet(pskb, ct, ctinfo, matchoff, | ||
55 | matchlen, buffer, strlen(buffer)); | ||
56 | } | ||
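
As a small aside (not part of the patch), the RFC 959 PORT/PASV argument built above encodes the address as four decimal octets followed by the port split into its high and low bytes, which is what the NIPQUAD(newip), port>>8, port&0xFF arguments produce. A stand-alone illustration with made-up values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t ip = 0x84eb0102;       /* 132.235.1.2 in host order */
        uint16_t port = 6275;
        char buffer[sizeof("nnn,nnn,nnn,nnn,nnn,nnn")];

        /* Same layout the helper writes into the mangled packet:
         * four address octets, then port high byte, then port low byte. */
        snprintf(buffer, sizeof(buffer), "%u,%u,%u,%u,%u,%u",
                 (unsigned)((ip >> 24) & 0xff), (unsigned)((ip >> 16) & 0xff),
                 (unsigned)((ip >> 8) & 0xff), (unsigned)(ip & 0xff),
                 (unsigned)(port >> 8), (unsigned)(port & 0xff));
        printf("PORT %s\n", buffer);    /* PORT 132,235,1,2,24,131 */
        return 0;
}
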
57 | |||
58 | /* |1|132.235.1.2|6275| */ | ||
59 | static int | ||
60 | mangle_eprt_packet(struct sk_buff **pskb, | ||
61 | __be32 newip, | ||
62 | u_int16_t port, | ||
63 | unsigned int matchoff, | ||
64 | unsigned int matchlen, | ||
65 | struct nf_conn *ct, | ||
66 | enum ip_conntrack_info ctinfo, | ||
67 | u32 *seq) | ||
68 | { | ||
69 | char buffer[sizeof("|1|255.255.255.255|65535|")]; | ||
70 | |||
71 | sprintf(buffer, "|1|%u.%u.%u.%u|%u|", NIPQUAD(newip), port); | ||
72 | |||
73 | DEBUGP("calling nf_nat_mangle_tcp_packet\n"); | ||
74 | |||
75 | *seq += strlen(buffer) - matchlen; | ||
76 | return nf_nat_mangle_tcp_packet(pskb, ct, ctinfo, matchoff, | ||
77 | matchlen, buffer, strlen(buffer)); | ||
78 | } | ||
79 | |||
80 | /* |||6275| */ | ||
81 | static int | ||
82 | mangle_epsv_packet(struct sk_buff **pskb, | ||
83 | __be32 newip, | ||
84 | u_int16_t port, | ||
85 | unsigned int matchoff, | ||
86 | unsigned int matchlen, | ||
87 | struct nf_conn *ct, | ||
88 | enum ip_conntrack_info ctinfo, | ||
89 | u32 *seq) | ||
90 | { | ||
91 | char buffer[sizeof("|||65535|")]; | ||
92 | |||
93 | sprintf(buffer, "|||%u|", port); | ||
94 | |||
95 | DEBUGP("calling nf_nat_mangle_tcp_packet\n"); | ||
96 | |||
97 | *seq += strlen(buffer) - matchlen; | ||
98 | return nf_nat_mangle_tcp_packet(pskb, ct, ctinfo, matchoff, | ||
99 | matchlen, buffer, strlen(buffer)); | ||
100 | } | ||
101 | |||
102 | static int (*mangle[])(struct sk_buff **, __be32, u_int16_t, | ||
103 | unsigned int, unsigned int, struct nf_conn *, | ||
104 | enum ip_conntrack_info, u32 *seq) | ||
105 | = { | ||
106 | [NF_CT_FTP_PORT] = mangle_rfc959_packet, | ||
107 | [NF_CT_FTP_PASV] = mangle_rfc959_packet, | ||
108 | [NF_CT_FTP_EPRT] = mangle_eprt_packet, | ||
109 | [NF_CT_FTP_EPSV] = mangle_epsv_packet | ||
110 | }; | ||
111 | |||
112 | /* So, this packet has hit the connection tracking matching code. | ||
113 | Mangle it, and change the expectation to match the new version. */ | ||
114 | static unsigned int nf_nat_ftp(struct sk_buff **pskb, | ||
115 | enum ip_conntrack_info ctinfo, | ||
116 | enum nf_ct_ftp_type type, | ||
117 | unsigned int matchoff, | ||
118 | unsigned int matchlen, | ||
119 | struct nf_conntrack_expect *exp, | ||
120 | u32 *seq) | ||
121 | { | ||
122 | __be32 newip; | ||
123 | u_int16_t port; | ||
124 | int dir = CTINFO2DIR(ctinfo); | ||
125 | struct nf_conn *ct = exp->master; | ||
126 | |||
127 | DEBUGP("FTP_NAT: type %i, off %u len %u\n", type, matchoff, matchlen); | ||
128 | |||
129 | /* Connection will come from wherever this packet goes, hence !dir */ | ||
130 | newip = ct->tuplehash[!dir].tuple.dst.u3.ip; | ||
131 | exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port; | ||
132 | exp->dir = !dir; | ||
133 | |||
134 | /* When you see the packet, we need to NAT it the same as | ||
135 | * this one. */ | ||
136 | exp->expectfn = nf_nat_follow_master; | ||
137 | |||
138 | /* Try to get same port: if not, try to change it. */ | ||
139 | for (port = ntohs(exp->saved_proto.tcp.port); port != 0; port++) { | ||
140 | exp->tuple.dst.u.tcp.port = htons(port); | ||
141 | if (nf_conntrack_expect_related(exp) == 0) | ||
142 | break; | ||
143 | } | ||
144 | |||
145 | if (port == 0) | ||
146 | return NF_DROP; | ||
147 | |||
148 | if (!mangle[type](pskb, newip, port, matchoff, matchlen, ct, ctinfo, | ||
149 | seq)) { | ||
150 | nf_conntrack_unexpect_related(exp); | ||
151 | return NF_DROP; | ||
152 | } | ||
153 | return NF_ACCEPT; | ||
154 | } | ||
155 | |||
156 | static void __exit nf_nat_ftp_fini(void) | ||
157 | { | ||
158 | rcu_assign_pointer(nf_nat_ftp_hook, NULL); | ||
159 | synchronize_rcu(); | ||
160 | } | ||
161 | |||
162 | static int __init nf_nat_ftp_init(void) | ||
163 | { | ||
164 | BUG_ON(rcu_dereference(nf_nat_ftp_hook)); | ||
165 | rcu_assign_pointer(nf_nat_ftp_hook, nf_nat_ftp); | ||
166 | return 0; | ||
167 | } | ||
168 | |||
169 | /* Prior to 2.6.11, we had a ports param. No longer, but don't break users. */ | ||
170 | static int warn_set(const char *val, struct kernel_param *kp) | ||
171 | { | ||
172 | printk(KERN_INFO KBUILD_MODNAME | ||
173 | ": kernel >= 2.6.10 only uses 'ports' for conntrack modules\n"); | ||
174 | return 0; | ||
175 | } | ||
176 | module_param_call(ports, warn_set, NULL, NULL, 0); | ||
177 | |||
178 | module_init(nf_nat_ftp_init); | ||
179 | module_exit(nf_nat_ftp_fini); | ||
diff --git a/net/ipv4/netfilter/nf_nat_h323.c b/net/ipv4/netfilter/nf_nat_h323.c new file mode 100644 index 000000000000..fb9ab0114c23 --- /dev/null +++ b/net/ipv4/netfilter/nf_nat_h323.c | |||
@@ -0,0 +1,596 @@ | |||
1 | /* | ||
2 | * H.323 extension for NAT alteration. | ||
3 | * | ||
4 | * Copyright (c) 2006 Jing Min Zhao <zhaojingmin@users.sourceforge.net> | ||
5 | * | ||
6 | * This source code is licensed under General Public License version 2. | ||
7 | * | ||
8 | * Based on the 'brute force' H.323 NAT module by | ||
9 | * Jozsef Kadlecsik <kadlec@blackhole.kfki.hu> | ||
10 | */ | ||
11 | |||
12 | #include <linux/module.h> | ||
13 | #include <linux/moduleparam.h> | ||
14 | #include <linux/tcp.h> | ||
15 | #include <net/tcp.h> | ||
16 | |||
17 | #include <net/netfilter/nf_nat.h> | ||
18 | #include <net/netfilter/nf_nat_helper.h> | ||
19 | #include <net/netfilter/nf_nat_rule.h> | ||
20 | #include <net/netfilter/nf_conntrack_helper.h> | ||
21 | #include <net/netfilter/nf_conntrack_expect.h> | ||
22 | #include <linux/netfilter/nf_conntrack_h323.h> | ||
23 | |||
24 | #if 0 | ||
25 | #define DEBUGP printk | ||
26 | #else | ||
27 | #define DEBUGP(format, args...) | ||
28 | #endif | ||
29 | |||
30 | /****************************************************************************/ | ||
31 | static int set_addr(struct sk_buff **pskb, | ||
32 | unsigned char **data, int dataoff, | ||
33 | unsigned int addroff, __be32 ip, __be16 port) | ||
34 | { | ||
35 | enum ip_conntrack_info ctinfo; | ||
36 | struct nf_conn *ct = nf_ct_get(*pskb, &ctinfo); | ||
37 | struct { | ||
38 | __be32 ip; | ||
39 | __be16 port; | ||
40 | } __attribute__ ((__packed__)) buf; | ||
41 | struct tcphdr _tcph, *th; | ||
42 | |||
43 | buf.ip = ip; | ||
44 | buf.port = port; | ||
45 | addroff += dataoff; | ||
46 | |||
47 | if ((*pskb)->nh.iph->protocol == IPPROTO_TCP) { | ||
48 | if (!nf_nat_mangle_tcp_packet(pskb, ct, ctinfo, | ||
49 | addroff, sizeof(buf), | ||
50 | (char *) &buf, sizeof(buf))) { | ||
51 | if (net_ratelimit()) | ||
52 | printk("nf_nat_h323: nf_nat_mangle_tcp_packet" | ||
53 | " error\n"); | ||
54 | return -1; | ||
55 | } | ||
56 | |||
57 | /* Relocate data pointer */ | ||
58 | th = skb_header_pointer(*pskb, (*pskb)->nh.iph->ihl * 4, | ||
59 | sizeof(_tcph), &_tcph); | ||
60 | if (th == NULL) | ||
61 | return -1; | ||
62 | *data = (*pskb)->data + (*pskb)->nh.iph->ihl * 4 + | ||
63 | th->doff * 4 + dataoff; | ||
64 | } else { | ||
65 | if (!nf_nat_mangle_udp_packet(pskb, ct, ctinfo, | ||
66 | addroff, sizeof(buf), | ||
67 | (char *) &buf, sizeof(buf))) { | ||
68 | if (net_ratelimit()) | ||
69 | printk("nf_nat_h323: nf_nat_mangle_udp_packet" | ||
70 | " error\n"); | ||
71 | return -1; | ||
72 | } | ||
73 | /* nf_nat_mangle_udp_packet uses skb_make_writable() to copy | ||
74 | * or pull everything in a linear buffer, so we can safely | ||
75 | * use the skb pointers now */ | ||
76 | *data = (*pskb)->data + (*pskb)->nh.iph->ihl * 4 + | ||
77 | sizeof(struct udphdr); | ||
78 | } | ||
79 | |||
80 | return 0; | ||
81 | } | ||
82 | |||
83 | /****************************************************************************/ | ||
84 | static int set_h225_addr(struct sk_buff **pskb, | ||
85 | unsigned char **data, int dataoff, | ||
86 | TransportAddress *taddr, | ||
87 | union nf_conntrack_address *addr, __be16 port) | ||
88 | { | ||
89 | return set_addr(pskb, data, dataoff, taddr->ipAddress.ip, | ||
90 | addr->ip, port); | ||
91 | } | ||
92 | |||
93 | /****************************************************************************/ | ||
94 | static int set_h245_addr(struct sk_buff **pskb, | ||
95 | unsigned char **data, int dataoff, | ||
96 | H245_TransportAddress *taddr, | ||
97 | union nf_conntrack_address *addr, __be16 port) | ||
98 | { | ||
99 | return set_addr(pskb, data, dataoff, | ||
100 | taddr->unicastAddress.iPAddress.network, | ||
101 | addr->ip, port); | ||
102 | } | ||
103 | |||
104 | /****************************************************************************/ | ||
105 | static int set_sig_addr(struct sk_buff **pskb, struct nf_conn *ct, | ||
106 | enum ip_conntrack_info ctinfo, | ||
107 | unsigned char **data, | ||
108 | TransportAddress *taddr, int count) | ||
109 | { | ||
110 | struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info; | ||
111 | int dir = CTINFO2DIR(ctinfo); | ||
112 | int i; | ||
113 | __be16 port; | ||
114 | union nf_conntrack_address addr; | ||
115 | |||
116 | for (i = 0; i < count; i++) { | ||
117 | if (get_h225_addr(ct, *data, &taddr[i], &addr, &port)) { | ||
118 | if (addr.ip == ct->tuplehash[dir].tuple.src.u3.ip && | ||
119 | port == info->sig_port[dir]) { | ||
120 | /* GW->GK */ | ||
121 | |||
122 | /* Fix for Gnomemeeting */ | ||
123 | if (i > 0 && | ||
124 | get_h225_addr(ct, *data, &taddr[0], | ||
125 | &addr, &port) && | ||
126 | (ntohl(addr.ip) & 0xff000000) == 0x7f000000) | ||
127 | i = 0; | ||
128 | |||
129 | DEBUGP | ||
130 | ("nf_nat_ras: set signal address " | ||
131 | "%u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", | ||
132 | NIPQUAD(addr.ip), port, | ||
133 | NIPQUAD(ct->tuplehash[!dir].tuple.dst. | ||
134 | u3.ip), info->sig_port[!dir]); | ||
135 | return set_h225_addr(pskb, data, 0, &taddr[i], | ||
136 | &ct->tuplehash[!dir]. | ||
137 | tuple.dst.u3, | ||
138 | info->sig_port[!dir]); | ||
139 | } else if (addr.ip == ct->tuplehash[dir].tuple.dst.u3.ip && | ||
140 | port == info->sig_port[dir]) { | ||
141 | /* GK->GW */ | ||
142 | DEBUGP | ||
143 | ("nf_nat_ras: set signal address " | ||
144 | "%u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", | ||
145 | NIPQUAD(addr.ip), port, | ||
146 | NIPQUAD(ct->tuplehash[!dir].tuple.src. | ||
147 | u3.ip), info->sig_port[!dir]); | ||
148 | return set_h225_addr(pskb, data, 0, &taddr[i], | ||
149 | &ct->tuplehash[!dir]. | ||
150 | tuple.src.u3, | ||
151 | info->sig_port[!dir]); | ||
152 | } | ||
153 | } | ||
154 | } | ||
155 | |||
156 | return 0; | ||
157 | } | ||
158 | |||
159 | /****************************************************************************/ | ||
160 | static int set_ras_addr(struct sk_buff **pskb, struct nf_conn *ct, | ||
161 | enum ip_conntrack_info ctinfo, | ||
162 | unsigned char **data, | ||
163 | TransportAddress *taddr, int count) | ||
164 | { | ||
165 | int dir = CTINFO2DIR(ctinfo); | ||
166 | int i; | ||
167 | __be16 port; | ||
168 | union nf_conntrack_address addr; | ||
169 | |||
170 | for (i = 0; i < count; i++) { | ||
171 | if (get_h225_addr(ct, *data, &taddr[i], &addr, &port) && | ||
172 | addr.ip == ct->tuplehash[dir].tuple.src.u3.ip && | ||
173 | port == ct->tuplehash[dir].tuple.src.u.udp.port) { | ||
174 | DEBUGP("nf_nat_ras: set rasAddress " | ||
175 | "%u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", | ||
176 | NIPQUAD(addr.ip), ntohs(port), | ||
177 | NIPQUAD(ct->tuplehash[!dir].tuple.dst.u3.ip), | ||
178 | ntohs(ct->tuplehash[!dir].tuple.dst.u.udp. | ||
179 | port)); | ||
180 | return set_h225_addr(pskb, data, 0, &taddr[i], | ||
181 | &ct->tuplehash[!dir].tuple.dst.u3, | ||
182 | ct->tuplehash[!dir].tuple. | ||
183 | dst.u.udp.port); | ||
184 | } | ||
185 | } | ||
186 | |||
187 | return 0; | ||
188 | } | ||
189 | |||
190 | /****************************************************************************/ | ||
191 | static int nat_rtp_rtcp(struct sk_buff **pskb, struct nf_conn *ct, | ||
192 | enum ip_conntrack_info ctinfo, | ||
193 | unsigned char **data, int dataoff, | ||
194 | H245_TransportAddress *taddr, | ||
195 | __be16 port, __be16 rtp_port, | ||
196 | struct nf_conntrack_expect *rtp_exp, | ||
197 | struct nf_conntrack_expect *rtcp_exp) | ||
198 | { | ||
199 | struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info; | ||
200 | int dir = CTINFO2DIR(ctinfo); | ||
201 | int i; | ||
202 | u_int16_t nated_port; | ||
203 | |||
204 | /* Set expectations for NAT */ | ||
205 | rtp_exp->saved_proto.udp.port = rtp_exp->tuple.dst.u.udp.port; | ||
206 | rtp_exp->expectfn = nf_nat_follow_master; | ||
207 | rtp_exp->dir = !dir; | ||
208 | rtcp_exp->saved_proto.udp.port = rtcp_exp->tuple.dst.u.udp.port; | ||
209 | rtcp_exp->expectfn = nf_nat_follow_master; | ||
210 | rtcp_exp->dir = !dir; | ||
211 | |||
212 | /* Lookup existing expects */ | ||
213 | for (i = 0; i < H323_RTP_CHANNEL_MAX; i++) { | ||
214 | if (info->rtp_port[i][dir] == rtp_port) { | ||
215 | /* Expected */ | ||
216 | |||
217 | /* Use allocated ports first. This will refresh | ||
218 | * the expects */ | ||
219 | rtp_exp->tuple.dst.u.udp.port = info->rtp_port[i][dir]; | ||
220 | rtcp_exp->tuple.dst.u.udp.port = | ||
221 | htons(ntohs(info->rtp_port[i][dir]) + 1); | ||
222 | break; | ||
223 | } else if (info->rtp_port[i][dir] == 0) { | ||
224 | /* Not expected */ | ||
225 | break; | ||
226 | } | ||
227 | } | ||
228 | |||
229 | /* Run out of expectations */ | ||
230 | if (i >= H323_RTP_CHANNEL_MAX) { | ||
231 | if (net_ratelimit()) | ||
232 | printk("nf_nat_h323: out of expectations\n"); | ||
233 | return 0; | ||
234 | } | ||
235 | |||
236 | /* Try to get a pair of ports. */ | ||
237 | for (nated_port = ntohs(rtp_exp->tuple.dst.u.udp.port); | ||
238 | nated_port != 0; nated_port += 2) { | ||
239 | rtp_exp->tuple.dst.u.udp.port = htons(nated_port); | ||
240 | if (nf_conntrack_expect_related(rtp_exp) == 0) { | ||
241 | rtcp_exp->tuple.dst.u.udp.port = | ||
242 | htons(nated_port + 1); | ||
243 | if (nf_conntrack_expect_related(rtcp_exp) == 0) | ||
244 | break; | ||
245 | nf_conntrack_unexpect_related(rtp_exp); | ||
246 | } | ||
247 | } | ||
248 | |||
249 | if (nated_port == 0) { /* No port available */ | ||
250 | if (net_ratelimit()) | ||
251 | printk("nf_nat_h323: out of RTP ports\n"); | ||
252 | return 0; | ||
253 | } | ||
254 | |||
255 | /* Modify signal */ | ||
256 | if (set_h245_addr(pskb, data, dataoff, taddr, | ||
257 | &ct->tuplehash[!dir].tuple.dst.u3, | ||
258 | htons((port & htons(1)) ? nated_port + 1 : | ||
259 | nated_port)) == 0) { | ||
260 | /* Save ports */ | ||
261 | info->rtp_port[i][dir] = rtp_port; | ||
262 | info->rtp_port[i][!dir] = htons(nated_port); | ||
263 | } else { | ||
264 | nf_conntrack_unexpect_related(rtp_exp); | ||
265 | nf_conntrack_unexpect_related(rtcp_exp); | ||
266 | return -1; | ||
267 | } | ||
268 | |||
269 | /* Success */ | ||
270 | DEBUGP("nf_nat_h323: expect RTP %u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", | ||
271 | NIPQUAD(rtp_exp->tuple.src.ip), | ||
272 | ntohs(rtp_exp->tuple.src.u.udp.port), | ||
273 | NIPQUAD(rtp_exp->tuple.dst.ip), | ||
274 | ntohs(rtp_exp->tuple.dst.u.udp.port)); | ||
275 | DEBUGP("nf_nat_h323: expect RTCP %u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", | ||
276 | NIPQUAD(rtcp_exp->tuple.src.ip), | ||
277 | ntohs(rtcp_exp->tuple.src.u.udp.port), | ||
278 | NIPQUAD(rtcp_exp->tuple.dst.ip), | ||
279 | ntohs(rtcp_exp->tuple.dst.u.udp.port)); | ||
280 | |||
281 | return 0; | ||
282 | } | ||
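
A stand-alone model of the pair allocation above (not part of the patch): RTP conventionally uses an even port with RTCP on the next odd port, so the search steps by two and must release the RTP reservation whenever the adjacent RTCP port is refused. reserve()/release() are made-up stand-ins for nf_conntrack_expect_related()/nf_conntrack_unexpect_related().

#include <stdint.h>
#include <stdio.h>

static unsigned char taken[65536];

static int reserve(uint16_t port)
{
        if (taken[port])
                return -1;
        taken[port] = 1;
        return 0;
}

static void release(uint16_t port)
{
        taken[port] = 0;
}

/* Find a free (rtp, rtp + 1) pair, starting at the requested RTP port
 * (conventionally even) and stepping by two, the same shape as the
 * loop in nat_rtp_rtcp(). */
static uint16_t pick_rtp_pair(uint16_t wanted)
{
        uint16_t port;

        for (port = wanted; port != 0; port += 2) {
                if (reserve(port) == 0) {
                        if (reserve((uint16_t)(port + 1)) == 0)
                                return port;
                        release(port);  /* RTCP port busy, try next pair */
                }
        }
        return 0;       /* no pair available */
}

int main(void)
{
        taken[5004] = taken[5007] = 1;  /* pretend these are in use */
        printf("pair starts at %u\n", pick_rtp_pair(5004));
        return 0;
}
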
283 | |||
284 | /****************************************************************************/ | ||
285 | static int nat_t120(struct sk_buff **pskb, struct nf_conn *ct, | ||
286 | enum ip_conntrack_info ctinfo, | ||
287 | unsigned char **data, int dataoff, | ||
288 | H245_TransportAddress *taddr, __be16 port, | ||
289 | struct nf_conntrack_expect *exp) | ||
290 | { | ||
291 | int dir = CTINFO2DIR(ctinfo); | ||
292 | u_int16_t nated_port = ntohs(port); | ||
293 | |||
294 | /* Set expectations for NAT */ | ||
295 | exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port; | ||
296 | exp->expectfn = nf_nat_follow_master; | ||
297 | exp->dir = !dir; | ||
298 | |||
299 | /* Try to get same port: if not, try to change it. */ | ||
300 | for (; nated_port != 0; nated_port++) { | ||
301 | exp->tuple.dst.u.tcp.port = htons(nated_port); | ||
302 | if (nf_conntrack_expect_related(exp) == 0) | ||
303 | break; | ||
304 | } | ||
305 | |||
306 | if (nated_port == 0) { /* No port available */ | ||
307 | if (net_ratelimit()) | ||
308 | printk("nf_nat_h323: out of TCP ports\n"); | ||
309 | return 0; | ||
310 | } | ||
311 | |||
312 | /* Modify signal */ | ||
313 | if (set_h245_addr(pskb, data, dataoff, taddr, | ||
314 | &ct->tuplehash[!dir].tuple.dst.u3, | ||
315 | htons(nated_port)) < 0) { | ||
316 | nf_conntrack_unexpect_related(exp); | ||
317 | return -1; | ||
318 | } | ||
319 | |||
320 | DEBUGP("nf_nat_h323: expect T.120 %u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", | ||
321 | NIPQUAD(exp->tuple.src.ip), ntohs(exp->tuple.src.u.tcp.port), | ||
322 | NIPQUAD(exp->tuple.dst.ip), ntohs(exp->tuple.dst.u.tcp.port)); | ||
323 | |||
324 | return 0; | ||
325 | } | ||
326 | |||
327 | /****************************************************************************/ | ||
328 | static int nat_h245(struct sk_buff **pskb, struct nf_conn *ct, | ||
329 | enum ip_conntrack_info ctinfo, | ||
330 | unsigned char **data, int dataoff, | ||
331 | TransportAddress *taddr, __be16 port, | ||
332 | struct nf_conntrack_expect *exp) | ||
333 | { | ||
334 | struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info; | ||
335 | int dir = CTINFO2DIR(ctinfo); | ||
336 | u_int16_t nated_port = ntohs(port); | ||
337 | |||
338 | /* Set expectations for NAT */ | ||
339 | exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port; | ||
340 | exp->expectfn = nf_nat_follow_master; | ||
341 | exp->dir = !dir; | ||
342 | |||
343 | /* Check existing expects */ | ||
344 | if (info->sig_port[dir] == port) | ||
345 | nated_port = ntohs(info->sig_port[!dir]); | ||
346 | |||
347 | /* Try to get same port: if not, try to change it. */ | ||
348 | for (; nated_port != 0; nated_port++) { | ||
349 | exp->tuple.dst.u.tcp.port = htons(nated_port); | ||
350 | if (nf_conntrack_expect_related(exp) == 0) | ||
351 | break; | ||
352 | } | ||
353 | |||
354 | if (nated_port == 0) { /* No port available */ | ||
355 | if (net_ratelimit()) | ||
356 | printk("nf_nat_q931: out of TCP ports\n"); | ||
357 | return 0; | ||
358 | } | ||
359 | |||
360 | /* Modify signal */ | ||
361 | if (set_h225_addr(pskb, data, dataoff, taddr, | ||
362 | &ct->tuplehash[!dir].tuple.dst.u3, | ||
363 | htons(nated_port)) == 0) { | ||
364 | /* Save ports */ | ||
365 | info->sig_port[dir] = port; | ||
366 | info->sig_port[!dir] = htons(nated_port); | ||
367 | } else { | ||
368 | nf_conntrack_unexpect_related(exp); | ||
369 | return -1; | ||
370 | } | ||
371 | |||
372 | DEBUGP("nf_nat_q931: expect H.245 %u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", | ||
373 | NIPQUAD(exp->tuple.src.ip), ntohs(exp->tuple.src.u.tcp.port), | ||
374 | NIPQUAD(exp->tuple.dst.ip), ntohs(exp->tuple.dst.u.tcp.port)); | ||
375 | |||
376 | return 0; | ||
377 | } | ||
378 | |||
379 | /**************************************************************************** | ||
380 | * This conntrack expect function replaces nf_conntrack_q931_expect() | ||
381 | * which was set by nf_conntrack_h323.c. | ||
382 | ****************************************************************************/ | ||
383 | static void ip_nat_q931_expect(struct nf_conn *new, | ||
384 | struct nf_conntrack_expect *this) | ||
385 | { | ||
386 | struct nf_nat_range range; | ||
387 | |||
388 | if (this->tuple.src.u3.ip != 0) { /* Only accept calls from GK */ | ||
389 | nf_nat_follow_master(new, this); | ||
390 | return; | ||
391 | } | ||
392 | |||
393 | /* This must be a fresh one. */ | ||
394 | BUG_ON(new->status & IPS_NAT_DONE_MASK); | ||
395 | |||
396 | /* Change src to where master sends to */ | ||
397 | range.flags = IP_NAT_RANGE_MAP_IPS; | ||
398 | range.min_ip = range.max_ip = new->tuplehash[!this->dir].tuple.src.u3.ip; | ||
399 | |||
400 | /* hook doesn't matter, but it has to do source manip */ | ||
401 | nf_nat_setup_info(new, &range, NF_IP_POST_ROUTING); | ||
402 | |||
403 | /* For DST manip, map port here to where it's expected. */ | ||
404 | range.flags = (IP_NAT_RANGE_MAP_IPS | IP_NAT_RANGE_PROTO_SPECIFIED); | ||
405 | range.min = range.max = this->saved_proto; | ||
406 | range.min_ip = range.max_ip = | ||
407 | new->master->tuplehash[!this->dir].tuple.src.u3.ip; | ||
408 | |||
409 | /* hook doesn't matter, but it has to do destination manip */ | ||
410 | nf_nat_setup_info(new, &range, NF_IP_PRE_ROUTING); | ||
411 | } | ||
412 | |||
413 | /****************************************************************************/ | ||
414 | static int nat_q931(struct sk_buff **pskb, struct nf_conn *ct, | ||
415 | enum ip_conntrack_info ctinfo, | ||
416 | unsigned char **data, TransportAddress *taddr, int idx, | ||
417 | __be16 port, struct nf_conntrack_expect *exp) | ||
418 | { | ||
419 | struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info; | ||
420 | int dir = CTINFO2DIR(ctinfo); | ||
421 | u_int16_t nated_port = ntohs(port); | ||
422 | union nf_conntrack_address addr; | ||
423 | |||
424 | /* Set expectations for NAT */ | ||
425 | exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port; | ||
426 | exp->expectfn = ip_nat_q931_expect; | ||
427 | exp->dir = !dir; | ||
428 | |||
429 | /* Check existing expects */ | ||
430 | if (info->sig_port[dir] == port) | ||
431 | nated_port = ntohs(info->sig_port[!dir]); | ||
432 | |||
433 | /* Try to get same port: if not, try to change it. */ | ||
434 | for (; nated_port != 0; nated_port++) { | ||
435 | exp->tuple.dst.u.tcp.port = htons(nated_port); | ||
436 | if (nf_conntrack_expect_related(exp) == 0) | ||
437 | break; | ||
438 | } | ||
439 | |||
440 | if (nated_port == 0) { /* No port available */ | ||
441 | if (net_ratelimit()) | ||
442 | printk("nf_nat_ras: out of TCP ports\n"); | ||
443 | return 0; | ||
444 | } | ||
445 | |||
446 | /* Modify signal */ | ||
447 | if (set_h225_addr(pskb, data, 0, &taddr[idx], | ||
448 | &ct->tuplehash[!dir].tuple.dst.u3, | ||
449 | htons(nated_port)) == 0) { | ||
450 | /* Save ports */ | ||
451 | info->sig_port[dir] = port; | ||
452 | info->sig_port[!dir] = htons(nated_port); | ||
453 | |||
454 | /* Fix for Gnomemeeting */ | ||
455 | if (idx > 0 && | ||
456 | get_h225_addr(ct, *data, &taddr[0], &addr, &port) && | ||
457 | (ntohl(addr.ip) & 0xff000000) == 0x7f000000) { | ||
458 | set_h225_addr_hook(pskb, data, 0, &taddr[0], | ||
459 | &ct->tuplehash[!dir].tuple.dst.u3, | ||
460 | info->sig_port[!dir]); | ||
461 | } | ||
462 | } else { | ||
463 | nf_conntrack_unexpect_related(exp); | ||
464 | return -1; | ||
465 | } | ||
466 | |||
467 | /* Success */ | ||
468 | DEBUGP("nf_nat_ras: expect Q.931 %u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", | ||
469 | NIPQUAD(exp->tuple.src.ip), ntohs(exp->tuple.src.u.tcp.port), | ||
470 | NIPQUAD(exp->tuple.dst.ip), ntohs(exp->tuple.dst.u.tcp.port)); | ||
471 | |||
472 | return 0; | ||
473 | } | ||
474 | |||
475 | /****************************************************************************/ | ||
476 | static void ip_nat_callforwarding_expect(struct nf_conn *new, | ||
477 | struct nf_conntrack_expect *this) | ||
478 | { | ||
479 | struct nf_nat_range range; | ||
480 | |||
481 | /* This must be a fresh one. */ | ||
482 | BUG_ON(new->status & IPS_NAT_DONE_MASK); | ||
483 | |||
484 | /* Change src to where master sends to */ | ||
485 | range.flags = IP_NAT_RANGE_MAP_IPS; | ||
486 | range.min_ip = range.max_ip = new->tuplehash[!this->dir].tuple.src.u3.ip; | ||
487 | |||
488 | /* hook doesn't matter, but it has to do source manip */ | ||
489 | nf_nat_setup_info(new, &range, NF_IP_POST_ROUTING); | ||
490 | |||
491 | /* For DST manip, map port here to where it's expected. */ | ||
492 | range.flags = (IP_NAT_RANGE_MAP_IPS | IP_NAT_RANGE_PROTO_SPECIFIED); | ||
493 | range.min = range.max = this->saved_proto; | ||
494 | range.min_ip = range.max_ip = this->saved_ip; | ||
495 | |||
496 | /* hook doesn't matter, but it has to do destination manip */ | ||
497 | nf_nat_setup_info(new, &range, NF_IP_PRE_ROUTING); | ||
498 | } | ||
499 | |||
500 | /****************************************************************************/ | ||
501 | static int nat_callforwarding(struct sk_buff **pskb, struct nf_conn *ct, | ||
502 | enum ip_conntrack_info ctinfo, | ||
503 | unsigned char **data, int dataoff, | ||
504 | TransportAddress *taddr, __be16 port, | ||
505 | struct nf_conntrack_expect *exp) | ||
506 | { | ||
507 | int dir = CTINFO2DIR(ctinfo); | ||
508 | u_int16_t nated_port; | ||
509 | |||
510 | /* Set expectations for NAT */ | ||
511 | exp->saved_ip = exp->tuple.dst.u3.ip; | ||
512 | exp->tuple.dst.u3.ip = ct->tuplehash[!dir].tuple.dst.u3.ip; | ||
513 | exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port; | ||
514 | exp->expectfn = ip_nat_callforwarding_expect; | ||
515 | exp->dir = !dir; | ||
516 | |||
517 | /* Try to get same port: if not, try to change it. */ | ||
518 | for (nated_port = ntohs(port); nated_port != 0; nated_port++) { | ||
519 | exp->tuple.dst.u.tcp.port = htons(nated_port); | ||
520 | if (nf_conntrack_expect_related(exp) == 0) | ||
521 | break; | ||
522 | } | ||
523 | |||
524 | if (nated_port == 0) { /* No port available */ | ||
525 | if (net_ratelimit()) | ||
526 | printk("nf_nat_q931: out of TCP ports\n"); | ||
527 | return 0; | ||
528 | } | ||
529 | |||
530 | /* Modify signal */ | ||
531 | if (set_h225_addr(pskb, data, dataoff, taddr, | ||
532 | &ct->tuplehash[!dir].tuple.dst.u3, | ||
533 | htons(nated_port)) != 0) { | ||
534 | nf_conntrack_unexpect_related(exp); | ||
535 | return -1; | ||
536 | } | ||
537 | |||
538 | /* Success */ | ||
539 | DEBUGP("nf_nat_q931: expect Call Forwarding " | ||
540 | "%u.%u.%u.%u:%hu->%u.%u.%u.%u:%hu\n", | ||
541 | NIPQUAD(exp->tuple.src.ip), ntohs(exp->tuple.src.u.tcp.port), | ||
542 | NIPQUAD(exp->tuple.dst.ip), ntohs(exp->tuple.dst.u.tcp.port)); | ||
543 | |||
544 | return 0; | ||
545 | } | ||
546 | |||
547 | /****************************************************************************/ | ||
548 | static int __init init(void) | ||
549 | { | ||
550 | BUG_ON(rcu_dereference(set_h245_addr_hook) != NULL); | ||
551 | BUG_ON(rcu_dereference(set_h225_addr_hook) != NULL); | ||
552 | BUG_ON(rcu_dereference(set_sig_addr_hook) != NULL); | ||
553 | BUG_ON(rcu_dereference(set_ras_addr_hook) != NULL); | ||
554 | BUG_ON(rcu_dereference(nat_rtp_rtcp_hook) != NULL); | ||
555 | BUG_ON(rcu_dereference(nat_t120_hook) != NULL); | ||
556 | BUG_ON(rcu_dereference(nat_h245_hook) != NULL); | ||
557 | BUG_ON(rcu_dereference(nat_callforwarding_hook) != NULL); | ||
558 | BUG_ON(rcu_dereference(nat_q931_hook) != NULL); | ||
559 | |||
560 | rcu_assign_pointer(set_h245_addr_hook, set_h245_addr); | ||
561 | rcu_assign_pointer(set_h225_addr_hook, set_h225_addr); | ||
562 | rcu_assign_pointer(set_sig_addr_hook, set_sig_addr); | ||
563 | rcu_assign_pointer(set_ras_addr_hook, set_ras_addr); | ||
564 | rcu_assign_pointer(nat_rtp_rtcp_hook, nat_rtp_rtcp); | ||
565 | rcu_assign_pointer(nat_t120_hook, nat_t120); | ||
566 | rcu_assign_pointer(nat_h245_hook, nat_h245); | ||
567 | rcu_assign_pointer(nat_callforwarding_hook, nat_callforwarding); | ||
568 | rcu_assign_pointer(nat_q931_hook, nat_q931); | ||
569 | |||
570 | DEBUGP("nf_nat_h323: init success\n"); | ||
571 | return 0; | ||
572 | } | ||
573 | |||
574 | /****************************************************************************/ | ||
575 | static void __exit fini(void) | ||
576 | { | ||
577 | rcu_assign_pointer(set_h245_addr_hook, NULL); | ||
578 | rcu_assign_pointer(set_h225_addr_hook, NULL); | ||
579 | rcu_assign_pointer(set_sig_addr_hook, NULL); | ||
580 | rcu_assign_pointer(set_ras_addr_hook, NULL); | ||
581 | rcu_assign_pointer(nat_rtp_rtcp_hook, NULL); | ||
582 | rcu_assign_pointer(nat_t120_hook, NULL); | ||
583 | rcu_assign_pointer(nat_h245_hook, NULL); | ||
584 | rcu_assign_pointer(nat_callforwarding_hook, NULL); | ||
585 | rcu_assign_pointer(nat_q931_hook, NULL); | ||
586 | synchronize_rcu(); | ||
587 | } | ||
588 | |||
589 | /****************************************************************************/ | ||
590 | module_init(init); | ||
591 | module_exit(fini); | ||
592 | |||
593 | MODULE_AUTHOR("Jing Min Zhao <zhaojingmin@users.sourceforge.net>"); | ||
594 | MODULE_DESCRIPTION("H.323 NAT helper"); | ||
595 | MODULE_LICENSE("GPL"); | ||
596 | MODULE_ALIAS("ip_nat_h323"); | ||
diff --git a/net/ipv4/netfilter/nf_nat_helper.c b/net/ipv4/netfilter/nf_nat_helper.c new file mode 100644 index 000000000000..98fbfc84d183 --- /dev/null +++ b/net/ipv4/netfilter/nf_nat_helper.c | |||
@@ -0,0 +1,433 @@ | |||
1 | /* ip_nat_helper.c - generic support functions for NAT helpers | ||
2 | * | ||
3 | * (C) 2000-2002 Harald Welte <laforge@netfilter.org> | ||
4 | * (C) 2003-2006 Netfilter Core Team <coreteam@netfilter.org> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | #include <linux/module.h> | ||
11 | #include <linux/kmod.h> | ||
12 | #include <linux/types.h> | ||
13 | #include <linux/timer.h> | ||
14 | #include <linux/skbuff.h> | ||
15 | #include <linux/tcp.h> | ||
16 | #include <linux/udp.h> | ||
17 | #include <net/checksum.h> | ||
18 | #include <net/tcp.h> | ||
19 | |||
20 | #include <linux/netfilter_ipv4.h> | ||
21 | #include <net/netfilter/nf_conntrack.h> | ||
22 | #include <net/netfilter/nf_conntrack_helper.h> | ||
23 | #include <net/netfilter/nf_conntrack_expect.h> | ||
24 | #include <net/netfilter/nf_nat.h> | ||
25 | #include <net/netfilter/nf_nat_protocol.h> | ||
26 | #include <net/netfilter/nf_nat_core.h> | ||
27 | #include <net/netfilter/nf_nat_helper.h> | ||
28 | |||
29 | #if 0 | ||
30 | #define DEBUGP printk | ||
31 | #define DUMP_OFFSET(x) printk("offset_before=%d, offset_after=%d, correction_pos=%u\n", x->offset_before, x->offset_after, x->correction_pos); | ||
32 | #else | ||
33 | #define DEBUGP(format, args...) | ||
34 | #define DUMP_OFFSET(x) | ||
35 | #endif | ||
36 | |||
37 | static DEFINE_SPINLOCK(nf_nat_seqofs_lock); | ||
38 | |||
39 | /* Setup TCP sequence correction given this change at this sequence */ | ||
40 | static inline void | ||
41 | adjust_tcp_sequence(u32 seq, | ||
42 | int sizediff, | ||
43 | struct nf_conn *ct, | ||
44 | enum ip_conntrack_info ctinfo) | ||
45 | { | ||
46 | int dir; | ||
47 | struct nf_nat_seq *this_way, *other_way; | ||
48 | struct nf_conn_nat *nat = nfct_nat(ct); | ||
49 | |||
50 | DEBUGP("nf_nat_resize_packet: old_size = %u, new_size = %u\n", | ||
51 | (*skb)->len, new_size); | ||
52 | |||
53 | dir = CTINFO2DIR(ctinfo); | ||
54 | |||
55 | this_way = &nat->info.seq[dir]; | ||
56 | other_way = &nat->info.seq[!dir]; | ||
57 | |||
58 | DEBUGP("nf_nat_resize_packet: Seq_offset before: "); | ||
59 | DUMP_OFFSET(this_way); | ||
60 | |||
61 | spin_lock_bh(&nf_nat_seqofs_lock); | ||
62 | |||
63 | /* SYN adjust. If it's uninitialized, or this is after last | ||
64 | * correction, record it: we don't handle more than one | ||
65 | * adjustment in the window, but do deal with common case of a | ||
66 | * retransmit */ | ||
67 | if (this_way->offset_before == this_way->offset_after || | ||
68 | before(this_way->correction_pos, seq)) { | ||
69 | this_way->correction_pos = seq; | ||
70 | this_way->offset_before = this_way->offset_after; | ||
71 | this_way->offset_after += sizediff; | ||
72 | } | ||
73 | spin_unlock_bh(&nf_nat_seqofs_lock); | ||
74 | |||
75 | DEBUGP("nf_nat_resize_packet: Seq_offset after: "); | ||
76 | DUMP_OFFSET(this_way); | ||
77 | } | ||
78 | |||
79 | /* Frobs data inside this packet, which is linear. */ | ||
80 | static void mangle_contents(struct sk_buff *skb, | ||
81 | unsigned int dataoff, | ||
82 | unsigned int match_offset, | ||
83 | unsigned int match_len, | ||
84 | const char *rep_buffer, | ||
85 | unsigned int rep_len) | ||
86 | { | ||
87 | unsigned char *data; | ||
88 | |||
89 | BUG_ON(skb_is_nonlinear(skb)); | ||
90 | data = (unsigned char *)skb->nh.iph + dataoff; | ||
91 | |||
92 | /* move post-replacement */ | ||
93 | memmove(data + match_offset + rep_len, | ||
94 | data + match_offset + match_len, | ||
95 | skb->tail - (data + match_offset + match_len)); | ||
96 | |||
97 | /* insert data from buffer */ | ||
98 | memcpy(data + match_offset, rep_buffer, rep_len); | ||
99 | |||
100 | /* update skb info */ | ||
101 | if (rep_len > match_len) { | ||
102 | DEBUGP("nf_nat_mangle_packet: Extending packet by " | ||
103 | "%u from %u bytes\n", rep_len - match_len, | ||
104 | skb->len); | ||
105 | skb_put(skb, rep_len - match_len); | ||
106 | } else { | ||
107 | DEBUGP("nf_nat_mangle_packet: Shrinking packet from " | ||
108 | "%u from %u bytes\n", match_len - rep_len, | ||
109 | skb->len); | ||
110 | __skb_trim(skb, skb->len + rep_len - match_len); | ||
111 | } | ||
112 | |||
113 | /* fix IP hdr checksum information */ | ||
114 | skb->nh.iph->tot_len = htons(skb->len); | ||
115 | ip_send_check(skb->nh.iph); | ||
116 | } | ||
117 | |||
118 | /* Unusual, but possible case. */ | ||
119 | static int enlarge_skb(struct sk_buff **pskb, unsigned int extra) | ||
120 | { | ||
121 | struct sk_buff *nskb; | ||
122 | |||
123 | if ((*pskb)->len + extra > 65535) | ||
124 | return 0; | ||
125 | |||
126 | nskb = skb_copy_expand(*pskb, skb_headroom(*pskb), extra, GFP_ATOMIC); | ||
127 | if (!nskb) | ||
128 | return 0; | ||
129 | |||
130 | /* Transfer socket to new skb. */ | ||
131 | if ((*pskb)->sk) | ||
132 | skb_set_owner_w(nskb, (*pskb)->sk); | ||
133 | kfree_skb(*pskb); | ||
134 | *pskb = nskb; | ||
135 | return 1; | ||
136 | } | ||
137 | |||
138 | /* Generic function for mangling variable-length address changes inside | ||
139 | * NATed TCP connections (like the PORT XXX,XXX,XXX,XXX,XXX,XXX | ||
140 | * command in FTP). | ||
141 | * | ||
142 | * Takes care of all the nasty sequence number changes, checksumming, | ||
143 | * skb enlargement, ... | ||
144 | * | ||
145 | * */ | ||
146 | int | ||
147 | nf_nat_mangle_tcp_packet(struct sk_buff **pskb, | ||
148 | struct nf_conn *ct, | ||
149 | enum ip_conntrack_info ctinfo, | ||
150 | unsigned int match_offset, | ||
151 | unsigned int match_len, | ||
152 | const char *rep_buffer, | ||
153 | unsigned int rep_len) | ||
154 | { | ||
155 | struct iphdr *iph; | ||
156 | struct tcphdr *tcph; | ||
157 | int oldlen, datalen; | ||
158 | |||
159 | if (!skb_make_writable(pskb, (*pskb)->len)) | ||
160 | return 0; | ||
161 | |||
162 | if (rep_len > match_len && | ||
163 | rep_len - match_len > skb_tailroom(*pskb) && | ||
164 | !enlarge_skb(pskb, rep_len - match_len)) | ||
165 | return 0; | ||
166 | |||
167 | SKB_LINEAR_ASSERT(*pskb); | ||
168 | |||
169 | iph = (*pskb)->nh.iph; | ||
170 | tcph = (void *)iph + iph->ihl*4; | ||
171 | |||
172 | oldlen = (*pskb)->len - iph->ihl*4; | ||
173 | mangle_contents(*pskb, iph->ihl*4 + tcph->doff*4, | ||
174 | match_offset, match_len, rep_buffer, rep_len); | ||
175 | |||
176 | datalen = (*pskb)->len - iph->ihl*4; | ||
177 | if ((*pskb)->ip_summed != CHECKSUM_PARTIAL) { | ||
178 | tcph->check = 0; | ||
179 | tcph->check = tcp_v4_check(tcph, datalen, | ||
180 | iph->saddr, iph->daddr, | ||
181 | csum_partial((char *)tcph, | ||
182 | datalen, 0)); | ||
183 | } else | ||
184 | nf_proto_csum_replace2(&tcph->check, *pskb, | ||
185 | htons(oldlen), htons(datalen), 1); | ||
186 | |||
187 | if (rep_len != match_len) { | ||
188 | set_bit(IPS_SEQ_ADJUST_BIT, &ct->status); | ||
189 | adjust_tcp_sequence(ntohl(tcph->seq), | ||
190 | (int)rep_len - (int)match_len, | ||
191 | ct, ctinfo); | ||
192 | /* Tell TCP window tracking about seq change */ | ||
193 | nf_conntrack_tcp_update(*pskb, (*pskb)->nh.iph->ihl*4, | ||
194 | ct, CTINFO2DIR(ctinfo)); | ||
195 | } | ||
196 | return 1; | ||
197 | } | ||
198 | EXPORT_SYMBOL(nf_nat_mangle_tcp_packet); | ||
199 | |||
200 | /* Generic function for mangling variable-length address changes inside | ||
201 | * NATed UDP connections (like the CONNECT DATA XXXXX MESG XXXXX INDEX XXXXX | ||
202 | * command in the Amanda protocol) | ||
203 | * | ||
204 | * Takes care of all the nasty sequence number changes, checksumming, | ||
205 | * skb enlargement, ... | ||
206 | * | ||
207 | * XXX - This function could be merged with nf_nat_mangle_tcp_packet which | ||
208 | * should be fairly easy to do. | ||
209 | */ | ||
210 | int | ||
211 | nf_nat_mangle_udp_packet(struct sk_buff **pskb, | ||
212 | struct nf_conn *ct, | ||
213 | enum ip_conntrack_info ctinfo, | ||
214 | unsigned int match_offset, | ||
215 | unsigned int match_len, | ||
216 | const char *rep_buffer, | ||
217 | unsigned int rep_len) | ||
218 | { | ||
219 | struct iphdr *iph; | ||
220 | struct udphdr *udph; | ||
221 | int datalen, oldlen; | ||
222 | |||
223 | /* UDP helpers might accidentally mangle the wrong packet */ | ||
224 | iph = (*pskb)->nh.iph; | ||
225 | if ((*pskb)->len < iph->ihl*4 + sizeof(*udph) + | ||
226 | match_offset + match_len) | ||
227 | return 0; | ||
228 | |||
229 | if (!skb_make_writable(pskb, (*pskb)->len)) | ||
230 | return 0; | ||
231 | |||
232 | if (rep_len > match_len && | ||
233 | rep_len - match_len > skb_tailroom(*pskb) && | ||
234 | !enlarge_skb(pskb, rep_len - match_len)) | ||
235 | return 0; | ||
236 | |||
237 | iph = (*pskb)->nh.iph; | ||
238 | udph = (void *)iph + iph->ihl*4; | ||
239 | |||
240 | oldlen = (*pskb)->len - iph->ihl*4; | ||
241 | mangle_contents(*pskb, iph->ihl*4 + sizeof(*udph), | ||
242 | match_offset, match_len, rep_buffer, rep_len); | ||
243 | |||
244 | /* update the length of the UDP packet */ | ||
245 | datalen = (*pskb)->len - iph->ihl*4; | ||
246 | udph->len = htons(datalen); | ||
247 | |||
248 | /* fix udp checksum if udp checksum was previously calculated */ | ||
249 | if (!udph->check && (*pskb)->ip_summed != CHECKSUM_PARTIAL) | ||
250 | return 1; | ||
251 | |||
252 | if ((*pskb)->ip_summed != CHECKSUM_PARTIAL) { | ||
253 | udph->check = 0; | ||
254 | udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr, | ||
255 | datalen, IPPROTO_UDP, | ||
256 | csum_partial((char *)udph, | ||
257 | datalen, 0)); | ||
258 | if (!udph->check) | ||
259 | udph->check = CSUM_MANGLED_0; | ||
260 | } else | ||
261 | nf_proto_csum_replace2(&udph->check, *pskb, | ||
262 | htons(oldlen), htons(datalen), 1); | ||
263 | |||
264 | return 1; | ||
265 | } | ||
266 | EXPORT_SYMBOL(nf_nat_mangle_udp_packet); | ||
267 | |||
268 | /* Adjust one found SACK option including checksum correction */ | ||
269 | static void | ||
270 | sack_adjust(struct sk_buff *skb, | ||
271 | struct tcphdr *tcph, | ||
272 | unsigned int sackoff, | ||
273 | unsigned int sackend, | ||
274 | struct nf_nat_seq *natseq) | ||
275 | { | ||
276 | while (sackoff < sackend) { | ||
277 | struct tcp_sack_block_wire *sack; | ||
278 | __be32 new_start_seq, new_end_seq; | ||
279 | |||
280 | sack = (void *)skb->data + sackoff; | ||
281 | if (after(ntohl(sack->start_seq) - natseq->offset_before, | ||
282 | natseq->correction_pos)) | ||
283 | new_start_seq = htonl(ntohl(sack->start_seq) | ||
284 | - natseq->offset_after); | ||
285 | else | ||
286 | new_start_seq = htonl(ntohl(sack->start_seq) | ||
287 | - natseq->offset_before); | ||
288 | |||
289 | if (after(ntohl(sack->end_seq) - natseq->offset_before, | ||
290 | natseq->correction_pos)) | ||
291 | new_end_seq = htonl(ntohl(sack->end_seq) | ||
292 | - natseq->offset_after); | ||
293 | else | ||
294 | new_end_seq = htonl(ntohl(sack->end_seq) | ||
295 | - natseq->offset_before); | ||
296 | |||
297 | DEBUGP("sack_adjust: start_seq: %d->%d, end_seq: %d->%d\n", | ||
298 | ntohl(sack->start_seq), new_start_seq, | ||
299 | ntohl(sack->end_seq), new_end_seq); | ||
300 | |||
301 | nf_proto_csum_replace4(&tcph->check, skb, | ||
302 | sack->start_seq, new_start_seq, 0); | ||
303 | nf_proto_csum_replace4(&tcph->check, skb, | ||
304 | sack->end_seq, new_end_seq, 0); | ||
305 | sack->start_seq = new_start_seq; | ||
306 | sack->end_seq = new_end_seq; | ||
307 | sackoff += sizeof(*sack); | ||
308 | } | ||
309 | } | ||
310 | |||
311 | /* TCP SACK sequence number adjustment */ | ||
312 | static inline unsigned int | ||
313 | nf_nat_sack_adjust(struct sk_buff **pskb, | ||
314 | struct tcphdr *tcph, | ||
315 | struct nf_conn *ct, | ||
316 | enum ip_conntrack_info ctinfo) | ||
317 | { | ||
318 | unsigned int dir, optoff, optend; | ||
319 | struct nf_conn_nat *nat = nfct_nat(ct); | ||
320 | |||
321 | optoff = (*pskb)->nh.iph->ihl*4 + sizeof(struct tcphdr); | ||
322 | optend = (*pskb)->nh.iph->ihl*4 + tcph->doff*4; | ||
323 | |||
324 | if (!skb_make_writable(pskb, optend)) | ||
325 | return 0; | ||
326 | |||
327 | dir = CTINFO2DIR(ctinfo); | ||
328 | |||
329 | while (optoff < optend) { | ||
330 | /* Usually: option, length. */ | ||
331 | unsigned char *op = (*pskb)->data + optoff; | ||
332 | |||
333 | switch (op[0]) { | ||
334 | case TCPOPT_EOL: | ||
335 | return 1; | ||
336 | case TCPOPT_NOP: | ||
337 | optoff++; | ||
338 | continue; | ||
339 | default: | ||
340 | /* no partial options */ | ||
341 | if (optoff + 1 == optend || | ||
342 | optoff + op[1] > optend || | ||
343 | op[1] < 2) | ||
344 | return 0; | ||
345 | if (op[0] == TCPOPT_SACK && | ||
346 | op[1] >= 2+TCPOLEN_SACK_PERBLOCK && | ||
347 | ((op[1] - 2) % TCPOLEN_SACK_PERBLOCK) == 0) | ||
348 | sack_adjust(*pskb, tcph, optoff+2, | ||
349 | optoff+op[1], | ||
350 | &nat->info.seq[!dir]); | ||
351 | optoff += op[1]; | ||
352 | } | ||
353 | } | ||
354 | return 1; | ||
355 | } | ||
356 | |||
357 | /* TCP sequence number adjustment. Returns 1 on success, 0 on failure */ | ||
358 | int | ||
359 | nf_nat_seq_adjust(struct sk_buff **pskb, | ||
360 | struct nf_conn *ct, | ||
361 | enum ip_conntrack_info ctinfo) | ||
362 | { | ||
363 | struct tcphdr *tcph; | ||
364 | int dir; | ||
365 | __be32 newseq, newack; | ||
366 | struct nf_conn_nat *nat = nfct_nat(ct); | ||
367 | struct nf_nat_seq *this_way, *other_way; | ||
368 | |||
369 | dir = CTINFO2DIR(ctinfo); | ||
370 | |||
371 | this_way = &nat->info.seq[dir]; | ||
372 | other_way = &nat->info.seq[!dir]; | ||
373 | |||
374 | if (!skb_make_writable(pskb, (*pskb)->nh.iph->ihl*4+sizeof(*tcph))) | ||
375 | return 0; | ||
376 | |||
377 | tcph = (void *)(*pskb)->data + (*pskb)->nh.iph->ihl*4; | ||
378 | if (after(ntohl(tcph->seq), this_way->correction_pos)) | ||
379 | newseq = htonl(ntohl(tcph->seq) + this_way->offset_after); | ||
380 | else | ||
381 | newseq = htonl(ntohl(tcph->seq) + this_way->offset_before); | ||
382 | |||
383 | if (after(ntohl(tcph->ack_seq) - other_way->offset_before, | ||
384 | other_way->correction_pos)) | ||
385 | newack = htonl(ntohl(tcph->ack_seq) - other_way->offset_after); | ||
386 | else | ||
387 | newack = htonl(ntohl(tcph->ack_seq) - other_way->offset_before); | ||
388 | |||
389 | nf_proto_csum_replace4(&tcph->check, *pskb, tcph->seq, newseq, 0); | ||
390 | nf_proto_csum_replace4(&tcph->check, *pskb, tcph->ack_seq, newack, 0); | ||
391 | |||
392 | DEBUGP("Adjusting sequence number from %u->%u, ack from %u->%u\n", | ||
393 | ntohl(tcph->seq), ntohl(newseq), ntohl(tcph->ack_seq), | ||
394 | ntohl(newack)); | ||
395 | |||
396 | tcph->seq = newseq; | ||
397 | tcph->ack_seq = newack; | ||
398 | |||
399 | if (!nf_nat_sack_adjust(pskb, tcph, ct, ctinfo)) | ||
400 | return 0; | ||
401 | |||
402 | nf_conntrack_tcp_update(*pskb, (*pskb)->nh.iph->ihl*4, ct, dir); | ||
403 | |||
404 | return 1; | ||
405 | } | ||
406 | EXPORT_SYMBOL(nf_nat_seq_adjust); | ||
407 | |||
408 | /* Setup NAT on this expected conntrack so it follows master. */ | ||
409 | /* If we fail to get a free NAT slot, we'll get dropped on confirm */ | ||
410 | void nf_nat_follow_master(struct nf_conn *ct, | ||
411 | struct nf_conntrack_expect *exp) | ||
412 | { | ||
413 | struct nf_nat_range range; | ||
414 | |||
415 | /* This must be a fresh one. */ | ||
416 | BUG_ON(ct->status & IPS_NAT_DONE_MASK); | ||
417 | |||
418 | /* Change src to where master sends to */ | ||
419 | range.flags = IP_NAT_RANGE_MAP_IPS; | ||
420 | range.min_ip = range.max_ip | ||
421 | = ct->master->tuplehash[!exp->dir].tuple.dst.u3.ip; | ||
422 | /* hook doesn't matter, but it has to do source manip */ | ||
423 | nf_nat_setup_info(ct, &range, NF_IP_POST_ROUTING); | ||
424 | |||
425 | /* For DST manip, map port here to where it's expected. */ | ||
426 | range.flags = (IP_NAT_RANGE_MAP_IPS | IP_NAT_RANGE_PROTO_SPECIFIED); | ||
427 | range.min = range.max = exp->saved_proto; | ||
428 | range.min_ip = range.max_ip | ||
429 | = ct->master->tuplehash[!exp->dir].tuple.src.u3.ip; | ||
430 | /* hook doesn't matter, but it has to do destination manip */ | ||
431 | nf_nat_setup_info(ct, &range, NF_IP_PRE_ROUTING); | ||
432 | } | ||
433 | EXPORT_SYMBOL(nf_nat_follow_master); | ||
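adjust_tcp_sequence() and nf_nat_seq_adjust() above cooperate through three fields per direction: correction_pos (the sequence number at which the payload was last resized), offset_before and offset_after. A segment whose sequence number lies after correction_pos gets offset_after applied, anything earlier gets offset_before. The sketch below is a simplified userspace model of that bookkeeping only; it uses a plain '>' instead of the kernel's wraparound-safe after() and does no locking.

/*
 * Simplified userspace model of the per-direction offset bookkeeping.
 * Assumption: no 32-bit sequence wraparound handling.
 */
#include <stdint.h>
#include <stdio.h>

struct seq_off {
	uint32_t correction_pos;	/* seq where the payload was last resized */
	int32_t  offset_before;		/* offset for data before that point */
	int32_t  offset_after;		/* offset for data at/after that point */
};

static void record_resize(struct seq_off *s, uint32_t seq, int sizediff)
{
	s->correction_pos = seq;
	s->offset_before = s->offset_after;
	s->offset_after += sizediff;
}

static uint32_t adjust_seq(const struct seq_off *s, uint32_t seq)
{
	return seq + (seq > s->correction_pos ? s->offset_after
					      : s->offset_before);
}

int main(void)
{
	struct seq_off s = { 0, 0, 0 };

	record_resize(&s, 1000, +4);	/* payload grew by 4 bytes at seq 1000 */
	printf("%u -> %u\n", 900u, adjust_seq(&s, 900));	/* unchanged */
	printf("%u -> %u\n", 1200u, adjust_seq(&s, 1200));	/* shifted by +4 */
	return 0;
}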
diff --git a/net/ipv4/netfilter/nf_nat_irc.c b/net/ipv4/netfilter/nf_nat_irc.c new file mode 100644 index 000000000000..9b8c0daea744 --- /dev/null +++ b/net/ipv4/netfilter/nf_nat_irc.c | |||
@@ -0,0 +1,101 @@ | |||
1 | /* IRC extension for TCP NAT alteration. | ||
2 | * | ||
3 | * (C) 2000-2001 by Harald Welte <laforge@gnumonks.org> | ||
4 | * (C) 2004 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation | ||
5 | * based on a copy of RR's ip_nat_ftp.c | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License | ||
9 | * as published by the Free Software Foundation; either version | ||
10 | * 2 of the License, or (at your option) any later version. | ||
11 | */ | ||
12 | |||
13 | #include <linux/module.h> | ||
14 | #include <linux/moduleparam.h> | ||
15 | #include <linux/tcp.h> | ||
16 | #include <linux/kernel.h> | ||
17 | |||
18 | #include <net/netfilter/nf_nat.h> | ||
19 | #include <net/netfilter/nf_nat_helper.h> | ||
20 | #include <net/netfilter/nf_nat_rule.h> | ||
21 | #include <net/netfilter/nf_conntrack_helper.h> | ||
22 | #include <net/netfilter/nf_conntrack_expect.h> | ||
23 | #include <linux/netfilter/nf_conntrack_irc.h> | ||
24 | |||
25 | #if 0 | ||
26 | #define DEBUGP printk | ||
27 | #else | ||
28 | #define DEBUGP(format, args...) | ||
29 | #endif | ||
30 | |||
31 | MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>"); | ||
32 | MODULE_DESCRIPTION("IRC (DCC) NAT helper"); | ||
33 | MODULE_LICENSE("GPL"); | ||
34 | MODULE_ALIAS("ip_nat_irc"); | ||
35 | |||
36 | static unsigned int help(struct sk_buff **pskb, | ||
37 | enum ip_conntrack_info ctinfo, | ||
38 | unsigned int matchoff, | ||
39 | unsigned int matchlen, | ||
40 | struct nf_conntrack_expect *exp) | ||
41 | { | ||
42 | char buffer[sizeof("4294967295 65535")]; | ||
43 | u_int32_t ip; | ||
44 | u_int16_t port; | ||
45 | unsigned int ret; | ||
46 | |||
47 | DEBUGP("IRC_NAT: info (seq %u + %u) in %u\n", | ||
48 | expect->seq, exp_irc_info->len, ntohl(tcph->seq)); | ||
49 | |||
50 | /* Reply comes from server. */ | ||
51 | exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port; | ||
52 | exp->dir = IP_CT_DIR_REPLY; | ||
53 | exp->expectfn = nf_nat_follow_master; | ||
54 | |||
55 | /* Try to get same port: if not, try to change it. */ | ||
56 | for (port = ntohs(exp->saved_proto.tcp.port); port != 0; port++) { | ||
57 | exp->tuple.dst.u.tcp.port = htons(port); | ||
58 | if (nf_conntrack_expect_related(exp) == 0) | ||
59 | break; | ||
60 | } | ||
61 | |||
62 | if (port == 0) | ||
63 | return NF_DROP; | ||
64 | |||
65 | ip = ntohl(exp->master->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip); | ||
66 | sprintf(buffer, "%u %u", ip, port); | ||
67 | DEBUGP("nf_nat_irc: inserting '%s' == %u.%u.%u.%u, port %u\n", | ||
68 | buffer, NIPQUAD(ip), port); | ||
69 | |||
70 | ret = nf_nat_mangle_tcp_packet(pskb, exp->master, ctinfo, | ||
71 | matchoff, matchlen, buffer, | ||
72 | strlen(buffer)); | ||
73 | if (ret != NF_ACCEPT) | ||
74 | nf_conntrack_unexpect_related(exp); | ||
75 | return ret; | ||
76 | } | ||
77 | |||
78 | static void __exit nf_nat_irc_fini(void) | ||
79 | { | ||
80 | rcu_assign_pointer(nf_nat_irc_hook, NULL); | ||
81 | synchronize_rcu(); | ||
82 | } | ||
83 | |||
84 | static int __init nf_nat_irc_init(void) | ||
85 | { | ||
86 | BUG_ON(rcu_dereference(nf_nat_irc_hook)); | ||
87 | rcu_assign_pointer(nf_nat_irc_hook, help); | ||
88 | return 0; | ||
89 | } | ||
90 | |||
91 | /* Prior to 2.6.11, we had a ports param. No longer, but don't break users. */ | ||
92 | static int warn_set(const char *val, struct kernel_param *kp) | ||
93 | { | ||
94 | printk(KERN_INFO KBUILD_MODNAME | ||
95 | ": kernel >= 2.6.10 only uses 'ports' for conntrack modules\n"); | ||
96 | return 0; | ||
97 | } | ||
98 | module_param_call(ports, warn_set, NULL, NULL, 0); | ||
99 | |||
100 | module_init(nf_nat_irc_init); | ||
101 | module_exit(nf_nat_irc_fini); | ||
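For DCC the helper rewrites the address argument as a single decimal 32-bit number followed by the NATed port, which is exactly what the sprintf(buffer, "%u %u", ip, port) call above produces. A small userspace illustration of that formatting, using an arbitrary example address and port:

/*
 * Userspace illustration only: how the DCC replacement string is built.
 * 192.0.2.10 and port 4321 are made-up examples.
 */
#include <arpa/inet.h>
#include <stdio.h>

int main(void)
{
	struct in_addr a;
	char buffer[sizeof("4294967295 65535")];	/* worst-case "ip port" */

	inet_pton(AF_INET, "192.0.2.10", &a);
	snprintf(buffer, sizeof(buffer), "%u %u", ntohl(a.s_addr), 4321u);
	printf("DCC argument: '%s'\n", buffer);		/* "3221225994 4321" */
	return 0;
}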
diff --git a/net/ipv4/netfilter/nf_nat_pptp.c b/net/ipv4/netfilter/nf_nat_pptp.c new file mode 100644 index 000000000000..0ae45b79a4eb --- /dev/null +++ b/net/ipv4/netfilter/nf_nat_pptp.c | |||
@@ -0,0 +1,315 @@ | |||
1 | /* | ||
2 | * nf_nat_pptp.c | ||
3 | * | ||
4 | * NAT support for PPTP (Point to Point Tunneling Protocol). | ||
5 | * PPTP is a protocol for creating virtual private networks. | ||
6 | * It is a specification defined by Microsoft and some vendors | ||
7 | * working with Microsoft. PPTP is built on top of a modified | ||
8 | * version of the Internet Generic Routing Encapsulation Protocol. | ||
9 | * GRE is defined in RFC 1701 and RFC 1702. Documentation of | ||
10 | * PPTP can be found in RFC 2637 | ||
11 | * | ||
12 | * (C) 2000-2005 by Harald Welte <laforge@gnumonks.org> | ||
13 | * | ||
14 | * Development of this code funded by Astaro AG (http://www.astaro.com/) | ||
15 | * | ||
16 | * TODO: - NAT to a unique tuple, not to TCP source port | ||
17 | * (needs netfilter tuple reservation) | ||
18 | */ | ||
19 | |||
20 | #include <linux/module.h> | ||
21 | #include <linux/tcp.h> | ||
22 | |||
23 | #include <net/netfilter/nf_nat.h> | ||
24 | #include <net/netfilter/nf_nat_helper.h> | ||
25 | #include <net/netfilter/nf_nat_rule.h> | ||
26 | #include <net/netfilter/nf_conntrack_helper.h> | ||
27 | #include <net/netfilter/nf_conntrack_expect.h> | ||
28 | #include <linux/netfilter/nf_conntrack_proto_gre.h> | ||
29 | #include <linux/netfilter/nf_conntrack_pptp.h> | ||
30 | |||
31 | #define NF_NAT_PPTP_VERSION "3.0" | ||
32 | |||
33 | #define REQ_CID(req, off) (*(__be16 *)((char *)(req) + (off))) | ||
34 | |||
35 | MODULE_LICENSE("GPL"); | ||
36 | MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>"); | ||
37 | MODULE_DESCRIPTION("Netfilter NAT helper module for PPTP"); | ||
38 | MODULE_ALIAS("ip_nat_pptp"); | ||
39 | |||
40 | #if 0 | ||
41 | extern const char *pptp_msg_name[]; | ||
42 | #define DEBUGP(format, args...) printk(KERN_DEBUG "%s:%s: " format, __FILE__, \ | ||
43 | __FUNCTION__, ## args) | ||
44 | #else | ||
45 | #define DEBUGP(format, args...) | ||
46 | #endif | ||
47 | |||
48 | static void pptp_nat_expected(struct nf_conn *ct, | ||
49 | struct nf_conntrack_expect *exp) | ||
50 | { | ||
51 | struct nf_conn *master = ct->master; | ||
52 | struct nf_conntrack_expect *other_exp; | ||
53 | struct nf_conntrack_tuple t; | ||
54 | struct nf_ct_pptp_master *ct_pptp_info; | ||
55 | struct nf_nat_pptp *nat_pptp_info; | ||
56 | struct ip_nat_range range; | ||
57 | |||
58 | ct_pptp_info = &nfct_help(master)->help.ct_pptp_info; | ||
59 | nat_pptp_info = &nfct_nat(master)->help.nat_pptp_info; | ||
60 | |||
61 | /* And here goes the grand finale of corrosion... */ | ||
62 | if (exp->dir == IP_CT_DIR_ORIGINAL) { | ||
63 | DEBUGP("we are PNS->PAC\n"); | ||
64 | /* therefore, build tuple for PAC->PNS */ | ||
65 | t.src.l3num = AF_INET; | ||
66 | t.src.u3.ip = master->tuplehash[!exp->dir].tuple.src.u3.ip; | ||
67 | t.src.u.gre.key = ct_pptp_info->pac_call_id; | ||
68 | t.dst.u3.ip = master->tuplehash[!exp->dir].tuple.dst.u3.ip; | ||
69 | t.dst.u.gre.key = ct_pptp_info->pns_call_id; | ||
70 | t.dst.protonum = IPPROTO_GRE; | ||
71 | } else { | ||
72 | DEBUGP("we are PAC->PNS\n"); | ||
73 | /* build tuple for PNS->PAC */ | ||
74 | t.src.l3num = AF_INET; | ||
75 | t.src.u3.ip = master->tuplehash[exp->dir].tuple.src.u3.ip; | ||
76 | t.src.u.gre.key = nat_pptp_info->pns_call_id; | ||
77 | t.dst.u3.ip = master->tuplehash[exp->dir].tuple.dst.u3.ip; | ||
78 | t.dst.u.gre.key = nat_pptp_info->pac_call_id; | ||
79 | t.dst.protonum = IPPROTO_GRE; | ||
80 | } | ||
81 | |||
82 | DEBUGP("trying to unexpect other dir: "); | ||
83 | NF_CT_DUMP_TUPLE(&t); | ||
84 | other_exp = nf_conntrack_expect_find_get(&t); | ||
85 | if (other_exp) { | ||
86 | nf_conntrack_unexpect_related(other_exp); | ||
87 | nf_conntrack_expect_put(other_exp); | ||
88 | DEBUGP("success\n"); | ||
89 | } else { | ||
90 | DEBUGP("not found!\n"); | ||
91 | } | ||
92 | |||
93 | /* This must be a fresh one. */ | ||
94 | BUG_ON(ct->status & IPS_NAT_DONE_MASK); | ||
95 | |||
96 | /* Change src to where master sends to */ | ||
97 | range.flags = IP_NAT_RANGE_MAP_IPS; | ||
98 | range.min_ip = range.max_ip | ||
99 | = ct->master->tuplehash[!exp->dir].tuple.dst.u3.ip; | ||
100 | if (exp->dir == IP_CT_DIR_ORIGINAL) { | ||
101 | range.flags |= IP_NAT_RANGE_PROTO_SPECIFIED; | ||
102 | range.min = range.max = exp->saved_proto; | ||
103 | } | ||
104 | /* hook doesn't matter, but it has to do source manip */ | ||
105 | nf_nat_setup_info(ct, &range, NF_IP_POST_ROUTING); | ||
106 | |||
107 | /* For DST manip, map port here to where it's expected. */ | ||
108 | range.flags = IP_NAT_RANGE_MAP_IPS; | ||
109 | range.min_ip = range.max_ip | ||
110 | = ct->master->tuplehash[!exp->dir].tuple.src.u3.ip; | ||
111 | if (exp->dir == IP_CT_DIR_REPLY) { | ||
112 | range.flags |= IP_NAT_RANGE_PROTO_SPECIFIED; | ||
113 | range.min = range.max = exp->saved_proto; | ||
114 | } | ||
115 | /* hook doesn't matter, but it has to do destination manip */ | ||
116 | nf_nat_setup_info(ct, &range, NF_IP_PRE_ROUTING); | ||
117 | } | ||
118 | |||
119 | /* outbound packets == from PNS to PAC */ | ||
120 | static int | ||
121 | pptp_outbound_pkt(struct sk_buff **pskb, | ||
122 | struct nf_conn *ct, | ||
123 | enum ip_conntrack_info ctinfo, | ||
124 | struct PptpControlHeader *ctlh, | ||
125 | union pptp_ctrl_union *pptpReq) | ||
126 | |||
127 | { | ||
128 | struct nf_ct_pptp_master *ct_pptp_info; | ||
129 | struct nf_nat_pptp *nat_pptp_info; | ||
130 | u_int16_t msg; | ||
131 | __be16 new_callid; | ||
132 | unsigned int cid_off; | ||
133 | |||
134 | ct_pptp_info = &nfct_help(ct)->help.ct_pptp_info; | ||
135 | nat_pptp_info = &nfct_nat(ct)->help.nat_pptp_info; | ||
136 | |||
137 | new_callid = ct_pptp_info->pns_call_id; | ||
138 | |||
139 | switch (msg = ntohs(ctlh->messageType)) { | ||
140 | case PPTP_OUT_CALL_REQUEST: | ||
141 | cid_off = offsetof(union pptp_ctrl_union, ocreq.callID); | ||
142 | /* FIXME: ideally we would want to reserve a call ID | ||
143 | * here. current netfilter NAT core is not able to do | ||
144 | * this :( For now we use TCP source port. This breaks | ||
145 | * multiple calls within one control session */ | ||
146 | |||
147 | /* save original call ID in nat_info */ | ||
148 | nat_pptp_info->pns_call_id = ct_pptp_info->pns_call_id; | ||
149 | |||
150 | /* don't use tcph->source since we are at a DSTmanip | ||
151 | * hook (e.g. PREROUTING) and pkt is not mangled yet */ | ||
152 | new_callid = ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u.tcp.port; | ||
153 | |||
154 | /* save new call ID in ct info */ | ||
155 | ct_pptp_info->pns_call_id = new_callid; | ||
156 | break; | ||
157 | case PPTP_IN_CALL_REPLY: | ||
158 | cid_off = offsetof(union pptp_ctrl_union, icack.callID); | ||
159 | break; | ||
160 | case PPTP_CALL_CLEAR_REQUEST: | ||
161 | cid_off = offsetof(union pptp_ctrl_union, clrreq.callID); | ||
162 | break; | ||
163 | default: | ||
164 | DEBUGP("unknown outbound packet 0x%04x:%s\n", msg, | ||
165 | (msg <= PPTP_MSG_MAX)? | ||
166 | pptp_msg_name[msg]:pptp_msg_name[0]); | ||
167 | /* fall through */ | ||
168 | case PPTP_SET_LINK_INFO: | ||
169 | /* only need to NAT in case PAC is behind NAT box */ | ||
170 | case PPTP_START_SESSION_REQUEST: | ||
171 | case PPTP_START_SESSION_REPLY: | ||
172 | case PPTP_STOP_SESSION_REQUEST: | ||
173 | case PPTP_STOP_SESSION_REPLY: | ||
174 | case PPTP_ECHO_REQUEST: | ||
175 | case PPTP_ECHO_REPLY: | ||
176 | /* no need to alter packet */ | ||
177 | return NF_ACCEPT; | ||
178 | } | ||
179 | |||
180 | /* only OUT_CALL_REQUEST, IN_CALL_REPLY, CALL_CLEAR_REQUEST pass | ||
181 | * down to here */ | ||
182 | DEBUGP("altering call id from 0x%04x to 0x%04x\n", | ||
183 | ntohs(REQ_CID(pptpReq, cid_off)), ntohs(new_callid)); | ||
184 | |||
185 | /* mangle packet */ | ||
186 | if (nf_nat_mangle_tcp_packet(pskb, ct, ctinfo, | ||
187 | cid_off + sizeof(struct pptp_pkt_hdr) + | ||
188 | sizeof(struct PptpControlHeader), | ||
189 | sizeof(new_callid), (char *)&new_callid, | ||
190 | sizeof(new_callid)) == 0) | ||
191 | return NF_DROP; | ||
192 | return NF_ACCEPT; | ||
193 | } | ||
194 | |||
195 | static void | ||
196 | pptp_exp_gre(struct nf_conntrack_expect *expect_orig, | ||
197 | struct nf_conntrack_expect *expect_reply) | ||
198 | { | ||
199 | struct nf_conn *ct = expect_orig->master; | ||
200 | struct nf_ct_pptp_master *ct_pptp_info; | ||
201 | struct nf_nat_pptp *nat_pptp_info; | ||
202 | |||
203 | ct_pptp_info = &nfct_help(ct)->help.ct_pptp_info; | ||
204 | nat_pptp_info = &nfct_nat(ct)->help.nat_pptp_info; | ||
205 | |||
206 | /* save original PAC call ID in nat_info */ | ||
207 | nat_pptp_info->pac_call_id = ct_pptp_info->pac_call_id; | ||
208 | |||
209 | /* alter expectation for PNS->PAC direction */ | ||
210 | expect_orig->saved_proto.gre.key = ct_pptp_info->pns_call_id; | ||
211 | expect_orig->tuple.src.u.gre.key = nat_pptp_info->pns_call_id; | ||
212 | expect_orig->tuple.dst.u.gre.key = ct_pptp_info->pac_call_id; | ||
213 | expect_orig->dir = IP_CT_DIR_ORIGINAL; | ||
214 | |||
215 | /* alter expectation for PAC->PNS direction */ | ||
216 | expect_reply->saved_proto.gre.key = nat_pptp_info->pns_call_id; | ||
217 | expect_reply->tuple.src.u.gre.key = nat_pptp_info->pac_call_id; | ||
218 | expect_reply->tuple.dst.u.gre.key = ct_pptp_info->pns_call_id; | ||
219 | expect_reply->dir = IP_CT_DIR_REPLY; | ||
220 | } | ||
221 | |||
222 | /* inbound packets == from PAC to PNS */ | ||
223 | static int | ||
224 | pptp_inbound_pkt(struct sk_buff **pskb, | ||
225 | struct nf_conn *ct, | ||
226 | enum ip_conntrack_info ctinfo, | ||
227 | struct PptpControlHeader *ctlh, | ||
228 | union pptp_ctrl_union *pptpReq) | ||
229 | { | ||
230 | struct nf_nat_pptp *nat_pptp_info; | ||
231 | u_int16_t msg; | ||
232 | __be16 new_pcid; | ||
233 | unsigned int pcid_off; | ||
234 | |||
235 | nat_pptp_info = &nfct_nat(ct)->help.nat_pptp_info; | ||
236 | new_pcid = nat_pptp_info->pns_call_id; | ||
237 | |||
238 | switch (msg = ntohs(ctlh->messageType)) { | ||
239 | case PPTP_OUT_CALL_REPLY: | ||
240 | pcid_off = offsetof(union pptp_ctrl_union, ocack.peersCallID); | ||
241 | break; | ||
242 | case PPTP_IN_CALL_CONNECT: | ||
243 | pcid_off = offsetof(union pptp_ctrl_union, iccon.peersCallID); | ||
244 | break; | ||
245 | case PPTP_IN_CALL_REQUEST: | ||
246 | /* only need to nat in case PAC is behind NAT box */ | ||
247 | return NF_ACCEPT; | ||
248 | case PPTP_WAN_ERROR_NOTIFY: | ||
249 | pcid_off = offsetof(union pptp_ctrl_union, wanerr.peersCallID); | ||
250 | break; | ||
251 | case PPTP_CALL_DISCONNECT_NOTIFY: | ||
252 | pcid_off = offsetof(union pptp_ctrl_union, disc.callID); | ||
253 | break; | ||
254 | case PPTP_SET_LINK_INFO: | ||
255 | pcid_off = offsetof(union pptp_ctrl_union, setlink.peersCallID); | ||
256 | break; | ||
257 | default: | ||
258 | DEBUGP("unknown inbound packet %s\n", (msg <= PPTP_MSG_MAX)? | ||
259 | pptp_msg_name[msg]:pptp_msg_name[0]); | ||
260 | /* fall through */ | ||
261 | case PPTP_START_SESSION_REQUEST: | ||
262 | case PPTP_START_SESSION_REPLY: | ||
263 | case PPTP_STOP_SESSION_REQUEST: | ||
264 | case PPTP_STOP_SESSION_REPLY: | ||
265 | case PPTP_ECHO_REQUEST: | ||
266 | case PPTP_ECHO_REPLY: | ||
267 | /* no need to alter packet */ | ||
268 | return NF_ACCEPT; | ||
269 | } | ||
270 | |||
271 | /* only OUT_CALL_REPLY, IN_CALL_CONNECT, IN_CALL_REQUEST, | ||
272 | * WAN_ERROR_NOTIFY, CALL_DISCONNECT_NOTIFY pass down here */ | ||
273 | |||
274 | /* mangle packet */ | ||
275 | DEBUGP("altering peer call id from 0x%04x to 0x%04x\n", | ||
276 | ntohs(REQ_CID(pptpReq, pcid_off)), ntohs(new_pcid)); | ||
277 | |||
278 | if (nf_nat_mangle_tcp_packet(pskb, ct, ctinfo, | ||
279 | pcid_off + sizeof(struct pptp_pkt_hdr) + | ||
280 | sizeof(struct PptpControlHeader), | ||
281 | sizeof(new_pcid), (char *)&new_pcid, | ||
282 | sizeof(new_pcid)) == 0) | ||
283 | return NF_DROP; | ||
284 | return NF_ACCEPT; | ||
285 | } | ||
286 | |||
287 | static int __init nf_nat_helper_pptp_init(void) | ||
288 | { | ||
289 | nf_nat_need_gre(); | ||
290 | |||
291 | BUG_ON(rcu_dereference(nf_nat_pptp_hook_outbound)); | ||
292 | rcu_assign_pointer(nf_nat_pptp_hook_outbound, pptp_outbound_pkt); | ||
293 | |||
294 | BUG_ON(rcu_dereference(nf_nat_pptp_hook_inbound)); | ||
295 | rcu_assign_pointer(nf_nat_pptp_hook_inbound, pptp_inbound_pkt); | ||
296 | |||
297 | BUG_ON(rcu_dereference(nf_nat_pptp_hook_exp_gre)); | ||
298 | rcu_assign_pointer(nf_nat_pptp_hook_exp_gre, pptp_exp_gre); | ||
299 | |||
300 | BUG_ON(rcu_dereference(nf_nat_pptp_hook_expectfn)); | ||
301 | rcu_assign_pointer(nf_nat_pptp_hook_expectfn, pptp_nat_expected); | ||
302 | return 0; | ||
303 | } | ||
304 | |||
305 | static void __exit nf_nat_helper_pptp_fini(void) | ||
306 | { | ||
307 | rcu_assign_pointer(nf_nat_pptp_hook_expectfn, NULL); | ||
308 | rcu_assign_pointer(nf_nat_pptp_hook_exp_gre, NULL); | ||
309 | rcu_assign_pointer(nf_nat_pptp_hook_inbound, NULL); | ||
310 | rcu_assign_pointer(nf_nat_pptp_hook_outbound, NULL); | ||
311 | synchronize_rcu(); | ||
312 | } | ||
313 | |||
314 | module_init(nf_nat_helper_pptp_init); | ||
315 | module_exit(nf_nat_helper_pptp_fini); | ||
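Both pptp_outbound_pkt() and pptp_inbound_pkt() above reduce to two steps: map the control message type to the offset of the relevant call ID field inside union pptp_ctrl_union, then mangle that 16-bit field in place. The sketch below shows only the offsetof() dispatch, with hypothetical, heavily reduced structure layouts (the real ones live in the PPTP conntrack headers); message types 7 and 10 are the standard PPTP Outgoing-Call-Request and Incoming-Call-Reply codes.

/*
 * Userspace sketch with invented, reduced struct layouts: picking the
 * offset of the call ID field that has to be rewritten.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct out_call_request { uint16_t callID; uint16_t serialNumber; };
struct in_call_reply    { uint16_t callID; uint16_t peersCallID; };

union ctrl_union {
	struct out_call_request ocreq;
	struct in_call_reply icack;
};

static size_t call_id_offset(unsigned int msg_type)
{
	switch (msg_type) {
	case 7:		/* Outgoing-Call-Request */
		return offsetof(union ctrl_union, ocreq.callID);
	case 10:	/* Incoming-Call-Reply */
		return offsetof(union ctrl_union, icack.callID);
	default:	/* nothing to rewrite for this message */
		return (size_t)-1;
	}
}

int main(void)
{
	printf("callID offset for Outgoing-Call-Request: %zu\n",
	       call_id_offset(7));
	return 0;
}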
diff --git a/net/ipv4/netfilter/nf_nat_proto_gre.c b/net/ipv4/netfilter/nf_nat_proto_gre.c new file mode 100644 index 000000000000..d3de579e09d2 --- /dev/null +++ b/net/ipv4/netfilter/nf_nat_proto_gre.c | |||
@@ -0,0 +1,179 @@ | |||
1 | /* | ||
2 | * nf_nat_proto_gre.c | ||
3 | * | ||
4 | * NAT protocol helper module for GRE. | ||
5 | * | ||
6 | * GRE is a generic encapsulation protocol, which is generally not very | ||
7 | * well suited for NAT, as it has no protocol-specific part such as port numbers. | ||
8 | * | ||
9 | * It has an optional key field, which may help us distinguish two | ||
10 | * connections between the same two hosts. | ||
11 | * | ||
12 | * GRE is defined in RFC 1701 and RFC 1702, as well as RFC 2784 | ||
13 | * | ||
14 | * PPTP is built on top of a modified version of GRE, and has a mandatory | ||
15 | * field called "CallID", which serves us for the same purpose as the key | ||
16 | * field in plain GRE. | ||
17 | * | ||
18 | * Documentation about PPTP can be found in RFC 2637 | ||
19 | * | ||
20 | * (C) 2000-2005 by Harald Welte <laforge@gnumonks.org> | ||
21 | * | ||
22 | * Development of this code funded by Astaro AG (http://www.astaro.com/) | ||
23 | * | ||
24 | */ | ||
25 | |||
26 | #include <linux/module.h> | ||
27 | #include <linux/skbuff.h> | ||
28 | #include <linux/ip.h> | ||
29 | |||
30 | #include <net/netfilter/nf_nat.h> | ||
31 | #include <net/netfilter/nf_nat_rule.h> | ||
32 | #include <net/netfilter/nf_nat_protocol.h> | ||
33 | #include <linux/netfilter/nf_conntrack_proto_gre.h> | ||
34 | |||
35 | MODULE_LICENSE("GPL"); | ||
36 | MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>"); | ||
37 | MODULE_DESCRIPTION("Netfilter NAT protocol helper module for GRE"); | ||
38 | |||
39 | #if 0 | ||
40 | #define DEBUGP(format, args...) printk(KERN_DEBUG "%s:%s: " format, __FILE__, \ | ||
41 | __FUNCTION__, ## args) | ||
42 | #else | ||
43 | #define DEBUGP(x, args...) | ||
44 | #endif | ||
45 | |||
46 | /* is key in given range between min and max */ | ||
47 | static int | ||
48 | gre_in_range(const struct nf_conntrack_tuple *tuple, | ||
49 | enum nf_nat_manip_type maniptype, | ||
50 | const union nf_conntrack_man_proto *min, | ||
51 | const union nf_conntrack_man_proto *max) | ||
52 | { | ||
53 | __be16 key; | ||
54 | |||
55 | if (maniptype == IP_NAT_MANIP_SRC) | ||
56 | key = tuple->src.u.gre.key; | ||
57 | else | ||
58 | key = tuple->dst.u.gre.key; | ||
59 | |||
60 | return ntohs(key) >= ntohs(min->gre.key) && | ||
61 | ntohs(key) <= ntohs(max->gre.key); | ||
62 | } | ||
63 | |||
64 | /* generate unique tuple ... */ | ||
65 | static int | ||
66 | gre_unique_tuple(struct nf_conntrack_tuple *tuple, | ||
67 | const struct nf_nat_range *range, | ||
68 | enum nf_nat_manip_type maniptype, | ||
69 | const struct nf_conn *conntrack) | ||
70 | { | ||
71 | static u_int16_t key; | ||
72 | __be16 *keyptr; | ||
73 | unsigned int min, i, range_size; | ||
74 | |||
75 | if (maniptype == IP_NAT_MANIP_SRC) | ||
76 | keyptr = &tuple->src.u.gre.key; | ||
77 | else | ||
78 | keyptr = &tuple->dst.u.gre.key; | ||
79 | |||
80 | if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED)) { | ||
81 | DEBUGP("%p: NATing GRE PPTP\n", conntrack); | ||
82 | min = 1; | ||
83 | range_size = 0xffff; | ||
84 | } else { | ||
85 | min = ntohs(range->min.gre.key); | ||
86 | range_size = ntohs(range->max.gre.key) - min + 1; | ||
87 | } | ||
88 | |||
89 | DEBUGP("min = %u, range_size = %u\n", min, range_size); | ||
90 | |||
91 | for (i = 0; i < range_size; i++, key++) { | ||
92 | *keyptr = htons(min + key % range_size); | ||
93 | if (!nf_nat_used_tuple(tuple, conntrack)) | ||
94 | return 1; | ||
95 | } | ||
96 | |||
97 | DEBUGP("%p: no NAT mapping\n", conntrack); | ||
98 | return 0; | ||
99 | } | ||
100 | |||
101 | /* manipulate a GRE packet according to maniptype */ | ||
102 | static int | ||
103 | gre_manip_pkt(struct sk_buff **pskb, unsigned int iphdroff, | ||
104 | const struct nf_conntrack_tuple *tuple, | ||
105 | enum nf_nat_manip_type maniptype) | ||
106 | { | ||
107 | struct gre_hdr *greh; | ||
108 | struct gre_hdr_pptp *pgreh; | ||
109 | struct iphdr *iph = (struct iphdr *)((*pskb)->data + iphdroff); | ||
110 | unsigned int hdroff = iphdroff + iph->ihl * 4; | ||
111 | |||
112 | /* pgreh includes two optional 32bit fields which are not required | ||
113 | * to be there. That's where the magic '8' comes from */ | ||
114 | if (!skb_make_writable(pskb, hdroff + sizeof(*pgreh) - 8)) | ||
115 | return 0; | ||
116 | |||
117 | greh = (void *)(*pskb)->data + hdroff; | ||
118 | pgreh = (struct gre_hdr_pptp *)greh; | ||
119 | |||
120 | /* we only have destination manip of a packet, since 'source key' | ||
121 | * is not present in the packet itself */ | ||
122 | if (maniptype != IP_NAT_MANIP_DST) | ||
123 | return 1; | ||
124 | switch (greh->version) { | ||
125 | case 0: | ||
126 | if (!greh->key) { | ||
127 | DEBUGP("can't nat GRE w/o key\n"); | ||
128 | break; | ||
129 | } | ||
130 | if (greh->csum) { | ||
131 | /* FIXME: Never tested this code... */ | ||
132 | nf_proto_csum_replace4(gre_csum(greh), *pskb, | ||
133 | *(gre_key(greh)), | ||
134 | tuple->dst.u.gre.key, 0); | ||
135 | } | ||
136 | *(gre_key(greh)) = tuple->dst.u.gre.key; | ||
137 | break; | ||
138 | case GRE_VERSION_PPTP: | ||
139 | DEBUGP("call_id -> 0x%04x\n", ntohs(tuple->dst.u.gre.key)); | ||
140 | pgreh->call_id = tuple->dst.u.gre.key; | ||
141 | break; | ||
142 | default: | ||
143 | DEBUGP("can't nat unknown GRE version\n"); | ||
144 | return 0; | ||
145 | } | ||
146 | return 1; | ||
147 | } | ||
148 | |||
149 | static struct nf_nat_protocol gre __read_mostly = { | ||
150 | .name = "GRE", | ||
151 | .protonum = IPPROTO_GRE, | ||
152 | .manip_pkt = gre_manip_pkt, | ||
153 | .in_range = gre_in_range, | ||
154 | .unique_tuple = gre_unique_tuple, | ||
155 | #if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \ | ||
156 | defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE) | ||
157 | .range_to_nfattr = nf_nat_port_range_to_nfattr, | ||
158 | .nfattr_to_range = nf_nat_port_nfattr_to_range, | ||
159 | #endif | ||
160 | }; | ||
161 | |||
162 | int __init nf_nat_proto_gre_init(void) | ||
163 | { | ||
164 | return nf_nat_protocol_register(&gre); | ||
165 | } | ||
166 | |||
167 | void __exit nf_nat_proto_gre_fini(void) | ||
168 | { | ||
169 | nf_nat_protocol_unregister(&gre); | ||
170 | } | ||
171 | |||
172 | module_init(nf_nat_proto_gre_init); | ||
173 | module_exit(nf_nat_proto_gre_fini); | ||
174 | |||
175 | void nf_nat_need_gre(void) | ||
176 | { | ||
177 | return; | ||
178 | } | ||
179 | EXPORT_SYMBOL_GPL(nf_nat_need_gre); | ||
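gre_unique_tuple() above keeps a static 16-bit counter and walks the configured key range with a modulo until nf_nat_used_tuple() reports a free value. A simplified userspace model of that search, where key_used() is a made-up stand-in for the conntrack lookup:

/*
 * Simplified userspace model of the round-robin key search; key_used()
 * is hypothetical.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool key_used(uint16_t key)
{
	return key == 1 || key == 2;	/* pretend keys 1 and 2 are taken */
}

static int pick_gre_key(uint16_t min, uint16_t range_size, uint16_t *out)
{
	static uint16_t key;		/* persists across calls, like the kernel's */
	unsigned int i;

	for (i = 0; i < range_size; i++, key++) {
		uint16_t candidate = min + key % range_size;

		if (!key_used(candidate)) {
			*out = candidate;
			return 1;
		}
	}
	return 0;			/* no mapping available */
}

int main(void)
{
	uint16_t key;

	if (pick_gre_key(1, 0xffff, &key))
		printf("chose call id %u\n", (unsigned int)key);	/* 3 with the fake key_used() */
	return 0;
}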
diff --git a/net/ipv4/netfilter/nf_nat_proto_icmp.c b/net/ipv4/netfilter/nf_nat_proto_icmp.c new file mode 100644 index 000000000000..dcfd772972d7 --- /dev/null +++ b/net/ipv4/netfilter/nf_nat_proto_icmp.c | |||
@@ -0,0 +1,86 @@ | |||
1 | /* (C) 1999-2001 Paul `Rusty' Russell | ||
2 | * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org> | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 as | ||
6 | * published by the Free Software Foundation. | ||
7 | */ | ||
8 | |||
9 | #include <linux/types.h> | ||
10 | #include <linux/init.h> | ||
11 | #include <linux/ip.h> | ||
12 | #include <linux/icmp.h> | ||
13 | |||
14 | #include <linux/netfilter.h> | ||
15 | #include <net/netfilter/nf_nat.h> | ||
16 | #include <net/netfilter/nf_nat_core.h> | ||
17 | #include <net/netfilter/nf_nat_rule.h> | ||
18 | #include <net/netfilter/nf_nat_protocol.h> | ||
19 | |||
20 | static int | ||
21 | icmp_in_range(const struct nf_conntrack_tuple *tuple, | ||
22 | enum nf_nat_manip_type maniptype, | ||
23 | const union nf_conntrack_man_proto *min, | ||
24 | const union nf_conntrack_man_proto *max) | ||
25 | { | ||
26 | return ntohs(tuple->src.u.icmp.id) >= ntohs(min->icmp.id) && | ||
27 | ntohs(tuple->src.u.icmp.id) <= ntohs(max->icmp.id); | ||
28 | } | ||
29 | |||
30 | static int | ||
31 | icmp_unique_tuple(struct nf_conntrack_tuple *tuple, | ||
32 | const struct nf_nat_range *range, | ||
33 | enum nf_nat_manip_type maniptype, | ||
34 | const struct nf_conn *ct) | ||
35 | { | ||
36 | static u_int16_t id; | ||
37 | unsigned int range_size; | ||
38 | unsigned int i; | ||
39 | |||
40 | range_size = ntohs(range->max.icmp.id) - ntohs(range->min.icmp.id) + 1; | ||
41 | /* If no range specified... */ | ||
42 | if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED)) | ||
43 | range_size = 0xFFFF; | ||
44 | |||
45 | for (i = 0; i < range_size; i++, id++) { | ||
46 | tuple->src.u.icmp.id = htons(ntohs(range->min.icmp.id) + | ||
47 | (id % range_size)); | ||
48 | if (!nf_nat_used_tuple(tuple, ct)) | ||
49 | return 1; | ||
50 | } | ||
51 | return 0; | ||
52 | } | ||
53 | |||
54 | static int | ||
55 | icmp_manip_pkt(struct sk_buff **pskb, | ||
56 | unsigned int iphdroff, | ||
57 | const struct nf_conntrack_tuple *tuple, | ||
58 | enum nf_nat_manip_type maniptype) | ||
59 | { | ||
60 | struct iphdr *iph = (struct iphdr *)((*pskb)->data + iphdroff); | ||
61 | struct icmphdr *hdr; | ||
62 | unsigned int hdroff = iphdroff + iph->ihl*4; | ||
63 | |||
64 | if (!skb_make_writable(pskb, hdroff + sizeof(*hdr))) | ||
65 | return 0; | ||
66 | |||
67 | hdr = (struct icmphdr *)((*pskb)->data + hdroff); | ||
68 | nf_proto_csum_replace2(&hdr->checksum, *pskb, | ||
69 | hdr->un.echo.id, tuple->src.u.icmp.id, 0); | ||
70 | hdr->un.echo.id = tuple->src.u.icmp.id; | ||
71 | return 1; | ||
72 | } | ||
73 | |||
74 | struct nf_nat_protocol nf_nat_protocol_icmp = { | ||
75 | .name = "ICMP", | ||
76 | .protonum = IPPROTO_ICMP, | ||
77 | .me = THIS_MODULE, | ||
78 | .manip_pkt = icmp_manip_pkt, | ||
79 | .in_range = icmp_in_range, | ||
80 | .unique_tuple = icmp_unique_tuple, | ||
81 | #if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \ | ||
82 | defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE) | ||
83 | .range_to_nfattr = nf_nat_port_range_to_nfattr, | ||
84 | .nfattr_to_range = nf_nat_port_nfattr_to_range, | ||
85 | #endif | ||
86 | }; | ||
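icmp_manip_pkt() swaps the echo ID and lets nf_proto_csum_replace2() patch the ICMP checksum incrementally instead of recomputing it over the whole payload. Conceptually that is the RFC 1624 update HC' = ~(~HC + ~m + m'). The userspace sketch below shows just that arithmetic, treating the values as host byte order for clarity and using made-up example numbers.

/*
 * Userspace sketch of an RFC 1624 incremental 16-bit checksum update.
 * The values in main() are arbitrary examples.
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t csum_replace2(uint16_t check, uint16_t old, uint16_t new)
{
	uint32_t sum = (uint16_t)~check + (uint16_t)~old + new;

	sum = (sum & 0xffff) + (sum >> 16);	/* fold carries back in */
	sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	uint16_t check = 0x1c46;	/* hypothetical ICMP checksum */
	uint16_t old_id = 0x1234, new_id = 0xbeef;

	printf("checksum %#x -> %#x after id %#x -> %#x\n",
	       (unsigned int)check,
	       (unsigned int)csum_replace2(check, old_id, new_id),
	       (unsigned int)old_id, (unsigned int)new_id);
	return 0;
}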
diff --git a/net/ipv4/netfilter/nf_nat_proto_tcp.c b/net/ipv4/netfilter/nf_nat_proto_tcp.c new file mode 100644 index 000000000000..7e26a7e9bee1 --- /dev/null +++ b/net/ipv4/netfilter/nf_nat_proto_tcp.c | |||
@@ -0,0 +1,148 @@ | |||
1 | /* (C) 1999-2001 Paul `Rusty' Russell | ||
2 | * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org> | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 as | ||
6 | * published by the Free Software Foundation. | ||
7 | */ | ||
8 | |||
9 | #include <linux/types.h> | ||
10 | #include <linux/init.h> | ||
11 | #include <linux/ip.h> | ||
12 | #include <linux/tcp.h> | ||
13 | |||
14 | #include <linux/netfilter.h> | ||
15 | #include <linux/netfilter/nfnetlink_conntrack.h> | ||
16 | #include <net/netfilter/nf_nat.h> | ||
17 | #include <net/netfilter/nf_nat_rule.h> | ||
18 | #include <net/netfilter/nf_nat_protocol.h> | ||
19 | #include <net/netfilter/nf_nat_core.h> | ||
20 | |||
21 | static int | ||
22 | tcp_in_range(const struct nf_conntrack_tuple *tuple, | ||
23 | enum nf_nat_manip_type maniptype, | ||
24 | const union nf_conntrack_man_proto *min, | ||
25 | const union nf_conntrack_man_proto *max) | ||
26 | { | ||
27 | __be16 port; | ||
28 | |||
29 | if (maniptype == IP_NAT_MANIP_SRC) | ||
30 | port = tuple->src.u.tcp.port; | ||
31 | else | ||
32 | port = tuple->dst.u.tcp.port; | ||
33 | |||
34 | return ntohs(port) >= ntohs(min->tcp.port) && | ||
35 | ntohs(port) <= ntohs(max->tcp.port); | ||
36 | } | ||
37 | |||
38 | static int | ||
39 | tcp_unique_tuple(struct nf_conntrack_tuple *tuple, | ||
40 | const struct nf_nat_range *range, | ||
41 | enum nf_nat_manip_type maniptype, | ||
42 | const struct nf_conn *ct) | ||
43 | { | ||
44 | static u_int16_t port; | ||
45 | __be16 *portptr; | ||
46 | unsigned int range_size, min, i; | ||
47 | |||
48 | if (maniptype == IP_NAT_MANIP_SRC) | ||
49 | portptr = &tuple->src.u.tcp.port; | ||
50 | else | ||
51 | portptr = &tuple->dst.u.tcp.port; | ||
52 | |||
53 | /* If no range specified... */ | ||
54 | if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED)) { | ||
55 | /* If it's dst rewrite, can't change port */ | ||
56 | if (maniptype == IP_NAT_MANIP_DST) | ||
57 | return 0; | ||
58 | |||
59 | /* Map privileged onto privileged. */ | ||
60 | if (ntohs(*portptr) < 1024) { | ||
61 | /* Loose convention: >> 512 is credential passing */ | ||
62 | if (ntohs(*portptr)<512) { | ||
63 | min = 1; | ||
64 | range_size = 511 - min + 1; | ||
65 | } else { | ||
66 | min = 600; | ||
67 | range_size = 1023 - min + 1; | ||
68 | } | ||
69 | } else { | ||
70 | min = 1024; | ||
71 | range_size = 65535 - 1024 + 1; | ||
72 | } | ||
73 | } else { | ||
74 | min = ntohs(range->min.tcp.port); | ||
75 | range_size = ntohs(range->max.tcp.port) - min + 1; | ||
76 | } | ||
77 | |||
78 | for (i = 0; i < range_size; i++, port++) { | ||
79 | *portptr = htons(min + port % range_size); | ||
80 | if (!nf_nat_used_tuple(tuple, ct)) | ||
81 | return 1; | ||
82 | } | ||
83 | return 0; | ||
84 | } | ||
85 | |||
86 | static int | ||
87 | tcp_manip_pkt(struct sk_buff **pskb, | ||
88 | unsigned int iphdroff, | ||
89 | const struct nf_conntrack_tuple *tuple, | ||
90 | enum nf_nat_manip_type maniptype) | ||
91 | { | ||
92 | struct iphdr *iph = (struct iphdr *)((*pskb)->data + iphdroff); | ||
93 | struct tcphdr *hdr; | ||
94 | unsigned int hdroff = iphdroff + iph->ihl*4; | ||
95 | __be32 oldip, newip; | ||
96 | __be16 *portptr, newport, oldport; | ||
97 | int hdrsize = 8; /* TCP connection tracking guarantees this much */ | ||
98 | |||
99 | /* this could be an inner header returned in an ICMP packet; in such | ||
100 | cases we cannot update the checksum field since it is outside of | ||
101 | the 8 bytes of transport layer headers we are guaranteed */ | ||
102 | if ((*pskb)->len >= hdroff + sizeof(struct tcphdr)) | ||
103 | hdrsize = sizeof(struct tcphdr); | ||
104 | |||
105 | if (!skb_make_writable(pskb, hdroff + hdrsize)) | ||
106 | return 0; | ||
107 | |||
108 | iph = (struct iphdr *)((*pskb)->data + iphdroff); | ||
109 | hdr = (struct tcphdr *)((*pskb)->data + hdroff); | ||
110 | |||
111 | if (maniptype == IP_NAT_MANIP_SRC) { | ||
112 | /* Get rid of src ip and src pt */ | ||
113 | oldip = iph->saddr; | ||
114 | newip = tuple->src.u3.ip; | ||
115 | newport = tuple->src.u.tcp.port; | ||
116 | portptr = &hdr->source; | ||
117 | } else { | ||
118 | /* Get rid of dst ip and dst pt */ | ||
119 | oldip = iph->daddr; | ||
120 | newip = tuple->dst.u3.ip; | ||
121 | newport = tuple->dst.u.tcp.port; | ||
122 | portptr = &hdr->dest; | ||
123 | } | ||
124 | |||
125 | oldport = *portptr; | ||
126 | *portptr = newport; | ||
127 | |||
128 | if (hdrsize < sizeof(*hdr)) | ||
129 | return 1; | ||
130 | |||
131 | nf_proto_csum_replace4(&hdr->check, *pskb, oldip, newip, 1); | ||
132 | nf_proto_csum_replace2(&hdr->check, *pskb, oldport, newport, 0); | ||
133 | return 1; | ||
134 | } | ||
135 | |||
136 | struct nf_nat_protocol nf_nat_protocol_tcp = { | ||
137 | .name = "TCP", | ||
138 | .protonum = IPPROTO_TCP, | ||
139 | .me = THIS_MODULE, | ||
140 | .manip_pkt = tcp_manip_pkt, | ||
141 | .in_range = tcp_in_range, | ||
142 | .unique_tuple = tcp_unique_tuple, | ||
143 | #if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \ | ||
144 | defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE) | ||
145 | .range_to_nfattr = nf_nat_port_range_to_nfattr, | ||
146 | .nfattr_to_range = nf_nat_port_nfattr_to_range, | ||
147 | #endif | ||
148 | }; | ||
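When no explicit range is configured, tcp_unique_tuple() keeps privileged source ports privileged: ports below 512 are remapped into 1-511, ports 512-1023 into 600-1023, and everything else into 1024-65535. A small userspace sketch of just that range-selection policy (the candidate search itself is the same modulo walk shown for GRE above):

/*
 * Userspace sketch of the default port-range policy; not kernel code.
 */
#include <stdio.h>

static void default_range(unsigned int orig_port, unsigned int *min,
			  unsigned int *range_size)
{
	if (orig_port < 1024) {
		if (orig_port < 512) {		/* stay below 512 */
			*min = 1;
			*range_size = 511;
		} else {			/* stay in 600..1023 */
			*min = 600;
			*range_size = 1023 - 600 + 1;
		}
	} else {				/* unprivileged stays unprivileged */
		*min = 1024;
		*range_size = 65535 - 1024 + 1;
	}
}

int main(void)
{
	static const unsigned int samples[] = { 25, 631, 40000 };
	unsigned int i;

	for (i = 0; i < 3; i++) {
		unsigned int min, size;

		default_range(samples[i], &min, &size);
		printf("port %5u -> candidates [%u, %u]\n",
		       samples[i], min, min + size - 1);
	}
	return 0;
}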
diff --git a/net/ipv4/netfilter/nf_nat_proto_udp.c b/net/ipv4/netfilter/nf_nat_proto_udp.c new file mode 100644 index 000000000000..ab0ce4c8699f --- /dev/null +++ b/net/ipv4/netfilter/nf_nat_proto_udp.c | |||
@@ -0,0 +1,138 @@ | |||
1 | /* (C) 1999-2001 Paul `Rusty' Russell | ||
2 | * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org> | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 as | ||
6 | * published by the Free Software Foundation. | ||
7 | */ | ||
8 | |||
9 | #include <linux/types.h> | ||
10 | #include <linux/init.h> | ||
11 | #include <linux/ip.h> | ||
12 | #include <linux/udp.h> | ||
13 | |||
14 | #include <linux/netfilter.h> | ||
15 | #include <net/netfilter/nf_nat.h> | ||
16 | #include <net/netfilter/nf_nat_core.h> | ||
17 | #include <net/netfilter/nf_nat_rule.h> | ||
18 | #include <net/netfilter/nf_nat_protocol.h> | ||
19 | |||
20 | static int | ||
21 | udp_in_range(const struct nf_conntrack_tuple *tuple, | ||
22 | enum nf_nat_manip_type maniptype, | ||
23 | const union nf_conntrack_man_proto *min, | ||
24 | const union nf_conntrack_man_proto *max) | ||
25 | { | ||
26 | __be16 port; | ||
27 | |||
28 | if (maniptype == IP_NAT_MANIP_SRC) | ||
29 | port = tuple->src.u.udp.port; | ||
30 | else | ||
31 | port = tuple->dst.u.udp.port; | ||
32 | |||
33 | return ntohs(port) >= ntohs(min->udp.port) && | ||
34 | ntohs(port) <= ntohs(max->udp.port); | ||
35 | } | ||
36 | |||
37 | static int | ||
38 | udp_unique_tuple(struct nf_conntrack_tuple *tuple, | ||
39 | const struct nf_nat_range *range, | ||
40 | enum nf_nat_manip_type maniptype, | ||
41 | const struct nf_conn *ct) | ||
42 | { | ||
43 | static u_int16_t port; | ||
44 | __be16 *portptr; | ||
45 | unsigned int range_size, min, i; | ||
46 | |||
47 | if (maniptype == IP_NAT_MANIP_SRC) | ||
48 | portptr = &tuple->src.u.udp.port; | ||
49 | else | ||
50 | portptr = &tuple->dst.u.udp.port; | ||
51 | |||
52 | /* If no range specified... */ | ||
53 | if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED)) { | ||
54 | /* If it's dst rewrite, can't change port */ | ||
55 | if (maniptype == IP_NAT_MANIP_DST) | ||
56 | return 0; | ||
57 | |||
58 | if (ntohs(*portptr) < 1024) { | ||
59 | /* Loose convention: >> 512 is credential passing */ | ||
60 | if (ntohs(*portptr)<512) { | ||
61 | min = 1; | ||
62 | range_size = 511 - min + 1; | ||
63 | } else { | ||
64 | min = 600; | ||
65 | range_size = 1023 - min + 1; | ||
66 | } | ||
67 | } else { | ||
68 | min = 1024; | ||
69 | range_size = 65535 - 1024 + 1; | ||
70 | } | ||
71 | } else { | ||
72 | min = ntohs(range->min.udp.port); | ||
73 | range_size = ntohs(range->max.udp.port) - min + 1; | ||
74 | } | ||
75 | |||
76 | for (i = 0; i < range_size; i++, port++) { | ||
77 | *portptr = htons(min + port % range_size); | ||
78 | if (!nf_nat_used_tuple(tuple, ct)) | ||
79 | return 1; | ||
80 | } | ||
81 | return 0; | ||
82 | } | ||
83 | |||
84 | static int | ||
85 | udp_manip_pkt(struct sk_buff **pskb, | ||
86 | unsigned int iphdroff, | ||
87 | const struct nf_conntrack_tuple *tuple, | ||
88 | enum nf_nat_manip_type maniptype) | ||
89 | { | ||
90 | struct iphdr *iph = (struct iphdr *)((*pskb)->data + iphdroff); | ||
91 | struct udphdr *hdr; | ||
92 | unsigned int hdroff = iphdroff + iph->ihl*4; | ||
93 | __be32 oldip, newip; | ||
94 | __be16 *portptr, newport; | ||
95 | |||
96 | if (!skb_make_writable(pskb, hdroff + sizeof(*hdr))) | ||
97 | return 0; | ||
98 | |||
99 | iph = (struct iphdr *)((*pskb)->data + iphdroff); | ||
100 | hdr = (struct udphdr *)((*pskb)->data + hdroff); | ||
101 | |||
102 | if (maniptype == IP_NAT_MANIP_SRC) { | ||
103 | /* Get rid of src ip and src pt */ | ||
104 | oldip = iph->saddr; | ||
105 | newip = tuple->src.u3.ip; | ||
106 | newport = tuple->src.u.udp.port; | ||
107 | portptr = &hdr->source; | ||
108 | } else { | ||
109 | /* Get rid of dst ip and dst pt */ | ||
110 | oldip = iph->daddr; | ||
111 | newip = tuple->dst.u3.ip; | ||
112 | newport = tuple->dst.u.udp.port; | ||
113 | portptr = &hdr->dest; | ||
114 | } | ||
115 | if (hdr->check || (*pskb)->ip_summed == CHECKSUM_PARTIAL) { | ||
116 | nf_proto_csum_replace4(&hdr->check, *pskb, oldip, newip, 1); | ||
117 | nf_proto_csum_replace2(&hdr->check, *pskb, *portptr, newport, | ||
118 | 0); | ||
119 | if (!hdr->check) | ||
120 | hdr->check = CSUM_MANGLED_0; | ||
121 | } | ||
122 | *portptr = newport; | ||
123 | return 1; | ||
124 | } | ||
125 | |||
126 | struct nf_nat_protocol nf_nat_protocol_udp = { | ||
127 | .name = "UDP", | ||
128 | .protonum = IPPROTO_UDP, | ||
129 | .me = THIS_MODULE, | ||
130 | .manip_pkt = udp_manip_pkt, | ||
131 | .in_range = udp_in_range, | ||
132 | .unique_tuple = udp_unique_tuple, | ||
133 | #if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \ | ||
134 | defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE) | ||
135 | .range_to_nfattr = nf_nat_port_range_to_nfattr, | ||
136 | .nfattr_to_range = nf_nat_port_nfattr_to_range, | ||
137 | #endif | ||
138 | }; | ||
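udp_manip_pkt() has one extra wrinkle compared to TCP: a UDP-over-IPv4 checksum field of zero on the wire means "no checksum", so whenever the updated checksum would come out as 0 it is sent as CSUM_MANGLED_0 (0xffff) instead. A userspace sketch of that final folding step, with arbitrary example sums:

/*
 * Userspace sketch of folding a 32-bit sum into a UDP checksum while
 * never emitting 0. Input values are made-up examples.
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t finalize_udp_check(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* fold to 16 bits */
	sum = (sum & 0xffff) + (sum >> 16);
	sum = ~sum & 0xffff;			/* one's complement */
	return sum ? (uint16_t)sum : 0xffff;	/* never put 0 on the wire */
}

int main(void)
{
	printf("%#x\n", (unsigned int)finalize_udp_check(0xffff));	/* 0 would result, mangled to 0xffff */
	printf("%#x\n", (unsigned int)finalize_udp_check(0x1234));	/* 0xedcb */
	return 0;
}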
diff --git a/net/ipv4/netfilter/nf_nat_proto_unknown.c b/net/ipv4/netfilter/nf_nat_proto_unknown.c new file mode 100644 index 000000000000..f50d0203f9c0 --- /dev/null +++ b/net/ipv4/netfilter/nf_nat_proto_unknown.c | |||
@@ -0,0 +1,54 @@ | |||
1 | /* The "unknown" protocol. This is what is used for protocols we | ||
2 | * don't understand. It's returned by ip_ct_find_proto(). | ||
3 | */ | ||
4 | |||
5 | /* (C) 1999-2001 Paul `Rusty' Russell | ||
6 | * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | */ | ||
12 | |||
13 | #include <linux/types.h> | ||
14 | #include <linux/init.h> | ||
15 | |||
16 | #include <linux/netfilter.h> | ||
17 | #include <net/netfilter/nf_nat.h> | ||
18 | #include <net/netfilter/nf_nat_rule.h> | ||
19 | #include <net/netfilter/nf_nat_protocol.h> | ||
20 | |||
21 | static int unknown_in_range(const struct nf_conntrack_tuple *tuple, | ||
22 | enum nf_nat_manip_type manip_type, | ||
23 | const union nf_conntrack_man_proto *min, | ||
24 | const union nf_conntrack_man_proto *max) | ||
25 | { | ||
26 | return 1; | ||
27 | } | ||
28 | |||
29 | static int unknown_unique_tuple(struct nf_conntrack_tuple *tuple, | ||
30 | const struct nf_nat_range *range, | ||
31 | enum nf_nat_manip_type maniptype, | ||
32 | const struct nf_conn *ct) | ||
33 | { | ||
34 | /* Sorry: we can't help you; if it's not unique, we can't frob | ||
35 | anything. */ | ||
36 | return 0; | ||
37 | } | ||
38 | |||
39 | static int | ||
40 | unknown_manip_pkt(struct sk_buff **pskb, | ||
41 | unsigned int iphdroff, | ||
42 | const struct nf_conntrack_tuple *tuple, | ||
43 | enum nf_nat_manip_type maniptype) | ||
44 | { | ||
45 | return 1; | ||
46 | } | ||
47 | |||
48 | struct nf_nat_protocol nf_nat_unknown_protocol = { | ||
49 | .name = "unknown", | ||
50 | /* .me isn't set: getting a ref to this cannot fail. */ | ||
51 | .manip_pkt = unknown_manip_pkt, | ||
52 | .in_range = unknown_in_range, | ||
53 | .unique_tuple = unknown_unique_tuple, | ||
54 | }; | ||
diff --git a/net/ipv4/netfilter/nf_nat_rule.c b/net/ipv4/netfilter/nf_nat_rule.c new file mode 100644 index 000000000000..b868ee0195d4 --- /dev/null +++ b/net/ipv4/netfilter/nf_nat_rule.c | |||
@@ -0,0 +1,343 @@ | |||
1 | /* (C) 1999-2001 Paul `Rusty' Russell | ||
2 | * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org> | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 as | ||
6 | * published by the Free Software Foundation. | ||
7 | */ | ||
8 | |||
9 | /* Everything about the rules for NAT. */ | ||
10 | #include <linux/types.h> | ||
11 | #include <linux/ip.h> | ||
12 | #include <linux/netfilter.h> | ||
13 | #include <linux/netfilter_ipv4.h> | ||
14 | #include <linux/module.h> | ||
15 | #include <linux/kmod.h> | ||
16 | #include <linux/skbuff.h> | ||
17 | #include <linux/proc_fs.h> | ||
18 | #include <net/checksum.h> | ||
19 | #include <net/route.h> | ||
20 | #include <linux/bitops.h> | ||
21 | |||
22 | #include <linux/netfilter_ipv4/ip_tables.h> | ||
23 | #include <net/netfilter/nf_nat.h> | ||
24 | #include <net/netfilter/nf_nat_core.h> | ||
25 | #include <net/netfilter/nf_nat_rule.h> | ||
26 | |||
27 | #if 0 | ||
28 | #define DEBUGP printk | ||
29 | #else | ||
30 | #define DEBUGP(format, args...) | ||
31 | #endif | ||
32 | |||
33 | #define NAT_VALID_HOOKS ((1<<NF_IP_PRE_ROUTING) | (1<<NF_IP_POST_ROUTING) | (1<<NF_IP_LOCAL_OUT)) | ||
34 | |||
35 | static struct | ||
36 | { | ||
37 | struct ipt_replace repl; | ||
38 | struct ipt_standard entries[3]; | ||
39 | struct ipt_error term; | ||
40 | } nat_initial_table __initdata = { | ||
41 | .repl = { | ||
42 | .name = "nat", | ||
43 | .valid_hooks = NAT_VALID_HOOKS, | ||
44 | .num_entries = 4, | ||
45 | .size = sizeof(struct ipt_standard) * 3 + sizeof(struct ipt_error), | ||
46 | .hook_entry = { | ||
47 | [NF_IP_PRE_ROUTING] = 0, | ||
48 | [NF_IP_POST_ROUTING] = sizeof(struct ipt_standard), | ||
49 | [NF_IP_LOCAL_OUT] = sizeof(struct ipt_standard) * 2 }, | ||
50 | .underflow = { | ||
51 | [NF_IP_PRE_ROUTING] = 0, | ||
52 | [NF_IP_POST_ROUTING] = sizeof(struct ipt_standard), | ||
53 | [NF_IP_LOCAL_OUT] = sizeof(struct ipt_standard) * 2 }, | ||
54 | }, | ||
55 | .entries = { | ||
56 | /* PRE_ROUTING */ | ||
57 | { | ||
58 | .entry = { | ||
59 | .target_offset = sizeof(struct ipt_entry), | ||
60 | .next_offset = sizeof(struct ipt_standard), | ||
61 | }, | ||
62 | .target = { | ||
63 | .target = { | ||
64 | .u = { | ||
65 | .target_size = IPT_ALIGN(sizeof(struct ipt_standard_target)), | ||
66 | }, | ||
67 | }, | ||
68 | .verdict = -NF_ACCEPT - 1, | ||
69 | }, | ||
70 | }, | ||
71 | /* POST_ROUTING */ | ||
72 | { | ||
73 | .entry = { | ||
74 | .target_offset = sizeof(struct ipt_entry), | ||
75 | .next_offset = sizeof(struct ipt_standard), | ||
76 | }, | ||
77 | .target = { | ||
78 | .target = { | ||
79 | .u = { | ||
80 | .target_size = IPT_ALIGN(sizeof(struct ipt_standard_target)), | ||
81 | }, | ||
82 | }, | ||
83 | .verdict = -NF_ACCEPT - 1, | ||
84 | }, | ||
85 | }, | ||
86 | /* LOCAL_OUT */ | ||
87 | { | ||
88 | .entry = { | ||
89 | .target_offset = sizeof(struct ipt_entry), | ||
90 | .next_offset = sizeof(struct ipt_standard), | ||
91 | }, | ||
92 | .target = { | ||
93 | .target = { | ||
94 | .u = { | ||
95 | .target_size = IPT_ALIGN(sizeof(struct ipt_standard_target)), | ||
96 | }, | ||
97 | }, | ||
98 | .verdict = -NF_ACCEPT - 1, | ||
99 | }, | ||
100 | }, | ||
101 | }, | ||
102 | /* ERROR */ | ||
103 | .term = { | ||
104 | .entry = { | ||
105 | .target_offset = sizeof(struct ipt_entry), | ||
106 | .next_offset = sizeof(struct ipt_error), | ||
107 | }, | ||
108 | .target = { | ||
109 | .target = { | ||
110 | .u = { | ||
111 | .user = { | ||
112 | .target_size = IPT_ALIGN(sizeof(struct ipt_error_target)), | ||
113 | .name = IPT_ERROR_TARGET, | ||
114 | }, | ||
115 | }, | ||
116 | }, | ||
117 | .errorname = "ERROR", | ||
118 | }, | ||
119 | } | ||
120 | }; | ||
121 | |||
122 | static struct ipt_table nat_table = { | ||
123 | .name = "nat", | ||
124 | .valid_hooks = NAT_VALID_HOOKS, | ||
125 | .lock = RW_LOCK_UNLOCKED, | ||
126 | .me = THIS_MODULE, | ||
127 | .af = AF_INET, | ||
128 | }; | ||
129 | |||
130 | /* Source NAT */ | ||
131 | static unsigned int ipt_snat_target(struct sk_buff **pskb, | ||
132 | const struct net_device *in, | ||
133 | const struct net_device *out, | ||
134 | unsigned int hooknum, | ||
135 | const struct xt_target *target, | ||
136 | const void *targinfo) | ||
137 | { | ||
138 | struct nf_conn *ct; | ||
139 | enum ip_conntrack_info ctinfo; | ||
140 | const struct nf_nat_multi_range_compat *mr = targinfo; | ||
141 | |||
142 | NF_CT_ASSERT(hooknum == NF_IP_POST_ROUTING); | ||
143 | |||
144 | ct = nf_ct_get(*pskb, &ctinfo); | ||
145 | |||
146 | /* Connection must be valid and new. */ | ||
147 | NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED || | ||
148 | ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY)); | ||
149 | NF_CT_ASSERT(out); | ||
150 | |||
151 | return nf_nat_setup_info(ct, &mr->range[0], hooknum); | ||
152 | } | ||
153 | |||
154 | /* Before 2.6.11 we did implicit source NAT if required. Warn about change. */ | ||
155 | static void warn_if_extra_mangle(__be32 dstip, __be32 srcip) | ||
156 | { | ||
157 | static int warned = 0; | ||
158 | struct flowi fl = { .nl_u = { .ip4_u = { .daddr = dstip } } }; | ||
159 | struct rtable *rt; | ||
160 | |||
161 | if (ip_route_output_key(&rt, &fl) != 0) | ||
162 | return; | ||
163 | |||
164 | if (rt->rt_src != srcip && !warned) { | ||
165 | printk("NAT: no longer support implicit source local NAT\n"); | ||
166 | printk("NAT: packet src %u.%u.%u.%u -> dst %u.%u.%u.%u\n", | ||
167 | NIPQUAD(srcip), NIPQUAD(dstip)); | ||
168 | warned = 1; | ||
169 | } | ||
170 | ip_rt_put(rt); | ||
171 | } | ||
172 | |||
173 | static unsigned int ipt_dnat_target(struct sk_buff **pskb, | ||
174 | const struct net_device *in, | ||
175 | const struct net_device *out, | ||
176 | unsigned int hooknum, | ||
177 | const struct xt_target *target, | ||
178 | const void *targinfo) | ||
179 | { | ||
180 | struct nf_conn *ct; | ||
181 | enum ip_conntrack_info ctinfo; | ||
182 | const struct nf_nat_multi_range_compat *mr = targinfo; | ||
183 | |||
184 | NF_CT_ASSERT(hooknum == NF_IP_PRE_ROUTING || | ||
185 | hooknum == NF_IP_LOCAL_OUT); | ||
186 | |||
187 | ct = nf_ct_get(*pskb, &ctinfo); | ||
188 | |||
189 | /* Connection must be valid and new. */ | ||
190 | NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED)); | ||
191 | |||
192 | if (hooknum == NF_IP_LOCAL_OUT && | ||
193 | mr->range[0].flags & IP_NAT_RANGE_MAP_IPS) | ||
194 | warn_if_extra_mangle((*pskb)->nh.iph->daddr, | ||
195 | mr->range[0].min_ip); | ||
196 | |||
197 | return nf_nat_setup_info(ct, &mr->range[0], hooknum); | ||
198 | } | ||
199 | |||
200 | static int ipt_snat_checkentry(const char *tablename, | ||
201 | const void *entry, | ||
202 | const struct xt_target *target, | ||
203 | void *targinfo, | ||
204 | unsigned int hook_mask) | ||
205 | { | ||
206 | struct nf_nat_multi_range_compat *mr = targinfo; | ||
207 | |||
208 | /* Must be a valid range */ | ||
209 | if (mr->rangesize != 1) { | ||
210 | printk("SNAT: multiple ranges no longer supported\n"); | ||
211 | return 0; | ||
212 | } | ||
213 | return 1; | ||
214 | } | ||
215 | |||
216 | static int ipt_dnat_checkentry(const char *tablename, | ||
217 | const void *entry, | ||
218 | const struct xt_target *target, | ||
219 | void *targinfo, | ||
220 | unsigned int hook_mask) | ||
221 | { | ||
222 | struct nf_nat_multi_range_compat *mr = targinfo; | ||
223 | |||
224 | /* Must be a valid range */ | ||
225 | if (mr->rangesize != 1) { | ||
226 | printk("DNAT: multiple ranges no longer supported\n"); | ||
227 | return 0; | ||
228 | } | ||
229 | return 1; | ||
230 | } | ||
231 | |||
232 | inline unsigned int | ||
233 | alloc_null_binding(struct nf_conn *ct, | ||
234 | struct nf_nat_info *info, | ||
235 | unsigned int hooknum) | ||
236 | { | ||
237 | /* Force range to this IP; let proto decide mapping for | ||
238 | per-proto parts (hence not IP_NAT_RANGE_PROTO_SPECIFIED). | ||
239 | Use reply in case it's already been mangled (eg local packet). | ||
240 | */ | ||
241 | __be32 ip | ||
242 | = (HOOK2MANIP(hooknum) == IP_NAT_MANIP_SRC | ||
243 | ? ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip | ||
244 | : ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip); | ||
245 | struct nf_nat_range range | ||
246 | = { IP_NAT_RANGE_MAP_IPS, ip, ip, { 0 }, { 0 } }; | ||
247 | |||
248 | DEBUGP("Allocating NULL binding for %p (%u.%u.%u.%u)\n", | ||
249 | ct, NIPQUAD(ip)); | ||
250 | return nf_nat_setup_info(ct, &range, hooknum); | ||
251 | } | ||
252 | |||
253 | unsigned int | ||
254 | alloc_null_binding_confirmed(struct nf_conn *ct, | ||
255 | struct nf_nat_info *info, | ||
256 | unsigned int hooknum) | ||
257 | { | ||
258 | __be32 ip | ||
259 | = (HOOK2MANIP(hooknum) == IP_NAT_MANIP_SRC | ||
260 | ? ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip | ||
261 | : ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip); | ||
262 | u_int16_t all | ||
263 | = (HOOK2MANIP(hooknum) == IP_NAT_MANIP_SRC | ||
264 | ? ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u.all | ||
265 | : ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u.all); | ||
266 | struct nf_nat_range range | ||
267 | = { IP_NAT_RANGE_MAP_IPS, ip, ip, { all }, { all } }; | ||
268 | |||
269 | DEBUGP("Allocating NULL binding for confirmed %p (%u.%u.%u.%u)\n", | ||
270 | ct, NIPQUAD(ip)); | ||
271 | return nf_nat_setup_info(ct, &range, hooknum); | ||
272 | } | ||
273 | |||
274 | int nf_nat_rule_find(struct sk_buff **pskb, | ||
275 | unsigned int hooknum, | ||
276 | const struct net_device *in, | ||
277 | const struct net_device *out, | ||
278 | struct nf_conn *ct, | ||
279 | struct nf_nat_info *info) | ||
280 | { | ||
281 | int ret; | ||
282 | |||
283 | ret = ipt_do_table(pskb, hooknum, in, out, &nat_table); | ||
284 | |||
285 | if (ret == NF_ACCEPT) { | ||
286 | if (!nf_nat_initialized(ct, HOOK2MANIP(hooknum))) | ||
287 | /* NULL mapping */ | ||
288 | ret = alloc_null_binding(ct, info, hooknum); | ||
289 | } | ||
290 | return ret; | ||
291 | } | ||
292 | |||
293 | static struct ipt_target ipt_snat_reg = { | ||
294 | .name = "SNAT", | ||
295 | .target = ipt_snat_target, | ||
296 | .targetsize = sizeof(struct nf_nat_multi_range_compat), | ||
297 | .table = "nat", | ||
298 | .hooks = 1 << NF_IP_POST_ROUTING, | ||
299 | .checkentry = ipt_snat_checkentry, | ||
300 | .family = AF_INET, | ||
301 | }; | ||
302 | |||
303 | static struct xt_target ipt_dnat_reg = { | ||
304 | .name = "DNAT", | ||
305 | .target = ipt_dnat_target, | ||
306 | .targetsize = sizeof(struct nf_nat_multi_range_compat), | ||
307 | .table = "nat", | ||
308 | .hooks = (1 << NF_IP_PRE_ROUTING) | (1 << NF_IP_LOCAL_OUT), | ||
309 | .checkentry = ipt_dnat_checkentry, | ||
310 | .family = AF_INET, | ||
311 | }; | ||
312 | |||
313 | int __init nf_nat_rule_init(void) | ||
314 | { | ||
315 | int ret; | ||
316 | |||
317 | ret = ipt_register_table(&nat_table, &nat_initial_table.repl); | ||
318 | if (ret != 0) | ||
319 | return ret; | ||
320 | ret = xt_register_target(&ipt_snat_reg); | ||
321 | if (ret != 0) | ||
322 | goto unregister_table; | ||
323 | |||
324 | ret = xt_register_target(&ipt_dnat_reg); | ||
325 | if (ret != 0) | ||
326 | goto unregister_snat; | ||
327 | |||
328 | return ret; | ||
329 | |||
330 | unregister_snat: | ||
331 | xt_unregister_target(&ipt_snat_reg); | ||
332 | unregister_table: | ||
333 | ipt_unregister_table(&nat_table); | ||
334 | |||
335 | return ret; | ||
336 | } | ||
337 | |||
338 | void nf_nat_rule_cleanup(void) | ||
339 | { | ||
340 | xt_unregister_target(&ipt_dnat_reg); | ||
341 | xt_unregister_target(&ipt_snat_reg); | ||
342 | ipt_unregister_table(&nat_table); | ||
343 | } | ||
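A note on alloc_null_binding() above: the positional initializer { IP_NAT_RANGE_MAP_IPS, ip, ip, { 0 }, { 0 } } is easier to read with the fields named. A small sketch, assuming the usual struct nf_nat_range layout (flags, min_ip, max_ip, min, max); the helper name below is ours, not the kernel's:

/* Hedged sketch, not part of the patch: the same range spelled out with
 * designated initializers. */
#include <net/netfilter/nf_nat.h>

static struct nf_nat_range null_binding_range(__be32 ip)
{
	struct nf_nat_range range = {
		.flags	= IP_NAT_RANGE_MAP_IPS,	/* pin the address... */
		.min_ip	= ip,
		.max_ip	= ip,			/* ...to exactly this IP */
		.min	= { .all = 0 },		/* no IP_NAT_RANGE_PROTO_SPECIFIED, */
		.max	= { .all = 0 },		/* so the L4 backend picks the port */
	};

	return range;
}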
diff --git a/net/ipv4/netfilter/nf_nat_sip.c b/net/ipv4/netfilter/nf_nat_sip.c new file mode 100644 index 000000000000..3d524b957310 --- /dev/null +++ b/net/ipv4/netfilter/nf_nat_sip.c | |||
@@ -0,0 +1,283 @@ | |||
1 | /* SIP extension for UDP NAT alteration. | ||
2 | * | ||
3 | * (C) 2005 by Christian Hentschel <chentschel@arnet.com.ar> | ||
4 | * based on RR's ip_nat_ftp.c and other modules. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | |||
11 | #include <linux/module.h> | ||
12 | #include <linux/skbuff.h> | ||
13 | #include <linux/ip.h> | ||
14 | #include <linux/udp.h> | ||
15 | |||
16 | #include <net/netfilter/nf_nat.h> | ||
17 | #include <net/netfilter/nf_nat_helper.h> | ||
18 | #include <net/netfilter/nf_nat_rule.h> | ||
19 | #include <net/netfilter/nf_conntrack_helper.h> | ||
20 | #include <net/netfilter/nf_conntrack_expect.h> | ||
21 | #include <linux/netfilter/nf_conntrack_sip.h> | ||
22 | |||
23 | MODULE_LICENSE("GPL"); | ||
24 | MODULE_AUTHOR("Christian Hentschel <chentschel@arnet.com.ar>"); | ||
25 | MODULE_DESCRIPTION("SIP NAT helper"); | ||
26 | MODULE_ALIAS("ip_nat_sip"); | ||
27 | |||
28 | #if 0 | ||
29 | #define DEBUGP printk | ||
30 | #else | ||
31 | #define DEBUGP(format, args...) | ||
32 | #endif | ||
33 | |||
34 | struct addr_map { | ||
35 | struct { | ||
36 | char src[sizeof("nnn.nnn.nnn.nnn:nnnnn")]; | ||
37 | char dst[sizeof("nnn.nnn.nnn.nnn:nnnnn")]; | ||
38 | unsigned int srclen, srciplen; | ||
39 | unsigned int dstlen, dstiplen; | ||
40 | } addr[IP_CT_DIR_MAX]; | ||
41 | }; | ||
42 | |||
43 | static void addr_map_init(struct nf_conn *ct, struct addr_map *map) | ||
44 | { | ||
45 | struct nf_conntrack_tuple *t; | ||
46 | enum ip_conntrack_dir dir; | ||
47 | unsigned int n; | ||
48 | |||
49 | for (dir = 0; dir < IP_CT_DIR_MAX; dir++) { | ||
50 | t = &ct->tuplehash[dir].tuple; | ||
51 | |||
52 | n = sprintf(map->addr[dir].src, "%u.%u.%u.%u", | ||
53 | NIPQUAD(t->src.u3.ip)); | ||
54 | map->addr[dir].srciplen = n; | ||
55 | n += sprintf(map->addr[dir].src + n, ":%u", | ||
56 | ntohs(t->src.u.udp.port)); | ||
57 | map->addr[dir].srclen = n; | ||
58 | |||
59 | n = sprintf(map->addr[dir].dst, "%u.%u.%u.%u", | ||
60 | NIPQUAD(t->dst.u3.ip)); | ||
61 | map->addr[dir].dstiplen = n; | ||
62 | n += sprintf(map->addr[dir].dst + n, ":%u", | ||
63 | ntohs(t->dst.u.udp.port)); | ||
64 | map->addr[dir].dstlen = n; | ||
65 | } | ||
66 | } | ||
67 | |||
68 | static int map_sip_addr(struct sk_buff **pskb, enum ip_conntrack_info ctinfo, | ||
69 | struct nf_conn *ct, const char **dptr, size_t dlen, | ||
70 | enum sip_header_pos pos, struct addr_map *map) | ||
71 | { | ||
72 | enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); | ||
73 | unsigned int matchlen, matchoff, addrlen; | ||
74 | char *addr; | ||
75 | |||
76 | if (ct_sip_get_info(ct, *dptr, dlen, &matchoff, &matchlen, pos) <= 0) | ||
77 | return 1; | ||
78 | |||
79 | if ((matchlen == map->addr[dir].srciplen || | ||
80 | matchlen == map->addr[dir].srclen) && | ||
81 | memcmp(*dptr + matchoff, map->addr[dir].src, matchlen) == 0) { | ||
82 | addr = map->addr[!dir].dst; | ||
83 | addrlen = map->addr[!dir].dstlen; | ||
84 | } else if ((matchlen == map->addr[dir].dstiplen || | ||
85 | matchlen == map->addr[dir].dstlen) && | ||
86 | memcmp(*dptr + matchoff, map->addr[dir].dst, matchlen) == 0) { | ||
87 | addr = map->addr[!dir].src; | ||
88 | addrlen = map->addr[!dir].srclen; | ||
89 | } else | ||
90 | return 1; | ||
91 | |||
92 | if (!nf_nat_mangle_udp_packet(pskb, ct, ctinfo, | ||
93 | matchoff, matchlen, addr, addrlen)) | ||
94 | return 0; | ||
95 | *dptr = (*pskb)->data + (*pskb)->nh.iph->ihl*4 + sizeof(struct udphdr); | ||
96 | return 1; | ||
97 | |||
98 | } | ||
99 | |||
100 | static unsigned int ip_nat_sip(struct sk_buff **pskb, | ||
101 | enum ip_conntrack_info ctinfo, | ||
102 | struct nf_conn *ct, | ||
103 | const char **dptr) | ||
104 | { | ||
105 | enum sip_header_pos pos; | ||
106 | struct addr_map map; | ||
107 | int dataoff, datalen; | ||
108 | |||
109 | dataoff = (*pskb)->nh.iph->ihl*4 + sizeof(struct udphdr); | ||
110 | datalen = (*pskb)->len - dataoff; | ||
111 | if (datalen < sizeof("SIP/2.0") - 1) | ||
112 | return NF_DROP; | ||
113 | |||
114 | addr_map_init(ct, &map); | ||
115 | |||
116 | /* Basic rules: requests and responses. */ | ||
117 | if (strncmp(*dptr, "SIP/2.0", sizeof("SIP/2.0") - 1) != 0) { | ||
118 | /* 10.2: Constructing the REGISTER Request: | ||
119 | * | ||
120 | * The "userinfo" and "@" components of the SIP URI MUST NOT | ||
121 | * be present. | ||
122 | */ | ||
123 | if (datalen >= sizeof("REGISTER") - 1 && | ||
124 | strncmp(*dptr, "REGISTER", sizeof("REGISTER") - 1) == 0) | ||
125 | pos = POS_REG_REQ_URI; | ||
126 | else | ||
127 | pos = POS_REQ_URI; | ||
128 | |||
129 | if (!map_sip_addr(pskb, ctinfo, ct, dptr, datalen, pos, &map)) | ||
130 | return NF_DROP; | ||
131 | } | ||
132 | |||
133 | if (!map_sip_addr(pskb, ctinfo, ct, dptr, datalen, POS_FROM, &map) || | ||
134 | !map_sip_addr(pskb, ctinfo, ct, dptr, datalen, POS_TO, &map) || | ||
135 | !map_sip_addr(pskb, ctinfo, ct, dptr, datalen, POS_VIA, &map) || | ||
136 | !map_sip_addr(pskb, ctinfo, ct, dptr, datalen, POS_CONTACT, &map)) | ||
137 | return NF_DROP; | ||
138 | return NF_ACCEPT; | ||
139 | } | ||
140 | |||
141 | static unsigned int mangle_sip_packet(struct sk_buff **pskb, | ||
142 | enum ip_conntrack_info ctinfo, | ||
143 | struct nf_conn *ct, | ||
144 | const char **dptr, size_t dlen, | ||
145 | char *buffer, int bufflen, | ||
146 | enum sip_header_pos pos) | ||
147 | { | ||
148 | unsigned int matchlen, matchoff; | ||
149 | |||
150 | if (ct_sip_get_info(ct, *dptr, dlen, &matchoff, &matchlen, pos) <= 0) | ||
151 | return 0; | ||
152 | |||
153 | if (!nf_nat_mangle_udp_packet(pskb, ct, ctinfo, | ||
154 | matchoff, matchlen, buffer, bufflen)) | ||
155 | return 0; | ||
156 | |||
157 | /* We need to reload this. Thanks Patrick. */ | ||
158 | *dptr = (*pskb)->data + (*pskb)->nh.iph->ihl*4 + sizeof(struct udphdr); | ||
159 | return 1; | ||
160 | } | ||
161 | |||
162 | static int mangle_content_len(struct sk_buff **pskb, | ||
163 | enum ip_conntrack_info ctinfo, | ||
164 | struct nf_conn *ct, | ||
165 | const char *dptr) | ||
166 | { | ||
167 | unsigned int dataoff, matchoff, matchlen; | ||
168 | char buffer[sizeof("65536")]; | ||
169 | int bufflen; | ||
170 | |||
171 | dataoff = (*pskb)->nh.iph->ihl*4 + sizeof(struct udphdr); | ||
172 | |||
173 | /* Get actual SDP length */ | ||
174 | if (ct_sip_get_info(ct, dptr, (*pskb)->len - dataoff, &matchoff, | ||
175 | &matchlen, POS_SDP_HEADER) > 0) { | ||
176 | |||
177 | /* since ct_sip_get_info() gives us a pointer past 'v=', | ||
178 | we need to add 2 bytes to this count. */ | ||
179 | int c_len = (*pskb)->len - dataoff - matchoff + 2; | ||
180 | |||
181 | /* Now, update SDP length */ | ||
182 | if (ct_sip_get_info(ct, dptr, (*pskb)->len - dataoff, &matchoff, | ||
183 | &matchlen, POS_CONTENT) > 0) { | ||
184 | |||
185 | bufflen = sprintf(buffer, "%u", c_len); | ||
186 | return nf_nat_mangle_udp_packet(pskb, ct, ctinfo, | ||
187 | matchoff, matchlen, | ||
188 | buffer, bufflen); | ||
189 | } | ||
190 | } | ||
191 | return 0; | ||
192 | } | ||
193 | |||
194 | static unsigned int mangle_sdp(struct sk_buff **pskb, | ||
195 | enum ip_conntrack_info ctinfo, | ||
196 | struct nf_conn *ct, | ||
197 | __be32 newip, u_int16_t port, | ||
198 | const char *dptr) | ||
199 | { | ||
200 | char buffer[sizeof("nnn.nnn.nnn.nnn")]; | ||
201 | unsigned int dataoff, bufflen; | ||
202 | |||
203 | dataoff = (*pskb)->nh.iph->ihl*4 + sizeof(struct udphdr); | ||
204 | |||
205 | /* Mangle owner and contact info. */ | ||
206 | bufflen = sprintf(buffer, "%u.%u.%u.%u", NIPQUAD(newip)); | ||
207 | if (!mangle_sip_packet(pskb, ctinfo, ct, &dptr, (*pskb)->len - dataoff, | ||
208 | buffer, bufflen, POS_OWNER_IP4)) | ||
209 | return 0; | ||
210 | |||
211 | if (!mangle_sip_packet(pskb, ctinfo, ct, &dptr, (*pskb)->len - dataoff, | ||
212 | buffer, bufflen, POS_CONNECTION_IP4)) | ||
213 | return 0; | ||
214 | |||
215 | /* Mangle media port. */ | ||
216 | bufflen = sprintf(buffer, "%u", port); | ||
217 | if (!mangle_sip_packet(pskb, ctinfo, ct, &dptr, (*pskb)->len - dataoff, | ||
218 | buffer, bufflen, POS_MEDIA)) | ||
219 | return 0; | ||
220 | |||
221 | return mangle_content_len(pskb, ctinfo, ct, dptr); | ||
222 | } | ||
223 | |||
224 | /* So, this packet has hit the connection tracking matching code. | ||
225 | Mangle it, and change the expectation to match the new version. */ | ||
226 | static unsigned int ip_nat_sdp(struct sk_buff **pskb, | ||
227 | enum ip_conntrack_info ctinfo, | ||
228 | struct nf_conntrack_expect *exp, | ||
229 | const char *dptr) | ||
230 | { | ||
231 | struct nf_conn *ct = exp->master; | ||
232 | enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); | ||
233 | __be32 newip; | ||
234 | u_int16_t port; | ||
235 | |||
236 | DEBUGP("ip_nat_sdp():\n"); | ||
237 | |||
238 | /* Connection will come from reply */ | ||
239 | newip = ct->tuplehash[!dir].tuple.dst.u3.ip; | ||
240 | |||
241 | exp->tuple.dst.u3.ip = newip; | ||
242 | exp->saved_proto.udp.port = exp->tuple.dst.u.udp.port; | ||
243 | exp->dir = !dir; | ||
244 | |||
245 | /* When you see the packet, we need to NAT it the same way as | ||
246 | this one. */ | ||
247 | exp->expectfn = nf_nat_follow_master; | ||
248 | |||
249 | /* Try to get same port: if not, try to change it. */ | ||
250 | for (port = ntohs(exp->saved_proto.udp.port); port != 0; port++) { | ||
251 | exp->tuple.dst.u.udp.port = htons(port); | ||
252 | if (nf_conntrack_expect_related(exp) == 0) | ||
253 | break; | ||
254 | } | ||
255 | |||
256 | if (port == 0) | ||
257 | return NF_DROP; | ||
258 | |||
259 | if (!mangle_sdp(pskb, ctinfo, ct, newip, port, dptr)) { | ||
260 | nf_conntrack_unexpect_related(exp); | ||
261 | return NF_DROP; | ||
262 | } | ||
263 | return NF_ACCEPT; | ||
264 | } | ||
265 | |||
266 | static void __exit nf_nat_sip_fini(void) | ||
267 | { | ||
268 | rcu_assign_pointer(nf_nat_sip_hook, NULL); | ||
269 | rcu_assign_pointer(nf_nat_sdp_hook, NULL); | ||
270 | synchronize_rcu(); | ||
271 | } | ||
272 | |||
273 | static int __init nf_nat_sip_init(void) | ||
274 | { | ||
275 | BUG_ON(rcu_dereference(nf_nat_sip_hook)); | ||
276 | BUG_ON(rcu_dereference(nf_nat_sdp_hook)); | ||
277 | rcu_assign_pointer(nf_nat_sip_hook, ip_nat_sip); | ||
278 | rcu_assign_pointer(nf_nat_sdp_hook, ip_nat_sdp); | ||
279 | return 0; | ||
280 | } | ||
281 | |||
282 | module_init(nf_nat_sip_init); | ||
283 | module_exit(nf_nat_sip_fini); | ||
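The init/fini pair above only publishes function pointers; the conntrack SIP helper is the caller. Roughly how the consumer side uses the hook, simplified from what nf_conntrack_sip.c does (a sketch, not the exact code; call_sip_nat() is our name):

/* Hedged sketch of the caller side of nf_nat_sip_hook.  A NULL pointer means
 * the NAT helper module is not loaded, so the packet passes unmodified. */
#include <linux/netfilter.h>
#include <linux/rcupdate.h>
#include <linux/netfilter/nf_conntrack_sip.h>

static unsigned int call_sip_nat(struct sk_buff **pskb,
				 enum ip_conntrack_info ctinfo,
				 struct nf_conn *ct,
				 const char **dptr)
{
	typeof(nf_nat_sip_hook) nat_sip;
	unsigned int ret = NF_ACCEPT;

	rcu_read_lock();
	nat_sip = rcu_dereference(nf_nat_sip_hook);
	if (nat_sip)
		ret = nat_sip(pskb, ctinfo, ct, dptr);
	rcu_read_unlock();

	return ret;
}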
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c new file mode 100644 index 000000000000..f12528fe1bf9 --- /dev/null +++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c | |||
@@ -0,0 +1,1332 @@ | |||
1 | /* | ||
2 | * nf_nat_snmp_basic.c | ||
3 | * | ||
4 | * Basic SNMP Application Layer Gateway | ||
5 | * | ||
6 | * This IP NAT module is intended for use with SNMP network | ||
7 | * discovery and monitoring applications where target networks use | ||
8 | * conflicting private address realms. | ||
9 | * | ||
10 | * Static NAT is used to remap the networks from the view of the network | ||
11 | * management system at the IP layer, and this module remaps some application | ||
12 | * layer addresses to match. | ||
13 | * | ||
14 | * The simplest form of ALG is performed, where only tagged IP addresses | ||
15 | * are modified. The module does not need to be MIB aware and only scans | ||
16 | * messages at the ASN.1/BER level. | ||
17 | * | ||
18 | * Currently, only SNMPv1 and SNMPv2 are supported. | ||
19 | * | ||
20 | * More information on ALG and associated issues can be found in | ||
21 | * RFC 2962 | ||
22 | * | ||
23 | * The ASN.1/BER parsing code is derived from the gxsnmp package by Gregory | ||
24 | * McLean & Jochen Friedrich, stripped down for use in the kernel. | ||
25 | * | ||
26 | * Copyright (c) 2000 RP Internet (www.rpi.net.au). | ||
27 | * | ||
28 | * This program is free software; you can redistribute it and/or modify | ||
29 | * it under the terms of the GNU General Public License as published by | ||
30 | * the Free Software Foundation; either version 2 of the License, or | ||
31 | * (at your option) any later version. | ||
32 | * This program is distributed in the hope that it will be useful, | ||
33 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
34 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
35 | * GNU General Public License for more details. | ||
36 | * You should have received a copy of the GNU General Public License | ||
37 | * along with this program; if not, write to the Free Software | ||
38 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
39 | * | ||
40 | * Author: James Morris <jmorris@intercode.com.au> | ||
41 | * | ||
42 | * Updates: | ||
43 | * 2000-08-06: Convert to new helper API (Harald Welte). | ||
44 | * | ||
45 | */ | ||
46 | #include <linux/module.h> | ||
47 | #include <linux/moduleparam.h> | ||
48 | #include <linux/types.h> | ||
49 | #include <linux/kernel.h> | ||
50 | #include <linux/in.h> | ||
51 | #include <linux/ip.h> | ||
52 | #include <linux/udp.h> | ||
53 | #include <net/checksum.h> | ||
54 | #include <net/udp.h> | ||
55 | |||
56 | #include <net/netfilter/nf_nat.h> | ||
57 | #include <net/netfilter/nf_conntrack_helper.h> | ||
58 | #include <net/netfilter/nf_nat_helper.h> | ||
59 | |||
60 | MODULE_LICENSE("GPL"); | ||
61 | MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>"); | ||
62 | MODULE_DESCRIPTION("Basic SNMP Application Layer Gateway"); | ||
63 | MODULE_ALIAS("ip_nat_snmp_basic"); | ||
64 | |||
65 | #define SNMP_PORT 161 | ||
66 | #define SNMP_TRAP_PORT 162 | ||
67 | #define NOCT1(n) (*(u8 *)n) | ||
68 | |||
69 | static int debug; | ||
70 | static DEFINE_SPINLOCK(snmp_lock); | ||
71 | |||
72 | /* | ||
73 | * Application layer address mapping mimics the NAT mapping, but | ||
74 | * only for the first octet in this case (a more flexible system | ||
75 | * can be implemented if needed). | ||
76 | */ | ||
77 | struct oct1_map | ||
78 | { | ||
79 | u_int8_t from; | ||
80 | u_int8_t to; | ||
81 | }; | ||
82 | |||
83 | |||
84 | /***************************************************************************** | ||
85 | * | ||
86 | * Basic ASN.1 decoding routines (gxsnmp author Dirk Wisse) | ||
87 | * | ||
88 | *****************************************************************************/ | ||
89 | |||
90 | /* Class */ | ||
91 | #define ASN1_UNI 0 /* Universal */ | ||
92 | #define ASN1_APL 1 /* Application */ | ||
93 | #define ASN1_CTX 2 /* Context */ | ||
94 | #define ASN1_PRV 3 /* Private */ | ||
95 | |||
96 | /* Tag */ | ||
97 | #define ASN1_EOC 0 /* End Of Contents */ | ||
98 | #define ASN1_BOL 1 /* Boolean */ | ||
99 | #define ASN1_INT 2 /* Integer */ | ||
100 | #define ASN1_BTS 3 /* Bit String */ | ||
101 | #define ASN1_OTS 4 /* Octet String */ | ||
102 | #define ASN1_NUL 5 /* Null */ | ||
103 | #define ASN1_OJI 6 /* Object Identifier */ | ||
104 | #define ASN1_OJD 7 /* Object Description */ | ||
105 | #define ASN1_EXT 8 /* External */ | ||
106 | #define ASN1_SEQ 16 /* Sequence */ | ||
107 | #define ASN1_SET 17 /* Set */ | ||
108 | #define ASN1_NUMSTR 18 /* Numerical String */ | ||
109 | #define ASN1_PRNSTR 19 /* Printable String */ | ||
110 | #define ASN1_TEXSTR 20 /* Teletext String */ | ||
111 | #define ASN1_VIDSTR 21 /* Video String */ | ||
112 | #define ASN1_IA5STR 22 /* IA5 String */ | ||
113 | #define ASN1_UNITIM 23 /* Universal Time */ | ||
114 | #define ASN1_GENTIM 24 /* General Time */ | ||
115 | #define ASN1_GRASTR 25 /* Graphical String */ | ||
116 | #define ASN1_VISSTR 26 /* Visible String */ | ||
117 | #define ASN1_GENSTR 27 /* General String */ | ||
118 | |||
119 | /* Primitive / Constructed methods*/ | ||
120 | #define ASN1_PRI 0 /* Primitive */ | ||
121 | #define ASN1_CON 1 /* Constructed */ | ||
122 | |||
123 | /* | ||
124 | * Error codes. | ||
125 | */ | ||
126 | #define ASN1_ERR_NOERROR 0 | ||
127 | #define ASN1_ERR_DEC_EMPTY 2 | ||
128 | #define ASN1_ERR_DEC_EOC_MISMATCH 3 | ||
129 | #define ASN1_ERR_DEC_LENGTH_MISMATCH 4 | ||
130 | #define ASN1_ERR_DEC_BADVALUE 5 | ||
131 | |||
132 | /* | ||
133 | * ASN.1 context. | ||
134 | */ | ||
135 | struct asn1_ctx | ||
136 | { | ||
137 | int error; /* Error condition */ | ||
138 | unsigned char *pointer; /* Octet just to be decoded */ | ||
139 | unsigned char *begin; /* First octet */ | ||
140 | unsigned char *end; /* Octet after last octet */ | ||
141 | }; | ||
142 | |||
143 | /* | ||
144 | * Octet string (not null terminated) | ||
145 | */ | ||
146 | struct asn1_octstr | ||
147 | { | ||
148 | unsigned char *data; | ||
149 | unsigned int len; | ||
150 | }; | ||
151 | |||
152 | static void asn1_open(struct asn1_ctx *ctx, | ||
153 | unsigned char *buf, | ||
154 | unsigned int len) | ||
155 | { | ||
156 | ctx->begin = buf; | ||
157 | ctx->end = buf + len; | ||
158 | ctx->pointer = buf; | ||
159 | ctx->error = ASN1_ERR_NOERROR; | ||
160 | } | ||
161 | |||
162 | static unsigned char asn1_octet_decode(struct asn1_ctx *ctx, unsigned char *ch) | ||
163 | { | ||
164 | if (ctx->pointer >= ctx->end) { | ||
165 | ctx->error = ASN1_ERR_DEC_EMPTY; | ||
166 | return 0; | ||
167 | } | ||
168 | *ch = *(ctx->pointer)++; | ||
169 | return 1; | ||
170 | } | ||
171 | |||
172 | static unsigned char asn1_tag_decode(struct asn1_ctx *ctx, unsigned int *tag) | ||
173 | { | ||
174 | unsigned char ch; | ||
175 | |||
176 | *tag = 0; | ||
177 | |||
178 | do | ||
179 | { | ||
180 | if (!asn1_octet_decode(ctx, &ch)) | ||
181 | return 0; | ||
182 | *tag <<= 7; | ||
183 | *tag |= ch & 0x7F; | ||
184 | } while ((ch & 0x80) == 0x80); | ||
185 | return 1; | ||
186 | } | ||
187 | |||
188 | static unsigned char asn1_id_decode(struct asn1_ctx *ctx, | ||
189 | unsigned int *cls, | ||
190 | unsigned int *con, | ||
191 | unsigned int *tag) | ||
192 | { | ||
193 | unsigned char ch; | ||
194 | |||
195 | if (!asn1_octet_decode(ctx, &ch)) | ||
196 | return 0; | ||
197 | |||
198 | *cls = (ch & 0xC0) >> 6; | ||
199 | *con = (ch & 0x20) >> 5; | ||
200 | *tag = (ch & 0x1F); | ||
201 | |||
202 | if (*tag == 0x1F) { | ||
203 | if (!asn1_tag_decode(ctx, tag)) | ||
204 | return 0; | ||
205 | } | ||
206 | return 1; | ||
207 | } | ||
208 | |||
209 | static unsigned char asn1_length_decode(struct asn1_ctx *ctx, | ||
210 | unsigned int *def, | ||
211 | unsigned int *len) | ||
212 | { | ||
213 | unsigned char ch, cnt; | ||
214 | |||
215 | if (!asn1_octet_decode(ctx, &ch)) | ||
216 | return 0; | ||
217 | |||
218 | if (ch == 0x80) | ||
219 | *def = 0; | ||
220 | else { | ||
221 | *def = 1; | ||
222 | |||
223 | if (ch < 0x80) | ||
224 | *len = ch; | ||
225 | else { | ||
226 | cnt = (unsigned char) (ch & 0x7F); | ||
227 | *len = 0; | ||
228 | |||
229 | while (cnt > 0) { | ||
230 | if (!asn1_octet_decode(ctx, &ch)) | ||
231 | return 0; | ||
232 | *len <<= 8; | ||
233 | *len |= ch; | ||
234 | cnt--; | ||
235 | } | ||
236 | } | ||
237 | } | ||
238 | return 1; | ||
239 | } | ||
240 | |||
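asn1_length_decode() above handles the three BER length forms: short form (one octet below 0x80), long form (0x80 | n followed by n big-endian length octets) and the indefinite form (a bare 0x80, later closed by an end-of-contents marker, which is why *def is cleared). A standalone user-space illustration of the same rules; none of this is kernel code and the names are ours:

/* Decode a BER length the same way asn1_length_decode() does. */
#include <stdio.h>

static int ber_length(const unsigned char *buf, int *definite, unsigned int *len)
{
	unsigned char ch = buf[0];
	int i, cnt;

	*len = 0;
	if (ch == 0x80) {		/* indefinite: terminated by 00 00 EOC */
		*definite = 0;
		return 1;
	}
	*definite = 1;
	if (ch < 0x80) {		/* short form: length fits in 7 bits */
		*len = ch;
		return 1;
	}
	cnt = ch & 0x7F;		/* long form: next cnt octets, big endian */
	for (i = 1; i <= cnt; i++)
		*len = (*len << 8) | buf[i];
	return cnt + 1;
}

int main(void)
{
	const unsigned char short_form[] = { 0x26 };		 /* length 38  */
	const unsigned char long_form[]  = { 0x82, 0x01, 0x0F }; /* length 271 */
	int def;
	unsigned int len;

	ber_length(short_form, &def, &len);
	printf("short form: definite=%d len=%u\n", def, len);
	ber_length(long_form, &def, &len);
	printf("long form:  definite=%d len=%u\n", def, len);
	return 0;
}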
241 | static unsigned char asn1_header_decode(struct asn1_ctx *ctx, | ||
242 | unsigned char **eoc, | ||
243 | unsigned int *cls, | ||
244 | unsigned int *con, | ||
245 | unsigned int *tag) | ||
246 | { | ||
247 | unsigned int def, len; | ||
248 | |||
249 | if (!asn1_id_decode(ctx, cls, con, tag)) | ||
250 | return 0; | ||
251 | |||
252 | def = len = 0; | ||
253 | if (!asn1_length_decode(ctx, &def, &len)) | ||
254 | return 0; | ||
255 | |||
256 | if (def) | ||
257 | *eoc = ctx->pointer + len; | ||
258 | else | ||
259 | *eoc = NULL; | ||
260 | return 1; | ||
261 | } | ||
262 | |||
263 | static unsigned char asn1_eoc_decode(struct asn1_ctx *ctx, unsigned char *eoc) | ||
264 | { | ||
265 | unsigned char ch; | ||
266 | |||
267 | if (eoc == 0) { | ||
268 | if (!asn1_octet_decode(ctx, &ch)) | ||
269 | return 0; | ||
270 | |||
271 | if (ch != 0x00) { | ||
272 | ctx->error = ASN1_ERR_DEC_EOC_MISMATCH; | ||
273 | return 0; | ||
274 | } | ||
275 | |||
276 | if (!asn1_octet_decode(ctx, &ch)) | ||
277 | return 0; | ||
278 | |||
279 | if (ch != 0x00) { | ||
280 | ctx->error = ASN1_ERR_DEC_EOC_MISMATCH; | ||
281 | return 0; | ||
282 | } | ||
283 | return 1; | ||
284 | } else { | ||
285 | if (ctx->pointer != eoc) { | ||
286 | ctx->error = ASN1_ERR_DEC_LENGTH_MISMATCH; | ||
287 | return 0; | ||
288 | } | ||
289 | return 1; | ||
290 | } | ||
291 | } | ||
292 | |||
293 | static unsigned char asn1_null_decode(struct asn1_ctx *ctx, unsigned char *eoc) | ||
294 | { | ||
295 | ctx->pointer = eoc; | ||
296 | return 1; | ||
297 | } | ||
298 | |||
299 | static unsigned char asn1_long_decode(struct asn1_ctx *ctx, | ||
300 | unsigned char *eoc, | ||
301 | long *integer) | ||
302 | { | ||
303 | unsigned char ch; | ||
304 | unsigned int len; | ||
305 | |||
306 | if (!asn1_octet_decode(ctx, &ch)) | ||
307 | return 0; | ||
308 | |||
309 | *integer = (signed char) ch; | ||
310 | len = 1; | ||
311 | |||
312 | while (ctx->pointer < eoc) { | ||
313 | if (++len > sizeof (long)) { | ||
314 | ctx->error = ASN1_ERR_DEC_BADVALUE; | ||
315 | return 0; | ||
316 | } | ||
317 | |||
318 | if (!asn1_octet_decode(ctx, &ch)) | ||
319 | return 0; | ||
320 | |||
321 | *integer <<= 8; | ||
322 | *integer |= ch; | ||
323 | } | ||
324 | return 1; | ||
325 | } | ||
326 | |||
327 | static unsigned char asn1_uint_decode(struct asn1_ctx *ctx, | ||
328 | unsigned char *eoc, | ||
329 | unsigned int *integer) | ||
330 | { | ||
331 | unsigned char ch; | ||
332 | unsigned int len; | ||
333 | |||
334 | if (!asn1_octet_decode(ctx, &ch)) | ||
335 | return 0; | ||
336 | |||
337 | *integer = ch; | ||
338 | if (ch == 0) len = 0; | ||
339 | else len = 1; | ||
340 | |||
341 | while (ctx->pointer < eoc) { | ||
342 | if (++len > sizeof (unsigned int)) { | ||
343 | ctx->error = ASN1_ERR_DEC_BADVALUE; | ||
344 | return 0; | ||
345 | } | ||
346 | |||
347 | if (!asn1_octet_decode(ctx, &ch)) | ||
348 | return 0; | ||
349 | |||
350 | *integer <<= 8; | ||
351 | *integer |= ch; | ||
352 | } | ||
353 | return 1; | ||
354 | } | ||
355 | |||
356 | static unsigned char asn1_ulong_decode(struct asn1_ctx *ctx, | ||
357 | unsigned char *eoc, | ||
358 | unsigned long *integer) | ||
359 | { | ||
360 | unsigned char ch; | ||
361 | unsigned int len; | ||
362 | |||
363 | if (!asn1_octet_decode(ctx, &ch)) | ||
364 | return 0; | ||
365 | |||
366 | *integer = ch; | ||
367 | if (ch == 0) len = 0; | ||
368 | else len = 1; | ||
369 | |||
370 | while (ctx->pointer < eoc) { | ||
371 | if (++len > sizeof (unsigned long)) { | ||
372 | ctx->error = ASN1_ERR_DEC_BADVALUE; | ||
373 | return 0; | ||
374 | } | ||
375 | |||
376 | if (!asn1_octet_decode(ctx, &ch)) | ||
377 | return 0; | ||
378 | |||
379 | *integer <<= 8; | ||
380 | *integer |= ch; | ||
381 | } | ||
382 | return 1; | ||
383 | } | ||
384 | |||
385 | static unsigned char asn1_octets_decode(struct asn1_ctx *ctx, | ||
386 | unsigned char *eoc, | ||
387 | unsigned char **octets, | ||
388 | unsigned int *len) | ||
389 | { | ||
390 | unsigned char *ptr; | ||
391 | |||
392 | *len = 0; | ||
393 | |||
394 | *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC); | ||
395 | if (*octets == NULL) { | ||
396 | if (net_ratelimit()) | ||
397 | printk("OOM in bsalg (%d)\n", __LINE__); | ||
398 | return 0; | ||
399 | } | ||
400 | |||
401 | ptr = *octets; | ||
402 | while (ctx->pointer < eoc) { | ||
403 | if (!asn1_octet_decode(ctx, (unsigned char *)ptr++)) { | ||
404 | kfree(*octets); | ||
405 | *octets = NULL; | ||
406 | return 0; | ||
407 | } | ||
408 | (*len)++; | ||
409 | } | ||
410 | return 1; | ||
411 | } | ||
412 | |||
413 | static unsigned char asn1_subid_decode(struct asn1_ctx *ctx, | ||
414 | unsigned long *subid) | ||
415 | { | ||
416 | unsigned char ch; | ||
417 | |||
418 | *subid = 0; | ||
419 | |||
420 | do { | ||
421 | if (!asn1_octet_decode(ctx, &ch)) | ||
422 | return 0; | ||
423 | |||
424 | *subid <<= 7; | ||
425 | *subid |= ch & 0x7F; | ||
426 | } while ((ch & 0x80) == 0x80); | ||
427 | return 1; | ||
428 | } | ||
429 | |||
430 | static unsigned char asn1_oid_decode(struct asn1_ctx *ctx, | ||
431 | unsigned char *eoc, | ||
432 | unsigned long **oid, | ||
433 | unsigned int *len) | ||
434 | { | ||
435 | unsigned long subid; | ||
436 | unsigned int size; | ||
437 | unsigned long *optr; | ||
438 | |||
439 | size = eoc - ctx->pointer + 1; | ||
440 | *oid = kmalloc(size * sizeof(unsigned long), GFP_ATOMIC); | ||
441 | if (*oid == NULL) { | ||
442 | if (net_ratelimit()) | ||
443 | printk("OOM in bsalg (%d)\n", __LINE__); | ||
444 | return 0; | ||
445 | } | ||
446 | |||
447 | optr = *oid; | ||
448 | |||
449 | if (!asn1_subid_decode(ctx, &subid)) { | ||
450 | kfree(*oid); | ||
451 | *oid = NULL; | ||
452 | return 0; | ||
453 | } | ||
454 | |||
455 | if (subid < 40) { | ||
456 | optr [0] = 0; | ||
457 | optr [1] = subid; | ||
458 | } else if (subid < 80) { | ||
459 | optr [0] = 1; | ||
460 | optr [1] = subid - 40; | ||
461 | } else { | ||
462 | optr [0] = 2; | ||
463 | optr [1] = subid - 80; | ||
464 | } | ||
465 | |||
466 | *len = 2; | ||
467 | optr += 2; | ||
468 | |||
469 | while (ctx->pointer < eoc) { | ||
470 | if (++(*len) > size) { | ||
471 | ctx->error = ASN1_ERR_DEC_BADVALUE; | ||
472 | kfree(*oid); | ||
473 | *oid = NULL; | ||
474 | return 0; | ||
475 | } | ||
476 | |||
477 | if (!asn1_subid_decode(ctx, optr++)) { | ||
478 | kfree(*oid); | ||
479 | *oid = NULL; | ||
480 | return 0; | ||
481 | } | ||
482 | } | ||
483 | return 1; | ||
484 | } | ||
485 | |||
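asn1_subid_decode()/asn1_oid_decode() above implement the two OID encoding rules: each subidentifier is base-128 with the top bit as a continuation flag, and the first encoded subidentifier packs the first two arcs as 40*arc1 + arc2 (hence the under-40 / under-80 split). A standalone user-space example decoding the content octets of 1.2.840.113549; the code is illustrative only:

#include <stdio.h>

int main(void)
{
	/* Encoded OID content octets for 1.2.840.113549:
	 * 0x2A = 40*1 + 2, 0x86 0x48 = 6*128 + 72 = 840,
	 * 0x86 0xF7 0x0D = (6*128 + 0x77)*128 + 0x0D = 113549. */
	const unsigned char oid[] = { 0x2A, 0x86, 0x48, 0x86, 0xF7, 0x0D };
	unsigned long subid = 0, arcs[16];
	unsigned int i, n = 0;

	for (i = 0; i < sizeof(oid); i++) {
		subid = (subid << 7) | (oid[i] & 0x7F);
		if (!(oid[i] & 0x80)) {		/* top bit clear: last octet */
			arcs[n++] = subid;
			subid = 0;
		}
	}
	/* Split the first subidentifier into the first two arcs. */
	printf("%lu.%lu", arcs[0] < 80 ? arcs[0] / 40 : 2,
	       arcs[0] < 80 ? arcs[0] % 40 : arcs[0] - 80);
	for (i = 1; i < n; i++)
		printf(".%lu", arcs[i]);
	printf("\n");				/* prints 1.2.840.113549 */
	return 0;
}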
486 | /***************************************************************************** | ||
487 | * | ||
488 | * SNMP decoding routines (gxsnmp author Dirk Wisse) | ||
489 | * | ||
490 | *****************************************************************************/ | ||
491 | |||
492 | /* SNMP Versions */ | ||
493 | #define SNMP_V1 0 | ||
494 | #define SNMP_V2C 1 | ||
495 | #define SNMP_V2 2 | ||
496 | #define SNMP_V3 3 | ||
497 | |||
498 | /* Default Sizes */ | ||
499 | #define SNMP_SIZE_COMM 256 | ||
500 | #define SNMP_SIZE_OBJECTID 128 | ||
501 | #define SNMP_SIZE_BUFCHR 256 | ||
502 | #define SNMP_SIZE_BUFINT 128 | ||
503 | #define SNMP_SIZE_SMALLOBJECTID 16 | ||
504 | |||
505 | /* Requests */ | ||
506 | #define SNMP_PDU_GET 0 | ||
507 | #define SNMP_PDU_NEXT 1 | ||
508 | #define SNMP_PDU_RESPONSE 2 | ||
509 | #define SNMP_PDU_SET 3 | ||
510 | #define SNMP_PDU_TRAP1 4 | ||
511 | #define SNMP_PDU_BULK 5 | ||
512 | #define SNMP_PDU_INFORM 6 | ||
513 | #define SNMP_PDU_TRAP2 7 | ||
514 | |||
515 | /* Errors */ | ||
516 | #define SNMP_NOERROR 0 | ||
517 | #define SNMP_TOOBIG 1 | ||
518 | #define SNMP_NOSUCHNAME 2 | ||
519 | #define SNMP_BADVALUE 3 | ||
520 | #define SNMP_READONLY 4 | ||
521 | #define SNMP_GENERROR 5 | ||
522 | #define SNMP_NOACCESS 6 | ||
523 | #define SNMP_WRONGTYPE 7 | ||
524 | #define SNMP_WRONGLENGTH 8 | ||
525 | #define SNMP_WRONGENCODING 9 | ||
526 | #define SNMP_WRONGVALUE 10 | ||
527 | #define SNMP_NOCREATION 11 | ||
528 | #define SNMP_INCONSISTENTVALUE 12 | ||
529 | #define SNMP_RESOURCEUNAVAILABLE 13 | ||
530 | #define SNMP_COMMITFAILED 14 | ||
531 | #define SNMP_UNDOFAILED 15 | ||
532 | #define SNMP_AUTHORIZATIONERROR 16 | ||
533 | #define SNMP_NOTWRITABLE 17 | ||
534 | #define SNMP_INCONSISTENTNAME 18 | ||
535 | |||
536 | /* General SNMP V1 Traps */ | ||
537 | #define SNMP_TRAP_COLDSTART 0 | ||
538 | #define SNMP_TRAP_WARMSTART 1 | ||
539 | #define SNMP_TRAP_LINKDOWN 2 | ||
540 | #define SNMP_TRAP_LINKUP 3 | ||
541 | #define SNMP_TRAP_AUTFAILURE 4 | ||
542 | #define SNMP_TRAP_EQPNEIGHBORLOSS 5 | ||
543 | #define SNMP_TRAP_ENTSPECIFIC 6 | ||
544 | |||
545 | /* SNMPv1 Types */ | ||
546 | #define SNMP_NULL 0 | ||
547 | #define SNMP_INTEGER 1 /* l */ | ||
548 | #define SNMP_OCTETSTR 2 /* c */ | ||
549 | #define SNMP_DISPLAYSTR 2 /* c */ | ||
550 | #define SNMP_OBJECTID 3 /* ul */ | ||
551 | #define SNMP_IPADDR 4 /* uc */ | ||
552 | #define SNMP_COUNTER 5 /* ul */ | ||
553 | #define SNMP_GAUGE 6 /* ul */ | ||
554 | #define SNMP_TIMETICKS 7 /* ul */ | ||
555 | #define SNMP_OPAQUE 8 /* c */ | ||
556 | |||
557 | /* Additional SNMPv2 Types */ | ||
558 | #define SNMP_UINTEGER 5 /* ul */ | ||
559 | #define SNMP_BITSTR 9 /* uc */ | ||
560 | #define SNMP_NSAP 10 /* uc */ | ||
561 | #define SNMP_COUNTER64 11 /* ul */ | ||
562 | #define SNMP_NOSUCHOBJECT 12 | ||
563 | #define SNMP_NOSUCHINSTANCE 13 | ||
564 | #define SNMP_ENDOFMIBVIEW 14 | ||
565 | |||
566 | union snmp_syntax | ||
567 | { | ||
568 | unsigned char uc[0]; /* 8 bit unsigned */ | ||
569 | char c[0]; /* 8 bit signed */ | ||
570 | unsigned long ul[0]; /* 32 bit unsigned */ | ||
571 | long l[0]; /* 32 bit signed */ | ||
572 | }; | ||
573 | |||
574 | struct snmp_object | ||
575 | { | ||
576 | unsigned long *id; | ||
577 | unsigned int id_len; | ||
578 | unsigned short type; | ||
579 | unsigned int syntax_len; | ||
580 | union snmp_syntax syntax; | ||
581 | }; | ||
582 | |||
583 | struct snmp_request | ||
584 | { | ||
585 | unsigned long id; | ||
586 | unsigned int error_status; | ||
587 | unsigned int error_index; | ||
588 | }; | ||
589 | |||
590 | struct snmp_v1_trap | ||
591 | { | ||
592 | unsigned long *id; | ||
593 | unsigned int id_len; | ||
594 | unsigned long ip_address; /* pointer */ | ||
595 | unsigned int general; | ||
596 | unsigned int specific; | ||
597 | unsigned long time; | ||
598 | }; | ||
599 | |||
600 | /* SNMP types */ | ||
601 | #define SNMP_IPA 0 | ||
602 | #define SNMP_CNT 1 | ||
603 | #define SNMP_GGE 2 | ||
604 | #define SNMP_TIT 3 | ||
605 | #define SNMP_OPQ 4 | ||
606 | #define SNMP_C64 6 | ||
607 | |||
608 | /* SNMP errors */ | ||
609 | #define SERR_NSO 0 | ||
610 | #define SERR_NSI 1 | ||
611 | #define SERR_EOM 2 | ||
612 | |||
613 | static inline void mangle_address(unsigned char *begin, | ||
614 | unsigned char *addr, | ||
615 | const struct oct1_map *map, | ||
616 | __sum16 *check); | ||
617 | struct snmp_cnv | ||
618 | { | ||
619 | unsigned int class; | ||
620 | unsigned int tag; | ||
621 | int syntax; | ||
622 | }; | ||
623 | |||
624 | static struct snmp_cnv snmp_conv [] = | ||
625 | { | ||
626 | {ASN1_UNI, ASN1_NUL, SNMP_NULL}, | ||
627 | {ASN1_UNI, ASN1_INT, SNMP_INTEGER}, | ||
628 | {ASN1_UNI, ASN1_OTS, SNMP_OCTETSTR}, | ||
629 | {ASN1_UNI, ASN1_OTS, SNMP_DISPLAYSTR}, | ||
630 | {ASN1_UNI, ASN1_OJI, SNMP_OBJECTID}, | ||
631 | {ASN1_APL, SNMP_IPA, SNMP_IPADDR}, | ||
632 | {ASN1_APL, SNMP_CNT, SNMP_COUNTER}, /* Counter32 */ | ||
633 | {ASN1_APL, SNMP_GGE, SNMP_GAUGE}, /* Gauge32 == Unsigned32 */ | ||
634 | {ASN1_APL, SNMP_TIT, SNMP_TIMETICKS}, | ||
635 | {ASN1_APL, SNMP_OPQ, SNMP_OPAQUE}, | ||
636 | |||
637 | /* SNMPv2 data types and errors */ | ||
638 | {ASN1_UNI, ASN1_BTS, SNMP_BITSTR}, | ||
639 | {ASN1_APL, SNMP_C64, SNMP_COUNTER64}, | ||
640 | {ASN1_CTX, SERR_NSO, SNMP_NOSUCHOBJECT}, | ||
641 | {ASN1_CTX, SERR_NSI, SNMP_NOSUCHINSTANCE}, | ||
642 | {ASN1_CTX, SERR_EOM, SNMP_ENDOFMIBVIEW}, | ||
643 | {0, 0, -1} | ||
644 | }; | ||
645 | |||
646 | static unsigned char snmp_tag_cls2syntax(unsigned int tag, | ||
647 | unsigned int cls, | ||
648 | unsigned short *syntax) | ||
649 | { | ||
650 | struct snmp_cnv *cnv; | ||
651 | |||
652 | cnv = snmp_conv; | ||
653 | |||
654 | while (cnv->syntax != -1) { | ||
655 | if (cnv->tag == tag && cnv->class == cls) { | ||
656 | *syntax = cnv->syntax; | ||
657 | return 1; | ||
658 | } | ||
659 | cnv++; | ||
660 | } | ||
661 | return 0; | ||
662 | } | ||
663 | |||
664 | static unsigned char snmp_object_decode(struct asn1_ctx *ctx, | ||
665 | struct snmp_object **obj) | ||
666 | { | ||
667 | unsigned int cls, con, tag, len, idlen; | ||
668 | unsigned short type; | ||
669 | unsigned char *eoc, *end, *p; | ||
670 | unsigned long *lp, *id; | ||
671 | unsigned long ul; | ||
672 | long l; | ||
673 | |||
674 | *obj = NULL; | ||
675 | id = NULL; | ||
676 | |||
677 | if (!asn1_header_decode(ctx, &eoc, &cls, &con, &tag)) | ||
678 | return 0; | ||
679 | |||
680 | if (cls != ASN1_UNI || con != ASN1_CON || tag != ASN1_SEQ) | ||
681 | return 0; | ||
682 | |||
683 | if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) | ||
684 | return 0; | ||
685 | |||
686 | if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_OJI) | ||
687 | return 0; | ||
688 | |||
689 | if (!asn1_oid_decode(ctx, end, &id, &idlen)) | ||
690 | return 0; | ||
691 | |||
692 | if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) { | ||
693 | kfree(id); | ||
694 | return 0; | ||
695 | } | ||
696 | |||
697 | if (con != ASN1_PRI) { | ||
698 | kfree(id); | ||
699 | return 0; | ||
700 | } | ||
701 | |||
702 | type = 0; | ||
703 | if (!snmp_tag_cls2syntax(tag, cls, &type)) { | ||
704 | kfree(id); | ||
705 | return 0; | ||
706 | } | ||
707 | |||
708 | l = 0; | ||
709 | switch (type) { | ||
710 | case SNMP_INTEGER: | ||
711 | len = sizeof(long); | ||
712 | if (!asn1_long_decode(ctx, end, &l)) { | ||
713 | kfree(id); | ||
714 | return 0; | ||
715 | } | ||
716 | *obj = kmalloc(sizeof(struct snmp_object) + len, | ||
717 | GFP_ATOMIC); | ||
718 | if (*obj == NULL) { | ||
719 | kfree(id); | ||
720 | if (net_ratelimit()) | ||
721 | printk("OOM in bsalg (%d)\n", __LINE__); | ||
722 | return 0; | ||
723 | } | ||
724 | (*obj)->syntax.l[0] = l; | ||
725 | break; | ||
726 | case SNMP_OCTETSTR: | ||
727 | case SNMP_OPAQUE: | ||
728 | if (!asn1_octets_decode(ctx, end, &p, &len)) { | ||
729 | kfree(id); | ||
730 | return 0; | ||
731 | } | ||
732 | *obj = kmalloc(sizeof(struct snmp_object) + len, | ||
733 | GFP_ATOMIC); | ||
734 | if (*obj == NULL) { | ||
735 | kfree(id); | ||
736 | if (net_ratelimit()) | ||
737 | printk("OOM in bsalg (%d)\n", __LINE__); | ||
738 | return 0; | ||
739 | } | ||
740 | memcpy((*obj)->syntax.c, p, len); | ||
741 | kfree(p); | ||
742 | break; | ||
743 | case SNMP_NULL: | ||
744 | case SNMP_NOSUCHOBJECT: | ||
745 | case SNMP_NOSUCHINSTANCE: | ||
746 | case SNMP_ENDOFMIBVIEW: | ||
747 | len = 0; | ||
748 | *obj = kmalloc(sizeof(struct snmp_object), GFP_ATOMIC); | ||
749 | if (*obj == NULL) { | ||
750 | kfree(id); | ||
751 | if (net_ratelimit()) | ||
752 | printk("OOM in bsalg (%d)\n", __LINE__); | ||
753 | return 0; | ||
754 | } | ||
755 | if (!asn1_null_decode(ctx, end)) { | ||
756 | kfree(id); | ||
757 | kfree(*obj); | ||
758 | *obj = NULL; | ||
759 | return 0; | ||
760 | } | ||
761 | break; | ||
762 | case SNMP_OBJECTID: | ||
763 | if (!asn1_oid_decode(ctx, end, (unsigned long **)&lp, &len)) { | ||
764 | kfree(id); | ||
765 | return 0; | ||
766 | } | ||
767 | len *= sizeof(unsigned long); | ||
768 | *obj = kmalloc(sizeof(struct snmp_object) + len, GFP_ATOMIC); | ||
769 | if (*obj == NULL) { | ||
770 | kfree(lp); | ||
771 | kfree(id); | ||
772 | if (net_ratelimit()) | ||
773 | printk("OOM in bsalg (%d)\n", __LINE__); | ||
774 | return 0; | ||
775 | } | ||
776 | memcpy((*obj)->syntax.ul, lp, len); | ||
777 | kfree(lp); | ||
778 | break; | ||
779 | case SNMP_IPADDR: | ||
780 | if (!asn1_octets_decode(ctx, end, &p, &len)) { | ||
781 | kfree(id); | ||
782 | return 0; | ||
783 | } | ||
784 | if (len != 4) { | ||
785 | kfree(p); | ||
786 | kfree(id); | ||
787 | return 0; | ||
788 | } | ||
789 | *obj = kmalloc(sizeof(struct snmp_object) + len, GFP_ATOMIC); | ||
790 | if (*obj == NULL) { | ||
791 | kfree(p); | ||
792 | kfree(id); | ||
793 | if (net_ratelimit()) | ||
794 | printk("OOM in bsalg (%d)\n", __LINE__); | ||
795 | return 0; | ||
796 | } | ||
797 | memcpy((*obj)->syntax.uc, p, len); | ||
798 | kfree(p); | ||
799 | break; | ||
800 | case SNMP_COUNTER: | ||
801 | case SNMP_GAUGE: | ||
802 | case SNMP_TIMETICKS: | ||
803 | len = sizeof(unsigned long); | ||
804 | if (!asn1_ulong_decode(ctx, end, &ul)) { | ||
805 | kfree(id); | ||
806 | return 0; | ||
807 | } | ||
808 | *obj = kmalloc(sizeof(struct snmp_object) + len, GFP_ATOMIC); | ||
809 | if (*obj == NULL) { | ||
810 | kfree(id); | ||
811 | if (net_ratelimit()) | ||
812 | printk("OOM in bsalg (%d)\n", __LINE__); | ||
813 | return 0; | ||
814 | } | ||
815 | (*obj)->syntax.ul[0] = ul; | ||
816 | break; | ||
817 | default: | ||
818 | kfree(id); | ||
819 | return 0; | ||
820 | } | ||
821 | |||
822 | (*obj)->syntax_len = len; | ||
823 | (*obj)->type = type; | ||
824 | (*obj)->id = id; | ||
825 | (*obj)->id_len = idlen; | ||
826 | |||
827 | if (!asn1_eoc_decode(ctx, eoc)) { | ||
828 | kfree(id); | ||
829 | kfree(*obj); | ||
830 | *obj = NULL; | ||
831 | return 0; | ||
832 | } | ||
833 | return 1; | ||
834 | } | ||
835 | |||
836 | static unsigned char snmp_request_decode(struct asn1_ctx *ctx, | ||
837 | struct snmp_request *request) | ||
838 | { | ||
839 | unsigned int cls, con, tag; | ||
840 | unsigned char *end; | ||
841 | |||
842 | if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) | ||
843 | return 0; | ||
844 | |||
845 | if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_INT) | ||
846 | return 0; | ||
847 | |||
848 | if (!asn1_ulong_decode(ctx, end, &request->id)) | ||
849 | return 0; | ||
850 | |||
851 | if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) | ||
852 | return 0; | ||
853 | |||
854 | if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_INT) | ||
855 | return 0; | ||
856 | |||
857 | if (!asn1_uint_decode(ctx, end, &request->error_status)) | ||
858 | return 0; | ||
859 | |||
860 | if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) | ||
861 | return 0; | ||
862 | |||
863 | if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_INT) | ||
864 | return 0; | ||
865 | |||
866 | if (!asn1_uint_decode(ctx, end, &request->error_index)) | ||
867 | return 0; | ||
868 | |||
869 | return 1; | ||
870 | } | ||
871 | |||
872 | /* | ||
873 | * Fast checksum update for possibly oddly-aligned UDP byte, from the | ||
874 | * code example in the draft. | ||
875 | */ | ||
876 | static void fast_csum(__sum16 *csum, | ||
877 | const unsigned char *optr, | ||
878 | const unsigned char *nptr, | ||
879 | int offset) | ||
880 | { | ||
881 | unsigned char s[4]; | ||
882 | |||
883 | if (offset & 1) { | ||
884 | s[0] = s[2] = 0; | ||
885 | s[1] = ~*optr; | ||
886 | s[3] = *nptr; | ||
887 | } else { | ||
888 | s[1] = s[3] = 0; | ||
889 | s[0] = ~*optr; | ||
890 | s[2] = *nptr; | ||
891 | } | ||
892 | |||
893 | *csum = csum_fold(csum_partial(s, 4, ~csum_unfold(*csum))); | ||
894 | } | ||
895 | |||
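fast_csum() above is the byte-level form of the standard incremental checksum update: when a checksummed 16-bit word changes from m to m', the new checksum is ~(~HC + ~m + m') in one's-complement arithmetic (as in RFC 1624), so the UDP checksum can be fixed up without re-reading the payload. A standalone user-space demonstration of the word-level rule, checked against a full recompute; the helper names are ours and csum_partial()/csum_fold() are not used here:

#include <stdio.h>
#include <stdint.h>

static uint16_t csum(const uint8_t *buf, size_t len)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum += (uint32_t)(buf[i] << 8 | buf[i + 1]);	/* big-endian words */
	if (len & 1)
		sum += (uint32_t)(buf[len - 1] << 8);
	while (sum >> 16)			/* end-around carry */
		sum = (sum & 0xFFFF) + (sum >> 16);
	return (uint16_t)~sum;
}

static uint16_t csum_update(uint16_t old_check, uint16_t m, uint16_t m_new)
{
	/* HC' = ~(~HC + ~m + m'), one's-complement addition. */
	uint32_t sum = (uint16_t)~old_check;

	sum += (uint16_t)~m;
	sum += m_new;
	while (sum >> 16)
		sum = (sum & 0xFFFF) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	uint8_t data[8] = { 10, 0, 0, 1, 192, 168, 1, 1 };
	uint16_t before = csum(data, sizeof(data));
	uint16_t m = (uint16_t)(data[4] << 8 | data[5]);

	data[4] = 172;				/* mangle one embedded address byte */
	printf("incremental: %04x\n",
	       csum_update(before, m, (uint16_t)(data[4] << 8 | data[5])));
	printf("recomputed:  %04x\n", csum(data, sizeof(data)));
	return 0;
}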
896 | /* | ||
897 | * Mangle IP address. | ||
898 | * - begin points to the start of the SNMP message | ||
899 | * - addr points to the start of the address | ||
900 | */ | ||
901 | static inline void mangle_address(unsigned char *begin, | ||
902 | unsigned char *addr, | ||
903 | const struct oct1_map *map, | ||
904 | __sum16 *check) | ||
905 | { | ||
906 | if (map->from == NOCT1(addr)) { | ||
907 | u_int32_t old; | ||
908 | |||
909 | if (debug) | ||
910 | memcpy(&old, (unsigned char *)addr, sizeof(old)); | ||
911 | |||
912 | *addr = map->to; | ||
913 | |||
914 | /* Update UDP checksum if being used */ | ||
915 | if (*check) { | ||
916 | fast_csum(check, | ||
917 | &map->from, &map->to, addr - begin); | ||
918 | |||
919 | } | ||
920 | |||
921 | if (debug) | ||
922 | printk(KERN_DEBUG "bsalg: mapped %u.%u.%u.%u to " | ||
923 | "%u.%u.%u.%u\n", NIPQUAD(old), NIPQUAD(*addr)); | ||
924 | } | ||
925 | } | ||
926 | |||
927 | static unsigned char snmp_trap_decode(struct asn1_ctx *ctx, | ||
928 | struct snmp_v1_trap *trap, | ||
929 | const struct oct1_map *map, | ||
930 | __sum16 *check) | ||
931 | { | ||
932 | unsigned int cls, con, tag, len; | ||
933 | unsigned char *end; | ||
934 | |||
935 | if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) | ||
936 | return 0; | ||
937 | |||
938 | if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_OJI) | ||
939 | return 0; | ||
940 | |||
941 | if (!asn1_oid_decode(ctx, end, &trap->id, &trap->id_len)) | ||
942 | return 0; | ||
943 | |||
944 | if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) | ||
945 | goto err_id_free; | ||
946 | |||
947 | if (!((cls == ASN1_APL && con == ASN1_PRI && tag == SNMP_IPA) || | ||
948 | (cls == ASN1_UNI && con == ASN1_PRI && tag == ASN1_OTS))) | ||
949 | goto err_id_free; | ||
950 | |||
951 | if (!asn1_octets_decode(ctx, end, (unsigned char **)&trap->ip_address, &len)) | ||
952 | goto err_id_free; | ||
953 | |||
954 | /* IPv4 only */ | ||
955 | if (len != 4) | ||
956 | goto err_addr_free; | ||
957 | |||
958 | mangle_address(ctx->begin, ctx->pointer - 4, map, check); | ||
959 | |||
960 | if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) | ||
961 | goto err_addr_free; | ||
962 | |||
963 | if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_INT) | ||
964 | goto err_addr_free; | ||
965 | |||
966 | if (!asn1_uint_decode(ctx, end, &trap->general)) | ||
967 | goto err_addr_free; | ||
968 | |||
969 | if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) | ||
970 | goto err_addr_free; | ||
971 | |||
972 | if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_INT) | ||
973 | goto err_addr_free; | ||
974 | |||
975 | if (!asn1_uint_decode(ctx, end, &trap->specific)) | ||
976 | goto err_addr_free; | ||
977 | |||
978 | if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) | ||
979 | goto err_addr_free; | ||
980 | |||
981 | if (!((cls == ASN1_APL && con == ASN1_PRI && tag == SNMP_TIT) || | ||
982 | (cls == ASN1_UNI && con == ASN1_PRI && tag == ASN1_INT))) | ||
983 | goto err_addr_free; | ||
984 | |||
985 | if (!asn1_ulong_decode(ctx, end, &trap->time)) | ||
986 | goto err_addr_free; | ||
987 | |||
988 | return 1; | ||
989 | |||
990 | err_addr_free: | ||
991 | kfree((unsigned long *)trap->ip_address); | ||
992 | |||
993 | err_id_free: | ||
994 | kfree(trap->id); | ||
995 | |||
996 | return 0; | ||
997 | } | ||
998 | |||
999 | /***************************************************************************** | ||
1000 | * | ||
1001 | * Misc. routines | ||
1002 | * | ||
1003 | *****************************************************************************/ | ||
1004 | |||
1005 | static void hex_dump(unsigned char *buf, size_t len) | ||
1006 | { | ||
1007 | size_t i; | ||
1008 | |||
1009 | for (i = 0; i < len; i++) { | ||
1010 | if (i && !(i % 16)) | ||
1011 | printk("\n"); | ||
1012 | printk("%02x ", *(buf + i)); | ||
1013 | } | ||
1014 | printk("\n"); | ||
1015 | } | ||
1016 | |||
1017 | /* | ||
1018 | * Parse and mangle SNMP message according to mapping. | ||
1019 | * (And this is the fucking 'basic' method). | ||
1020 | */ | ||
1021 | static int snmp_parse_mangle(unsigned char *msg, | ||
1022 | u_int16_t len, | ||
1023 | const struct oct1_map *map, | ||
1024 | __sum16 *check) | ||
1025 | { | ||
1026 | unsigned char *eoc, *end; | ||
1027 | unsigned int cls, con, tag, vers, pdutype; | ||
1028 | struct asn1_ctx ctx; | ||
1029 | struct asn1_octstr comm; | ||
1030 | struct snmp_object **obj; | ||
1031 | |||
1032 | if (debug > 1) | ||
1033 | hex_dump(msg, len); | ||
1034 | |||
1035 | asn1_open(&ctx, msg, len); | ||
1036 | |||
1037 | /* | ||
1038 | * Start of SNMP message. | ||
1039 | */ | ||
1040 | if (!asn1_header_decode(&ctx, &eoc, &cls, &con, &tag)) | ||
1041 | return 0; | ||
1042 | if (cls != ASN1_UNI || con != ASN1_CON || tag != ASN1_SEQ) | ||
1043 | return 0; | ||
1044 | |||
1045 | /* | ||
1046 | * Version 1 or 2 handled. | ||
1047 | */ | ||
1048 | if (!asn1_header_decode(&ctx, &end, &cls, &con, &tag)) | ||
1049 | return 0; | ||
1050 | if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_INT) | ||
1051 | return 0; | ||
1052 | if (!asn1_uint_decode (&ctx, end, &vers)) | ||
1053 | return 0; | ||
1054 | if (debug > 1) | ||
1055 | printk(KERN_DEBUG "bsalg: snmp version: %u\n", vers + 1); | ||
1056 | if (vers > 1) | ||
1057 | return 1; | ||
1058 | |||
1059 | /* | ||
1060 | * Community. | ||
1061 | */ | ||
1062 | if (!asn1_header_decode (&ctx, &end, &cls, &con, &tag)) | ||
1063 | return 0; | ||
1064 | if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_OTS) | ||
1065 | return 0; | ||
1066 | if (!asn1_octets_decode(&ctx, end, &comm.data, &comm.len)) | ||
1067 | return 0; | ||
1068 | if (debug > 1) { | ||
1069 | unsigned int i; | ||
1070 | |||
1071 | printk(KERN_DEBUG "bsalg: community: "); | ||
1072 | for (i = 0; i < comm.len; i++) | ||
1073 | printk("%c", comm.data[i]); | ||
1074 | printk("\n"); | ||
1075 | } | ||
1076 | kfree(comm.data); | ||
1077 | |||
1078 | /* | ||
1079 | * PDU type | ||
1080 | */ | ||
1081 | if (!asn1_header_decode(&ctx, &eoc, &cls, &con, &pdutype)) | ||
1082 | return 0; | ||
1083 | if (cls != ASN1_CTX || con != ASN1_CON) | ||
1084 | return 0; | ||
1085 | if (debug > 1) { | ||
1086 | unsigned char *pdus[] = { | ||
1087 | [SNMP_PDU_GET] = "get", | ||
1088 | [SNMP_PDU_NEXT] = "get-next", | ||
1089 | [SNMP_PDU_RESPONSE] = "response", | ||
1090 | [SNMP_PDU_SET] = "set", | ||
1091 | [SNMP_PDU_TRAP1] = "trapv1", | ||
1092 | [SNMP_PDU_BULK] = "bulk", | ||
1093 | [SNMP_PDU_INFORM] = "inform", | ||
1094 | [SNMP_PDU_TRAP2] = "trapv2" | ||
1095 | }; | ||
1096 | |||
1097 | if (pdutype > SNMP_PDU_TRAP2) | ||
1098 | printk(KERN_DEBUG "bsalg: bad pdu type %u\n", pdutype); | ||
1099 | else | ||
1100 | printk(KERN_DEBUG "bsalg: pdu: %s\n", pdus[pdutype]); | ||
1101 | } | ||
1102 | if (pdutype != SNMP_PDU_RESPONSE && | ||
1103 | pdutype != SNMP_PDU_TRAP1 && pdutype != SNMP_PDU_TRAP2) | ||
1104 | return 1; | ||
1105 | |||
1106 | /* | ||
1107 | * Request header or v1 trap | ||
1108 | */ | ||
1109 | if (pdutype == SNMP_PDU_TRAP1) { | ||
1110 | struct snmp_v1_trap trap; | ||
1111 | unsigned char ret = snmp_trap_decode(&ctx, &trap, map, check); | ||
1112 | |||
1113 | if (ret) { | ||
1114 | kfree(trap.id); | ||
1115 | kfree((unsigned long *)trap.ip_address); | ||
1116 | } else | ||
1117 | return ret; | ||
1118 | |||
1119 | } else { | ||
1120 | struct snmp_request req; | ||
1121 | |||
1122 | if (!snmp_request_decode(&ctx, &req)) | ||
1123 | return 0; | ||
1124 | |||
1125 | if (debug > 1) | ||
1126 | printk(KERN_DEBUG "bsalg: request: id=0x%lx error_status=%u " | ||
1127 | "error_index=%u\n", req.id, req.error_status, | ||
1128 | req.error_index); | ||
1129 | } | ||
1130 | |||
1131 | /* | ||
1132 | * Loop through objects, look for IP addresses to mangle. | ||
1133 | */ | ||
1134 | if (!asn1_header_decode(&ctx, &eoc, &cls, &con, &tag)) | ||
1135 | return 0; | ||
1136 | |||
1137 | if (cls != ASN1_UNI || con != ASN1_CON || tag != ASN1_SEQ) | ||
1138 | return 0; | ||
1139 | |||
1140 | obj = kmalloc(sizeof(struct snmp_object), GFP_ATOMIC); | ||
1141 | if (obj == NULL) { | ||
1142 | if (net_ratelimit()) | ||
1143 | printk(KERN_WARNING "OOM in bsalg(%d)\n", __LINE__); | ||
1144 | return 0; | ||
1145 | } | ||
1146 | |||
1147 | while (!asn1_eoc_decode(&ctx, eoc)) { | ||
1148 | unsigned int i; | ||
1149 | |||
1150 | if (!snmp_object_decode(&ctx, obj)) { | ||
1151 | if (*obj) { | ||
1152 | kfree((*obj)->id); | ||
1153 | kfree(*obj); | ||
1154 | } | ||
1155 | kfree(obj); | ||
1156 | return 0; | ||
1157 | } | ||
1158 | |||
1159 | if (debug > 1) { | ||
1160 | printk(KERN_DEBUG "bsalg: object: "); | ||
1161 | for (i = 0; i < (*obj)->id_len; i++) { | ||
1162 | if (i > 0) | ||
1163 | printk("."); | ||
1164 | printk("%lu", (*obj)->id[i]); | ||
1165 | } | ||
1166 | printk(": type=%u\n", (*obj)->type); | ||
1167 | |||
1168 | } | ||
1169 | |||
1170 | if ((*obj)->type == SNMP_IPADDR) | ||
1171 | mangle_address(ctx.begin, ctx.pointer - 4 , map, check); | ||
1172 | |||
1173 | kfree((*obj)->id); | ||
1174 | kfree(*obj); | ||
1175 | } | ||
1176 | kfree(obj); | ||
1177 | |||
1178 | if (!asn1_eoc_decode(&ctx, eoc)) | ||
1179 | return 0; | ||
1180 | |||
1181 | return 1; | ||
1182 | } | ||
1183 | |||
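The mangle_address() calls in the object loop above rewrite an embedded IPv4 octet and patch the UDP checksum in place rather than recomputing it over the whole payload. A minimal userspace sketch of that style of incremental one's-complement update (RFC 1624, eqn. 3); the constants and names are illustrative, not the kernel's csum helpers:

#include <stdint.h>
#include <stdio.h>

static uint16_t csum_update16(uint16_t check, uint16_t old_word, uint16_t new_word)
{
        uint32_t sum = (uint16_t)~check;        /* ~HC            */
        sum += (uint16_t)~old_word;             /* + ~m           */
        sum += new_word;                        /* + m'           */
        sum = (sum & 0xffff) + (sum >> 16);     /* fold carries   */
        sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;                  /* HC' = ~(~HC + ~m + m') */
}

int main(void)
{
        uint16_t check    = 0x1c46;             /* arbitrary existing UDP checksum   */
        uint16_t old_word = (10u << 8)  | 0;    /* first 16 bits of 10.0.x.x         */
        uint16_t new_word = (192u << 8) | 168;  /* first 16 bits of 192.168.x.x      */
        printf("patched checksum: 0x%04x\n", csum_update16(check, old_word, new_word));
        return 0;
}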
1184 | /***************************************************************************** | ||
1185 | * | ||
1186 | * NAT routines. | ||
1187 | * | ||
1188 | *****************************************************************************/ | ||
1189 | |||
1190 | /* | ||
1191 | * SNMP translation routine. | ||
1192 | */ | ||
1193 | static int snmp_translate(struct nf_conn *ct, | ||
1194 | enum ip_conntrack_info ctinfo, | ||
1195 | struct sk_buff **pskb) | ||
1196 | { | ||
1197 | struct iphdr *iph = (*pskb)->nh.iph; | ||
1198 | struct udphdr *udph = (struct udphdr *)((__be32 *)iph + iph->ihl); | ||
1199 | u_int16_t udplen = ntohs(udph->len); | ||
1200 | u_int16_t paylen = udplen - sizeof(struct udphdr); | ||
1201 | int dir = CTINFO2DIR(ctinfo); | ||
1202 | struct oct1_map map; | ||
1203 | |||
1204 | /* | ||
1205 | * Determine mapping for application layer addresses based | ||
1206 | * on NAT manipulations for the packet. | ||
1207 | */ | ||
1208 | if (dir == IP_CT_DIR_ORIGINAL) { | ||
1209 | /* SNAT traps */ | ||
1210 | map.from = NOCT1(&ct->tuplehash[dir].tuple.src.u3.ip); | ||
1211 | map.to = NOCT1(&ct->tuplehash[!dir].tuple.dst.u3.ip); | ||
1212 | } else { | ||
1213 | /* DNAT replies */ | ||
1214 | map.from = NOCT1(&ct->tuplehash[dir].tuple.src.u3.ip); | ||
1215 | map.to = NOCT1(&ct->tuplehash[!dir].tuple.dst.u3.ip); | ||
1216 | } | ||
1217 | |||
1218 | if (map.from == map.to) | ||
1219 | return NF_ACCEPT; | ||
1220 | |||
1221 | if (!snmp_parse_mangle((unsigned char *)udph + sizeof(struct udphdr), | ||
1222 | paylen, &map, &udph->check)) { | ||
1223 | if (net_ratelimit()) | ||
1224 | printk(KERN_WARNING "bsalg: parser failed\n"); | ||
1225 | return NF_DROP; | ||
1226 | } | ||
1227 | return NF_ACCEPT; | ||
1228 | } | ||
1229 | |||
1230 | /* We don't actually set up expectations, just adjust internal IP | ||
1231 | * addresses if this is being NATted */ | ||
1232 | static int help(struct sk_buff **pskb, unsigned int protoff, | ||
1233 | struct nf_conn *ct, | ||
1234 | enum ip_conntrack_info ctinfo) | ||
1235 | { | ||
1236 | int dir = CTINFO2DIR(ctinfo); | ||
1237 | unsigned int ret; | ||
1238 | struct iphdr *iph = (*pskb)->nh.iph; | ||
1239 | struct udphdr *udph = (struct udphdr *)((u_int32_t *)iph + iph->ihl); | ||
1240 | |||
1241 | /* SNMP replies and originating SNMP traps get mangled */ | ||
1242 | if (udph->source == htons(SNMP_PORT) && dir != IP_CT_DIR_REPLY) | ||
1243 | return NF_ACCEPT; | ||
1244 | if (udph->dest == htons(SNMP_TRAP_PORT) && dir != IP_CT_DIR_ORIGINAL) | ||
1245 | return NF_ACCEPT; | ||
1246 | |||
1247 | /* No NAT? */ | ||
1248 | if (!(ct->status & IPS_NAT_MASK)) | ||
1249 | return NF_ACCEPT; | ||
1250 | |||
1251 | /* | ||
1252 | * Make sure the packet length is ok. So far, we were only guaranteed | ||
1253 | * to have a valid length IP header plus 8 bytes, which means we have | ||
1254 | * enough room for a UDP header. Just verify the UDP length field so we | ||
1255 | * can mess around with the payload. | ||
1256 | */ | ||
1257 | if (ntohs(udph->len) != (*pskb)->len - (iph->ihl << 2)) { | ||
1258 | if (net_ratelimit()) | ||
1259 | printk(KERN_WARNING "SNMP: dropping malformed packet " | ||
1260 | "src=%u.%u.%u.%u dst=%u.%u.%u.%u\n", | ||
1261 | NIPQUAD(iph->saddr), NIPQUAD(iph->daddr)); | ||
1262 | return NF_DROP; | ||
1263 | } | ||
1264 | |||
1265 | if (!skb_make_writable(pskb, (*pskb)->len)) | ||
1266 | return NF_DROP; | ||
1267 | |||
1268 | spin_lock_bh(&snmp_lock); | ||
1269 | ret = snmp_translate(ct, ctinfo, pskb); | ||
1270 | spin_unlock_bh(&snmp_lock); | ||
1271 | return ret; | ||
1272 | } | ||
1273 | |||
1274 | static struct nf_conntrack_helper snmp_helper __read_mostly = { | ||
1275 | .max_expected = 0, | ||
1276 | .timeout = 180, | ||
1277 | .me = THIS_MODULE, | ||
1278 | .help = help, | ||
1279 | .name = "snmp", | ||
1280 | .tuple.src.l3num = AF_INET, | ||
1281 | .tuple.src.u.udp.port = __constant_htons(SNMP_PORT), | ||
1282 | .tuple.dst.protonum = IPPROTO_UDP, | ||
1283 | .mask.src.l3num = 0xFFFF, | ||
1284 | .mask.src.u.udp.port = __constant_htons(0xFFFF), | ||
1285 | .mask.dst.protonum = 0xFF, | ||
1286 | }; | ||
1287 | |||
1288 | static struct nf_conntrack_helper snmp_trap_helper __read_mostly = { | ||
1289 | .max_expected = 0, | ||
1290 | .timeout = 180, | ||
1291 | .me = THIS_MODULE, | ||
1292 | .help = help, | ||
1293 | .name = "snmp_trap", | ||
1294 | .tuple.src.l3num = AF_INET, | ||
1295 | .tuple.src.u.udp.port = __constant_htons(SNMP_TRAP_PORT), | ||
1296 | .tuple.dst.protonum = IPPROTO_UDP, | ||
1297 | .mask.src.l3num = 0xFFFF, | ||
1298 | .mask.src.u.udp.port = __constant_htons(0xFFFF), | ||
1299 | .mask.dst.protonum = 0xFF, | ||
1300 | }; | ||
1301 | |||
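The mask fields of the two helpers above (0xFFFF for the address family and source port, 0xFF for the protocol) force exact matching on those tuple fields when conntrack looks up a helper for a connection. A rough userspace sketch of masked tuple matching over a made-up struct, not the kernel's nf_conntrack_tuple:

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

struct mini_tuple {
        uint16_t l3num;     /* address family                     */
        uint16_t src_port;  /* network byte order in real code    */
        uint8_t  protonum;  /* L4 protocol                        */
};

static bool tuple_match(const struct mini_tuple *pkt,
                        const struct mini_tuple *t,
                        const struct mini_tuple *mask)
{
        /* a field matches when (pkt & mask) == (helper & mask) */
        return ((pkt->l3num    ^ t->l3num)    & mask->l3num)    == 0 &&
               ((pkt->src_port ^ t->src_port) & mask->src_port) == 0 &&
               ((pkt->protonum ^ t->protonum) & mask->protonum) == 0;
}

int main(void)
{
        struct mini_tuple helper = { 2 /* AF_INET */, 161, 17 /* UDP */ };
        struct mini_tuple mask   = { 0xffff, 0xffff, 0xff };
        struct mini_tuple pkt    = { 2, 161, 17 };
        printf("match: %d\n", tuple_match(&pkt, &helper, &mask));
        return 0;
}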
1302 | /***************************************************************************** | ||
1303 | * | ||
1304 | * Module stuff. | ||
1305 | * | ||
1306 | *****************************************************************************/ | ||
1307 | |||
1308 | static int __init nf_nat_snmp_basic_init(void) | ||
1309 | { | ||
1310 | int ret = 0; | ||
1311 | |||
1312 | ret = nf_conntrack_helper_register(&snmp_helper); | ||
1313 | if (ret < 0) | ||
1314 | return ret; | ||
1315 | ret = nf_conntrack_helper_register(&snmp_trap_helper); | ||
1316 | if (ret < 0) { | ||
1317 | nf_conntrack_helper_unregister(&snmp_helper); | ||
1318 | return ret; | ||
1319 | } | ||
1320 | return ret; | ||
1321 | } | ||
1322 | |||
1323 | static void __exit nf_nat_snmp_basic_fini(void) | ||
1324 | { | ||
1325 | nf_conntrack_helper_unregister(&snmp_helper); | ||
1326 | nf_conntrack_helper_unregister(&snmp_trap_helper); | ||
1327 | } | ||
1328 | |||
1329 | module_init(nf_nat_snmp_basic_init); | ||
1330 | module_exit(nf_nat_snmp_basic_fini); | ||
1331 | |||
1332 | module_param(debug, int, 0600); | ||
diff --git a/net/ipv4/netfilter/nf_nat_standalone.c b/net/ipv4/netfilter/nf_nat_standalone.c new file mode 100644 index 000000000000..730a7a44c883 --- /dev/null +++ b/net/ipv4/netfilter/nf_nat_standalone.c | |||
@@ -0,0 +1,406 @@ | |||
1 | /* (C) 1999-2001 Paul `Rusty' Russell | ||
2 | * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org> | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 as | ||
6 | * published by the Free Software Foundation. | ||
7 | */ | ||
8 | #include <linux/types.h> | ||
9 | #include <linux/icmp.h> | ||
10 | #include <linux/ip.h> | ||
11 | #include <linux/netfilter.h> | ||
12 | #include <linux/netfilter_ipv4.h> | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/skbuff.h> | ||
15 | #include <linux/proc_fs.h> | ||
16 | #include <net/ip.h> | ||
17 | #include <net/checksum.h> | ||
18 | #include <linux/spinlock.h> | ||
19 | |||
20 | #include <net/netfilter/nf_conntrack.h> | ||
21 | #include <net/netfilter/nf_conntrack_core.h> | ||
22 | #include <net/netfilter/nf_nat.h> | ||
23 | #include <net/netfilter/nf_nat_rule.h> | ||
24 | #include <net/netfilter/nf_nat_protocol.h> | ||
25 | #include <net/netfilter/nf_nat_core.h> | ||
26 | #include <net/netfilter/nf_nat_helper.h> | ||
27 | #include <linux/netfilter_ipv4/ip_tables.h> | ||
28 | |||
29 | #if 0 | ||
30 | #define DEBUGP printk | ||
31 | #else | ||
32 | #define DEBUGP(format, args...) | ||
33 | #endif | ||
34 | |||
35 | #define HOOKNAME(hooknum) ((hooknum) == NF_IP_POST_ROUTING ? "POST_ROUTING" \ | ||
36 | : ((hooknum) == NF_IP_PRE_ROUTING ? "PRE_ROUTING" \ | ||
37 | : ((hooknum) == NF_IP_LOCAL_OUT ? "LOCAL_OUT" \ | ||
38 | : ((hooknum) == NF_IP_LOCAL_IN ? "LOCAL_IN" \ | ||
39 | : "*ERROR*"))) | ||
40 | |||
41 | #ifdef CONFIG_XFRM | ||
42 | static void nat_decode_session(struct sk_buff *skb, struct flowi *fl) | ||
43 | { | ||
44 | struct nf_conn *ct; | ||
45 | struct nf_conntrack_tuple *t; | ||
46 | enum ip_conntrack_info ctinfo; | ||
47 | enum ip_conntrack_dir dir; | ||
48 | unsigned long statusbit; | ||
49 | |||
50 | ct = nf_ct_get(skb, &ctinfo); | ||
51 | if (ct == NULL) | ||
52 | return; | ||
53 | dir = CTINFO2DIR(ctinfo); | ||
54 | t = &ct->tuplehash[dir].tuple; | ||
55 | |||
56 | if (dir == IP_CT_DIR_ORIGINAL) | ||
57 | statusbit = IPS_DST_NAT; | ||
58 | else | ||
59 | statusbit = IPS_SRC_NAT; | ||
60 | |||
61 | if (ct->status & statusbit) { | ||
62 | fl->fl4_dst = t->dst.u3.ip; | ||
63 | if (t->dst.protonum == IPPROTO_TCP || | ||
64 | t->dst.protonum == IPPROTO_UDP) | ||
65 | fl->fl_ip_dport = t->dst.u.tcp.port; | ||
66 | } | ||
67 | |||
68 | statusbit ^= IPS_NAT_MASK; | ||
69 | |||
70 | if (ct->status & statusbit) { | ||
71 | fl->fl4_src = t->src.u3.ip; | ||
72 | if (t->dst.protonum == IPPROTO_TCP || | ||
73 | t->dst.protonum == IPPROTO_UDP) | ||
74 | fl->fl_ip_sport = t->src.u.tcp.port; | ||
75 | } | ||
76 | } | ||
77 | #endif | ||
78 | |||
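In nat_decode_session() above, statusbit ^= IPS_NAT_MASK flips the DST-NAT bit to the SRC-NAT bit (and vice versa), because the mask is simply the OR of the two bits. A tiny sketch of the trick with illustrative bit values (the real IPS_* constants differ):

#include <stdio.h>

#define SRC_NAT  (1 << 0)               /* stand-in values only */
#define DST_NAT  (1 << 1)
#define NAT_MASK (SRC_NAT | DST_NAT)

int main(void)
{
        unsigned long statusbit = DST_NAT;
        /* XOR with the mask turns DST_NAT into SRC_NAT and back */
        printf("flipped: 0x%lx\n", statusbit ^ NAT_MASK);
        return 0;
}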
79 | static unsigned int | ||
80 | nf_nat_fn(unsigned int hooknum, | ||
81 | struct sk_buff **pskb, | ||
82 | const struct net_device *in, | ||
83 | const struct net_device *out, | ||
84 | int (*okfn)(struct sk_buff *)) | ||
85 | { | ||
86 | struct nf_conn *ct; | ||
87 | enum ip_conntrack_info ctinfo; | ||
88 | struct nf_conn_nat *nat; | ||
89 | struct nf_nat_info *info; | ||
90 | /* maniptype == SRC for postrouting. */ | ||
91 | enum nf_nat_manip_type maniptype = HOOK2MANIP(hooknum); | ||
92 | |||
93 | /* We never see fragments: conntrack defrags on pre-routing | ||
94 | and local-out, and nf_nat_out protects post-routing. */ | ||
95 | NF_CT_ASSERT(!((*pskb)->nh.iph->frag_off | ||
96 | & htons(IP_MF|IP_OFFSET))); | ||
97 | |||
98 | ct = nf_ct_get(*pskb, &ctinfo); | ||
99 | /* Can't track? It's not due to stress, or conntrack would | ||
100 | have dropped it. Hence it's the user's responsibility to | ||
101 | packet filter it out, or implement conntrack/NAT for that | ||
102 | protocol. 8) --RR */ | ||
103 | if (!ct) { | ||
104 | /* Exception: ICMP redirect to new connection (not in | ||
105 | hash table yet). We must not let this through, in | ||
106 | case we're doing NAT to the same network. */ | ||
107 | if ((*pskb)->nh.iph->protocol == IPPROTO_ICMP) { | ||
108 | struct icmphdr _hdr, *hp; | ||
109 | |||
110 | hp = skb_header_pointer(*pskb, | ||
111 | (*pskb)->nh.iph->ihl*4, | ||
112 | sizeof(_hdr), &_hdr); | ||
113 | if (hp != NULL && | ||
114 | hp->type == ICMP_REDIRECT) | ||
115 | return NF_DROP; | ||
116 | } | ||
117 | return NF_ACCEPT; | ||
118 | } | ||
119 | |||
120 | /* Don't try to NAT if this packet is not conntracked */ | ||
121 | if (ct == &nf_conntrack_untracked) | ||
122 | return NF_ACCEPT; | ||
123 | |||
124 | nat = nfct_nat(ct); | ||
125 | if (!nat) | ||
126 | return NF_DROP; | ||
127 | |||
128 | switch (ctinfo) { | ||
129 | case IP_CT_RELATED: | ||
130 | case IP_CT_RELATED+IP_CT_IS_REPLY: | ||
131 | if ((*pskb)->nh.iph->protocol == IPPROTO_ICMP) { | ||
132 | if (!nf_nat_icmp_reply_translation(ct, ctinfo, | ||
133 | hooknum, pskb)) | ||
134 | return NF_DROP; | ||
135 | else | ||
136 | return NF_ACCEPT; | ||
137 | } | ||
138 | /* Fall thru... (Only ICMPs can be IP_CT_IS_REPLY) */ | ||
139 | case IP_CT_NEW: | ||
140 | info = &nat->info; | ||
141 | |||
142 | /* Seen it before? This can happen for loopback, retrans, | ||
143 | or local packets.. */ | ||
144 | if (!nf_nat_initialized(ct, maniptype)) { | ||
145 | unsigned int ret; | ||
146 | |||
147 | if (unlikely(nf_ct_is_confirmed(ct))) | ||
148 | /* NAT module was loaded late */ | ||
149 | ret = alloc_null_binding_confirmed(ct, info, | ||
150 | hooknum); | ||
151 | else if (hooknum == NF_IP_LOCAL_IN) | ||
152 | /* LOCAL_IN hook doesn't have a chain! */ | ||
153 | ret = alloc_null_binding(ct, info, hooknum); | ||
154 | else | ||
155 | ret = nf_nat_rule_find(pskb, hooknum, in, out, | ||
156 | ct, info); | ||
157 | |||
158 | if (ret != NF_ACCEPT) { | ||
159 | return ret; | ||
160 | } | ||
161 | } else | ||
162 | DEBUGP("Already setup manip %s for ct %p\n", | ||
163 | maniptype == IP_NAT_MANIP_SRC ? "SRC" : "DST", | ||
164 | ct); | ||
165 | break; | ||
166 | |||
167 | default: | ||
168 | /* ESTABLISHED */ | ||
169 | NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED || | ||
170 | ctinfo == (IP_CT_ESTABLISHED+IP_CT_IS_REPLY)); | ||
171 | info = &nat->info; | ||
172 | } | ||
173 | |||
174 | NF_CT_ASSERT(info); | ||
175 | return nf_nat_packet(ct, ctinfo, hooknum, pskb); | ||
176 | } | ||
177 | |||
178 | static unsigned int | ||
179 | nf_nat_in(unsigned int hooknum, | ||
180 | struct sk_buff **pskb, | ||
181 | const struct net_device *in, | ||
182 | const struct net_device *out, | ||
183 | int (*okfn)(struct sk_buff *)) | ||
184 | { | ||
185 | unsigned int ret; | ||
186 | __be32 daddr = (*pskb)->nh.iph->daddr; | ||
187 | |||
188 | ret = nf_nat_fn(hooknum, pskb, in, out, okfn); | ||
189 | if (ret != NF_DROP && ret != NF_STOLEN && | ||
190 | daddr != (*pskb)->nh.iph->daddr) { | ||
191 | dst_release((*pskb)->dst); | ||
192 | (*pskb)->dst = NULL; | ||
193 | } | ||
194 | return ret; | ||
195 | } | ||
196 | |||
197 | static unsigned int | ||
198 | nf_nat_out(unsigned int hooknum, | ||
199 | struct sk_buff **pskb, | ||
200 | const struct net_device *in, | ||
201 | const struct net_device *out, | ||
202 | int (*okfn)(struct sk_buff *)) | ||
203 | { | ||
204 | #ifdef CONFIG_XFRM | ||
205 | struct nf_conn *ct; | ||
206 | enum ip_conntrack_info ctinfo; | ||
207 | #endif | ||
208 | unsigned int ret; | ||
209 | |||
210 | /* root is playing with raw sockets. */ | ||
211 | if ((*pskb)->len < sizeof(struct iphdr) || | ||
212 | (*pskb)->nh.iph->ihl * 4 < sizeof(struct iphdr)) | ||
213 | return NF_ACCEPT; | ||
214 | |||
215 | ret = nf_nat_fn(hooknum, pskb, in, out, okfn); | ||
216 | #ifdef CONFIG_XFRM | ||
217 | if (ret != NF_DROP && ret != NF_STOLEN && | ||
218 | (ct = nf_ct_get(*pskb, &ctinfo)) != NULL) { | ||
219 | enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); | ||
220 | |||
221 | if (ct->tuplehash[dir].tuple.src.u3.ip != | ||
222 | ct->tuplehash[!dir].tuple.dst.u3.ip | ||
223 | || ct->tuplehash[dir].tuple.src.u.all != | ||
224 | ct->tuplehash[!dir].tuple.dst.u.all | ||
225 | ) | ||
226 | return ip_xfrm_me_harder(pskb) == 0 ? ret : NF_DROP; | ||
227 | } | ||
228 | #endif | ||
229 | return ret; | ||
230 | } | ||
231 | |||
232 | static unsigned int | ||
233 | nf_nat_local_fn(unsigned int hooknum, | ||
234 | struct sk_buff **pskb, | ||
235 | const struct net_device *in, | ||
236 | const struct net_device *out, | ||
237 | int (*okfn)(struct sk_buff *)) | ||
238 | { | ||
239 | struct nf_conn *ct; | ||
240 | enum ip_conntrack_info ctinfo; | ||
241 | unsigned int ret; | ||
242 | |||
243 | /* root is playing with raw sockets. */ | ||
244 | if ((*pskb)->len < sizeof(struct iphdr) || | ||
245 | (*pskb)->nh.iph->ihl * 4 < sizeof(struct iphdr)) | ||
246 | return NF_ACCEPT; | ||
247 | |||
248 | ret = nf_nat_fn(hooknum, pskb, in, out, okfn); | ||
249 | if (ret != NF_DROP && ret != NF_STOLEN && | ||
250 | (ct = nf_ct_get(*pskb, &ctinfo)) != NULL) { | ||
251 | enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); | ||
252 | |||
253 | if (ct->tuplehash[dir].tuple.dst.u3.ip != | ||
254 | ct->tuplehash[!dir].tuple.src.u3.ip | ||
255 | #ifdef CONFIG_XFRM | ||
256 | || ct->tuplehash[dir].tuple.dst.u.all != | ||
257 | ct->tuplehash[!dir].tuple.src.u.all | ||
258 | #endif | ||
259 | ) | ||
260 | if (ip_route_me_harder(pskb, RTN_UNSPEC)) | ||
261 | ret = NF_DROP; | ||
262 | } | ||
263 | return ret; | ||
264 | } | ||
265 | |||
266 | static unsigned int | ||
267 | nf_nat_adjust(unsigned int hooknum, | ||
268 | struct sk_buff **pskb, | ||
269 | const struct net_device *in, | ||
270 | const struct net_device *out, | ||
271 | int (*okfn)(struct sk_buff *)) | ||
272 | { | ||
273 | struct nf_conn *ct; | ||
274 | enum ip_conntrack_info ctinfo; | ||
275 | |||
276 | ct = nf_ct_get(*pskb, &ctinfo); | ||
277 | if (ct && test_bit(IPS_SEQ_ADJUST_BIT, &ct->status)) { | ||
278 | DEBUGP("nf_nat_standalone: adjusting sequence number\n"); | ||
279 | if (!nf_nat_seq_adjust(pskb, ct, ctinfo)) | ||
280 | return NF_DROP; | ||
281 | } | ||
282 | return NF_ACCEPT; | ||
283 | } | ||
284 | |||
285 | /* We must be after connection tracking and before packet filtering. */ | ||
286 | |||
287 | static struct nf_hook_ops nf_nat_ops[] = { | ||
288 | /* Before packet filtering, change destination */ | ||
289 | { | ||
290 | .hook = nf_nat_in, | ||
291 | .owner = THIS_MODULE, | ||
292 | .pf = PF_INET, | ||
293 | .hooknum = NF_IP_PRE_ROUTING, | ||
294 | .priority = NF_IP_PRI_NAT_DST, | ||
295 | }, | ||
296 | /* After packet filtering, change source */ | ||
297 | { | ||
298 | .hook = nf_nat_out, | ||
299 | .owner = THIS_MODULE, | ||
300 | .pf = PF_INET, | ||
301 | .hooknum = NF_IP_POST_ROUTING, | ||
302 | .priority = NF_IP_PRI_NAT_SRC, | ||
303 | }, | ||
304 | /* After conntrack, adjust sequence number */ | ||
305 | { | ||
306 | .hook = nf_nat_adjust, | ||
307 | .owner = THIS_MODULE, | ||
308 | .pf = PF_INET, | ||
309 | .hooknum = NF_IP_POST_ROUTING, | ||
310 | .priority = NF_IP_PRI_NAT_SEQ_ADJUST, | ||
311 | }, | ||
312 | /* Before packet filtering, change destination */ | ||
313 | { | ||
314 | .hook = nf_nat_local_fn, | ||
315 | .owner = THIS_MODULE, | ||
316 | .pf = PF_INET, | ||
317 | .hooknum = NF_IP_LOCAL_OUT, | ||
318 | .priority = NF_IP_PRI_NAT_DST, | ||
319 | }, | ||
320 | /* After packet filtering, change source */ | ||
321 | { | ||
322 | .hook = nf_nat_fn, | ||
323 | .owner = THIS_MODULE, | ||
324 | .pf = PF_INET, | ||
325 | .hooknum = NF_IP_LOCAL_IN, | ||
326 | .priority = NF_IP_PRI_NAT_SRC, | ||
327 | }, | ||
328 | /* After conntrack, adjust sequence number */ | ||
329 | { | ||
330 | .hook = nf_nat_adjust, | ||
331 | .owner = THIS_MODULE, | ||
332 | .pf = PF_INET, | ||
333 | .hooknum = NF_IP_LOCAL_IN, | ||
334 | .priority = NF_IP_PRI_NAT_SEQ_ADJUST, | ||
335 | }, | ||
336 | }; | ||
337 | |||
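The nf_nat_ops array relies on hook priorities to order callbacks registered on the same hook point: lower values run earlier, which is how the destination rewrite (NAT_DST) runs before packet filtering and the source rewrite (NAT_SRC) after it. A small sketch of priority-ordered dispatch with made-up priority numbers, not the kernel's registration code:

#include <stdio.h>
#include <stdlib.h>

struct hook { const char *name; int priority; };

static int by_prio(const void *a, const void *b)
{
        return ((const struct hook *)a)->priority -
               ((const struct hook *)b)->priority;
}

int main(void)
{
        /* illustrative priorities: lower runs first */
        struct hook pre_routing[] = {
                { "filter",  0 },
                { "nat_dst", -100 },
                { "mangle",  -150 },
        };
        size_t n = sizeof(pre_routing) / sizeof(pre_routing[0]);

        qsort(pre_routing, n, sizeof(pre_routing[0]), by_prio);
        for (size_t i = 0; i < n; i++)
                printf("%s\n", pre_routing[i].name);  /* mangle, nat_dst, filter */
        return 0;
}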
338 | static int __init nf_nat_standalone_init(void) | ||
339 | { | ||
340 | int size, ret = 0; | ||
341 | |||
342 | need_conntrack(); | ||
343 | |||
344 | size = ALIGN(sizeof(struct nf_conn), __alignof__(struct nf_conn_nat)) + | ||
345 | sizeof(struct nf_conn_nat); | ||
346 | ret = nf_conntrack_register_cache(NF_CT_F_NAT, "nf_nat:base", size); | ||
347 | if (ret < 0) { | ||
348 | printk(KERN_ERR "nf_nat_init: Unable to create slab cache\n"); | ||
349 | return ret; | ||
350 | } | ||
351 | |||
352 | size = ALIGN(size, __alignof__(struct nf_conn_help)) + | ||
353 | sizeof(struct nf_conn_help); | ||
354 | ret = nf_conntrack_register_cache(NF_CT_F_NAT|NF_CT_F_HELP, | ||
355 | "nf_nat:help", size); | ||
356 | if (ret < 0) { | ||
357 | printk(KERN_ERR "nf_nat_init: Unable to create slab cache\n"); | ||
358 | goto cleanup_register_cache; | ||
359 | } | ||
360 | #ifdef CONFIG_XFRM | ||
361 | BUG_ON(ip_nat_decode_session != NULL); | ||
362 | ip_nat_decode_session = nat_decode_session; | ||
363 | #endif | ||
364 | ret = nf_nat_rule_init(); | ||
365 | if (ret < 0) { | ||
366 | printk("nf_nat_init: can't setup rules.\n"); | ||
367 | goto cleanup_decode_session; | ||
368 | } | ||
369 | ret = nf_register_hooks(nf_nat_ops, ARRAY_SIZE(nf_nat_ops)); | ||
370 | if (ret < 0) { | ||
371 | printk("nf_nat_init: can't register hooks.\n"); | ||
372 | goto cleanup_rule_init; | ||
373 | } | ||
374 | nf_nat_module_is_loaded = 1; | ||
375 | return ret; | ||
376 | |||
377 | cleanup_rule_init: | ||
378 | nf_nat_rule_cleanup(); | ||
379 | cleanup_decode_session: | ||
380 | #ifdef CONFIG_XFRM | ||
381 | ip_nat_decode_session = NULL; | ||
382 | synchronize_net(); | ||
383 | #endif | ||
384 | nf_conntrack_unregister_cache(NF_CT_F_NAT|NF_CT_F_HELP); | ||
385 | cleanup_register_cache: | ||
386 | nf_conntrack_unregister_cache(NF_CT_F_NAT); | ||
387 | return ret; | ||
388 | } | ||
389 | |||
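nf_nat_standalone_init() sizes its conntrack caches by rounding the base object up to the extension's alignment before appending the extension. A sketch of the ALIGN() arithmetic with stand-in struct sizes ("a" must be a power of two):

#include <stdio.h>
#include <stddef.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))

struct conn     { char data[184]; };            /* stand-in, not the real layouts */
struct conn_nat { long l; char pad[20]; };

int main(void)
{
        size_t size = ALIGN_UP(sizeof(struct conn), _Alignof(struct conn_nat))
                      + sizeof(struct conn_nat);
        printf("base + NAT extension: %zu bytes\n", size);
        return 0;
}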
390 | static void __exit nf_nat_standalone_fini(void) | ||
391 | { | ||
392 | nf_unregister_hooks(nf_nat_ops, ARRAY_SIZE(nf_nat_ops)); | ||
393 | nf_nat_rule_cleanup(); | ||
394 | nf_nat_module_is_loaded = 0; | ||
395 | #ifdef CONFIG_XFRM | ||
396 | ip_nat_decode_session = NULL; | ||
397 | synchronize_net(); | ||
398 | #endif | ||
399 | /* Conntrack caches are unregistered in nf_conntrack_cleanup */ | ||
400 | } | ||
401 | |||
402 | module_init(nf_nat_standalone_init); | ||
403 | module_exit(nf_nat_standalone_fini); | ||
404 | |||
405 | MODULE_LICENSE("GPL"); | ||
406 | MODULE_ALIAS("ip_nat"); | ||
diff --git a/net/ipv4/netfilter/nf_nat_tftp.c b/net/ipv4/netfilter/nf_nat_tftp.c new file mode 100644 index 000000000000..2566b79de224 --- /dev/null +++ b/net/ipv4/netfilter/nf_nat_tftp.c | |||
@@ -0,0 +1,52 @@ | |||
1 | /* (C) 2001-2002 Magnus Boden <mb@ozaba.mine.nu> | ||
2 | * | ||
3 | * This program is free software; you can redistribute it and/or modify | ||
4 | * it under the terms of the GNU General Public License version 2 as | ||
5 | * published by the Free Software Foundation. | ||
6 | */ | ||
7 | |||
8 | #include <linux/module.h> | ||
9 | #include <linux/moduleparam.h> | ||
10 | #include <linux/udp.h> | ||
11 | |||
12 | #include <net/netfilter/nf_nat_helper.h> | ||
13 | #include <net/netfilter/nf_nat_rule.h> | ||
14 | #include <net/netfilter/nf_conntrack_helper.h> | ||
15 | #include <net/netfilter/nf_conntrack_expect.h> | ||
16 | #include <linux/netfilter/nf_conntrack_tftp.h> | ||
17 | |||
18 | MODULE_AUTHOR("Magnus Boden <mb@ozaba.mine.nu>"); | ||
19 | MODULE_DESCRIPTION("TFTP NAT helper"); | ||
20 | MODULE_LICENSE("GPL"); | ||
21 | MODULE_ALIAS("ip_nat_tftp"); | ||
22 | |||
23 | static unsigned int help(struct sk_buff **pskb, | ||
24 | enum ip_conntrack_info ctinfo, | ||
25 | struct nf_conntrack_expect *exp) | ||
26 | { | ||
27 | struct nf_conn *ct = exp->master; | ||
28 | |||
29 | exp->saved_proto.udp.port | ||
30 | = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.udp.port; | ||
31 | exp->dir = IP_CT_DIR_REPLY; | ||
32 | exp->expectfn = nf_nat_follow_master; | ||
33 | if (nf_conntrack_expect_related(exp) != 0) | ||
34 | return NF_DROP; | ||
35 | return NF_ACCEPT; | ||
36 | } | ||
37 | |||
38 | static void __exit nf_nat_tftp_fini(void) | ||
39 | { | ||
40 | rcu_assign_pointer(nf_nat_tftp_hook, NULL); | ||
41 | synchronize_rcu(); | ||
42 | } | ||
43 | |||
44 | static int __init nf_nat_tftp_init(void) | ||
45 | { | ||
46 | BUG_ON(rcu_dereference(nf_nat_tftp_hook)); | ||
47 | rcu_assign_pointer(nf_nat_tftp_hook, help); | ||
48 | return 0; | ||
49 | } | ||
50 | |||
51 | module_init(nf_nat_tftp_init); | ||
52 | module_exit(nf_nat_tftp_fini); | ||
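nf_nat_tftp publishes its help() routine through a single global hook pointer, set with rcu_assign_pointer() on init and cleared on exit, which the TFTP conntrack helper presumably invokes when non-NULL. A loose userspace analogue using C11 atomics in place of RCU; all names are invented for illustration:

#include <stdatomic.h>
#include <stdio.h>

typedef int (*tftp_hook_t)(int pkt);

static _Atomic(tftp_hook_t) tftp_hook;          /* NULL until a helper registers */

static int my_helper(int pkt) { return pkt + 1; }

static void helper_register(void)   { atomic_store(&tftp_hook, my_helper); }
static void helper_unregister(void) { atomic_store(&tftp_hook, NULL); }

static int handle_packet(int pkt)
{
        tftp_hook_t h = atomic_load(&tftp_hook);
        return h ? h(pkt) : pkt;                /* fall through when no helper */
}

int main(void)
{
        helper_register();
        printf("%d\n", handle_packet(41));      /* 42: helper ran */
        helper_unregister();
        printf("%d\n", handle_packet(41));      /* 41: no helper  */
        return 0;
}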
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c index 9c6cbe3d9fb8..cd873da54cbe 100644 --- a/net/ipv4/proc.c +++ b/net/ipv4/proc.c | |||
@@ -38,6 +38,7 @@ | |||
38 | #include <net/protocol.h> | 38 | #include <net/protocol.h> |
39 | #include <net/tcp.h> | 39 | #include <net/tcp.h> |
40 | #include <net/udp.h> | 40 | #include <net/udp.h> |
41 | #include <net/udplite.h> | ||
41 | #include <linux/inetdevice.h> | 42 | #include <linux/inetdevice.h> |
42 | #include <linux/proc_fs.h> | 43 | #include <linux/proc_fs.h> |
43 | #include <linux/seq_file.h> | 44 | #include <linux/seq_file.h> |
@@ -66,6 +67,7 @@ static int sockstat_seq_show(struct seq_file *seq, void *v) | |||
66 | tcp_death_row.tw_count, atomic_read(&tcp_sockets_allocated), | 67 | tcp_death_row.tw_count, atomic_read(&tcp_sockets_allocated), |
67 | atomic_read(&tcp_memory_allocated)); | 68 | atomic_read(&tcp_memory_allocated)); |
68 | seq_printf(seq, "UDP: inuse %d\n", fold_prot_inuse(&udp_prot)); | 69 | seq_printf(seq, "UDP: inuse %d\n", fold_prot_inuse(&udp_prot)); |
70 | seq_printf(seq, "UDPLITE: inuse %d\n", fold_prot_inuse(&udplite_prot)); | ||
69 | seq_printf(seq, "RAW: inuse %d\n", fold_prot_inuse(&raw_prot)); | 71 | seq_printf(seq, "RAW: inuse %d\n", fold_prot_inuse(&raw_prot)); |
70 | seq_printf(seq, "FRAG: inuse %d memory %d\n", ip_frag_nqueues, | 72 | seq_printf(seq, "FRAG: inuse %d memory %d\n", ip_frag_nqueues, |
71 | atomic_read(&ip_frag_mem)); | 73 | atomic_read(&ip_frag_mem)); |
@@ -304,6 +306,17 @@ static int snmp_seq_show(struct seq_file *seq, void *v) | |||
304 | fold_field((void **) udp_statistics, | 306 | fold_field((void **) udp_statistics, |
305 | snmp4_udp_list[i].entry)); | 307 | snmp4_udp_list[i].entry)); |
306 | 308 | ||
309 | /* the UDP and UDP-Lite MIBs are the same */ | ||
310 | seq_puts(seq, "\nUdpLite:"); | ||
311 | for (i = 0; snmp4_udp_list[i].name != NULL; i++) | ||
312 | seq_printf(seq, " %s", snmp4_udp_list[i].name); | ||
313 | |||
314 | seq_puts(seq, "\nUdpLite:"); | ||
315 | for (i = 0; snmp4_udp_list[i].name != NULL; i++) | ||
316 | seq_printf(seq, " %lu", | ||
317 | fold_field((void **) udplite_statistics, | ||
318 | snmp4_udp_list[i].entry) ); | ||
319 | |||
307 | seq_putc(seq, '\n'); | 320 | seq_putc(seq, '\n'); |
308 | return 0; | 321 | return 0; |
309 | } | 322 | } |
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index 5c31dead2bdc..a6c63bbd9ddb 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c | |||
@@ -854,8 +854,8 @@ static void raw_seq_stop(struct seq_file *seq, void *v) | |||
854 | static __inline__ char *get_raw_sock(struct sock *sp, char *tmpbuf, int i) | 854 | static __inline__ char *get_raw_sock(struct sock *sp, char *tmpbuf, int i) |
855 | { | 855 | { |
856 | struct inet_sock *inet = inet_sk(sp); | 856 | struct inet_sock *inet = inet_sk(sp); |
857 | unsigned int dest = inet->daddr, | 857 | __be32 dest = inet->daddr, |
858 | src = inet->rcv_saddr; | 858 | src = inet->rcv_saddr; |
859 | __u16 destp = 0, | 859 | __u16 destp = 0, |
860 | srcp = inet->num; | 860 | srcp = inet->num; |
861 | 861 | ||
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 925ee4dfc32c..9f3924c4905e 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
@@ -566,11 +566,9 @@ static inline u32 rt_score(struct rtable *rt) | |||
566 | 566 | ||
567 | static inline int compare_keys(struct flowi *fl1, struct flowi *fl2) | 567 | static inline int compare_keys(struct flowi *fl1, struct flowi *fl2) |
568 | { | 568 | { |
569 | return ((fl1->nl_u.ip4_u.daddr ^ fl2->nl_u.ip4_u.daddr) | | 569 | return ((__force u32)((fl1->nl_u.ip4_u.daddr ^ fl2->nl_u.ip4_u.daddr) | |
570 | (fl1->nl_u.ip4_u.saddr ^ fl2->nl_u.ip4_u.saddr) | | 570 | (fl1->nl_u.ip4_u.saddr ^ fl2->nl_u.ip4_u.saddr)) | |
571 | #ifdef CONFIG_IP_ROUTE_FWMARK | 571 | (fl1->mark ^ fl2->mark) | |
572 | (fl1->nl_u.ip4_u.fwmark ^ fl2->nl_u.ip4_u.fwmark) | | ||
573 | #endif | ||
574 | (*(u16 *)&fl1->nl_u.ip4_u.tos ^ | 572 | (*(u16 *)&fl1->nl_u.ip4_u.tos ^ |
575 | *(u16 *)&fl2->nl_u.ip4_u.tos) | | 573 | *(u16 *)&fl2->nl_u.ip4_u.tos) | |
576 | (fl1->oif ^ fl2->oif) | | 574 | (fl1->oif ^ fl2->oif) | |
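compare_keys() above exploits the fact that XOR of equal fields is zero, so OR-ing all the per-field XORs is zero exactly when every field matches, with no branches. A small sketch of the same idea over a made-up key struct:

#include <stdint.h>
#include <stdio.h>

struct key { uint32_t daddr, saddr, mark; int oif; };

static int keys_differ(const struct key *a, const struct key *b)
{
        /* non-zero iff at least one field differs */
        return (a->daddr ^ b->daddr) | (a->saddr ^ b->saddr) |
               (a->mark  ^ b->mark)  | (uint32_t)(a->oif ^ b->oif);
}

int main(void)
{
        struct key a = { 0x0a000001, 0x0a000002, 7, 3 };
        struct key b = a;
        printf("differ: %d\n", keys_differ(&a, &b) != 0);   /* 0 -> equal */
        return 0;
}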
@@ -1643,9 +1641,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr, | |||
1643 | rth->fl.fl4_dst = daddr; | 1641 | rth->fl.fl4_dst = daddr; |
1644 | rth->rt_dst = daddr; | 1642 | rth->rt_dst = daddr; |
1645 | rth->fl.fl4_tos = tos; | 1643 | rth->fl.fl4_tos = tos; |
1646 | #ifdef CONFIG_IP_ROUTE_FWMARK | 1644 | rth->fl.mark = skb->mark; |
1647 | rth->fl.fl4_fwmark= skb->nfmark; | ||
1648 | #endif | ||
1649 | rth->fl.fl4_src = saddr; | 1645 | rth->fl.fl4_src = saddr; |
1650 | rth->rt_src = saddr; | 1646 | rth->rt_src = saddr; |
1651 | #ifdef CONFIG_NET_CLS_ROUTE | 1647 | #ifdef CONFIG_NET_CLS_ROUTE |
@@ -1789,9 +1785,7 @@ static inline int __mkroute_input(struct sk_buff *skb, | |||
1789 | rth->fl.fl4_dst = daddr; | 1785 | rth->fl.fl4_dst = daddr; |
1790 | rth->rt_dst = daddr; | 1786 | rth->rt_dst = daddr; |
1791 | rth->fl.fl4_tos = tos; | 1787 | rth->fl.fl4_tos = tos; |
1792 | #ifdef CONFIG_IP_ROUTE_FWMARK | 1788 | rth->fl.mark = skb->mark; |
1793 | rth->fl.fl4_fwmark= skb->nfmark; | ||
1794 | #endif | ||
1795 | rth->fl.fl4_src = saddr; | 1789 | rth->fl.fl4_src = saddr; |
1796 | rth->rt_src = saddr; | 1790 | rth->rt_src = saddr; |
1797 | rth->rt_gateway = daddr; | 1791 | rth->rt_gateway = daddr; |
@@ -1920,10 +1914,8 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr, | |||
1920 | .saddr = saddr, | 1914 | .saddr = saddr, |
1921 | .tos = tos, | 1915 | .tos = tos, |
1922 | .scope = RT_SCOPE_UNIVERSE, | 1916 | .scope = RT_SCOPE_UNIVERSE, |
1923 | #ifdef CONFIG_IP_ROUTE_FWMARK | ||
1924 | .fwmark = skb->nfmark | ||
1925 | #endif | ||
1926 | } }, | 1917 | } }, |
1918 | .mark = skb->mark, | ||
1927 | .iif = dev->ifindex }; | 1919 | .iif = dev->ifindex }; |
1928 | unsigned flags = 0; | 1920 | unsigned flags = 0; |
1929 | u32 itag = 0; | 1921 | u32 itag = 0; |
@@ -2034,9 +2026,7 @@ local_input: | |||
2034 | rth->fl.fl4_dst = daddr; | 2026 | rth->fl.fl4_dst = daddr; |
2035 | rth->rt_dst = daddr; | 2027 | rth->rt_dst = daddr; |
2036 | rth->fl.fl4_tos = tos; | 2028 | rth->fl.fl4_tos = tos; |
2037 | #ifdef CONFIG_IP_ROUTE_FWMARK | 2029 | rth->fl.mark = skb->mark; |
2038 | rth->fl.fl4_fwmark= skb->nfmark; | ||
2039 | #endif | ||
2040 | rth->fl.fl4_src = saddr; | 2030 | rth->fl.fl4_src = saddr; |
2041 | rth->rt_src = saddr; | 2031 | rth->rt_src = saddr; |
2042 | #ifdef CONFIG_NET_CLS_ROUTE | 2032 | #ifdef CONFIG_NET_CLS_ROUTE |
@@ -2113,9 +2103,7 @@ int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr, | |||
2113 | rth->fl.fl4_src == saddr && | 2103 | rth->fl.fl4_src == saddr && |
2114 | rth->fl.iif == iif && | 2104 | rth->fl.iif == iif && |
2115 | rth->fl.oif == 0 && | 2105 | rth->fl.oif == 0 && |
2116 | #ifdef CONFIG_IP_ROUTE_FWMARK | 2106 | rth->fl.mark == skb->mark && |
2117 | rth->fl.fl4_fwmark == skb->nfmark && | ||
2118 | #endif | ||
2119 | rth->fl.fl4_tos == tos) { | 2107 | rth->fl.fl4_tos == tos) { |
2120 | rth->u.dst.lastuse = jiffies; | 2108 | rth->u.dst.lastuse = jiffies; |
2121 | dst_hold(&rth->u.dst); | 2109 | dst_hold(&rth->u.dst); |
@@ -2239,9 +2227,7 @@ static inline int __mkroute_output(struct rtable **result, | |||
2239 | rth->fl.fl4_tos = tos; | 2227 | rth->fl.fl4_tos = tos; |
2240 | rth->fl.fl4_src = oldflp->fl4_src; | 2228 | rth->fl.fl4_src = oldflp->fl4_src; |
2241 | rth->fl.oif = oldflp->oif; | 2229 | rth->fl.oif = oldflp->oif; |
2242 | #ifdef CONFIG_IP_ROUTE_FWMARK | 2230 | rth->fl.mark = oldflp->mark; |
2243 | rth->fl.fl4_fwmark= oldflp->fl4_fwmark; | ||
2244 | #endif | ||
2245 | rth->rt_dst = fl->fl4_dst; | 2231 | rth->rt_dst = fl->fl4_dst; |
2246 | rth->rt_src = fl->fl4_src; | 2232 | rth->rt_src = fl->fl4_src; |
2247 | rth->rt_iif = oldflp->oif ? : dev_out->ifindex; | 2233 | rth->rt_iif = oldflp->oif ? : dev_out->ifindex; |
@@ -2385,10 +2371,8 @@ static int ip_route_output_slow(struct rtable **rp, const struct flowi *oldflp) | |||
2385 | .scope = ((tos & RTO_ONLINK) ? | 2371 | .scope = ((tos & RTO_ONLINK) ? |
2386 | RT_SCOPE_LINK : | 2372 | RT_SCOPE_LINK : |
2387 | RT_SCOPE_UNIVERSE), | 2373 | RT_SCOPE_UNIVERSE), |
2388 | #ifdef CONFIG_IP_ROUTE_FWMARK | ||
2389 | .fwmark = oldflp->fl4_fwmark | ||
2390 | #endif | ||
2391 | } }, | 2374 | } }, |
2375 | .mark = oldflp->mark, | ||
2392 | .iif = loopback_dev.ifindex, | 2376 | .iif = loopback_dev.ifindex, |
2393 | .oif = oldflp->oif }; | 2377 | .oif = oldflp->oif }; |
2394 | struct fib_result res; | 2378 | struct fib_result res; |
@@ -2583,9 +2567,7 @@ int __ip_route_output_key(struct rtable **rp, const struct flowi *flp) | |||
2583 | rth->fl.fl4_src == flp->fl4_src && | 2567 | rth->fl.fl4_src == flp->fl4_src && |
2584 | rth->fl.iif == 0 && | 2568 | rth->fl.iif == 0 && |
2585 | rth->fl.oif == flp->oif && | 2569 | rth->fl.oif == flp->oif && |
2586 | #ifdef CONFIG_IP_ROUTE_FWMARK | 2570 | rth->fl.mark == flp->mark && |
2587 | rth->fl.fl4_fwmark == flp->fl4_fwmark && | ||
2588 | #endif | ||
2589 | !((rth->fl.fl4_tos ^ flp->fl4_tos) & | 2571 | !((rth->fl.fl4_tos ^ flp->fl4_tos) & |
2590 | (IPTOS_RT_MASK | RTO_ONLINK))) { | 2572 | (IPTOS_RT_MASK | RTO_ONLINK))) { |
2591 | 2573 | ||
@@ -2647,7 +2629,8 @@ static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event, | |||
2647 | struct rtable *rt = (struct rtable*)skb->dst; | 2629 | struct rtable *rt = (struct rtable*)skb->dst; |
2648 | struct rtmsg *r; | 2630 | struct rtmsg *r; |
2649 | struct nlmsghdr *nlh; | 2631 | struct nlmsghdr *nlh; |
2650 | struct rta_cacheinfo ci; | 2632 | long expires; |
2633 | u32 id = 0, ts = 0, tsage = 0, error; | ||
2651 | 2634 | ||
2652 | nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags); | 2635 | nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags); |
2653 | if (nlh == NULL) | 2636 | if (nlh == NULL) |
@@ -2694,20 +2677,13 @@ static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event, | |||
2694 | if (rtnetlink_put_metrics(skb, rt->u.dst.metrics) < 0) | 2677 | if (rtnetlink_put_metrics(skb, rt->u.dst.metrics) < 0) |
2695 | goto nla_put_failure; | 2678 | goto nla_put_failure; |
2696 | 2679 | ||
2697 | ci.rta_lastuse = jiffies_to_clock_t(jiffies - rt->u.dst.lastuse); | 2680 | error = rt->u.dst.error; |
2698 | ci.rta_used = rt->u.dst.__use; | 2681 | expires = rt->u.dst.expires ? rt->u.dst.expires - jiffies : 0; |
2699 | ci.rta_clntref = atomic_read(&rt->u.dst.__refcnt); | ||
2700 | if (rt->u.dst.expires) | ||
2701 | ci.rta_expires = jiffies_to_clock_t(rt->u.dst.expires - jiffies); | ||
2702 | else | ||
2703 | ci.rta_expires = 0; | ||
2704 | ci.rta_error = rt->u.dst.error; | ||
2705 | ci.rta_id = ci.rta_ts = ci.rta_tsage = 0; | ||
2706 | if (rt->peer) { | 2682 | if (rt->peer) { |
2707 | ci.rta_id = rt->peer->ip_id_count; | 2683 | id = rt->peer->ip_id_count; |
2708 | if (rt->peer->tcp_ts_stamp) { | 2684 | if (rt->peer->tcp_ts_stamp) { |
2709 | ci.rta_ts = rt->peer->tcp_ts; | 2685 | ts = rt->peer->tcp_ts; |
2710 | ci.rta_tsage = xtime.tv_sec - rt->peer->tcp_ts_stamp; | 2686 | tsage = xtime.tv_sec - rt->peer->tcp_ts_stamp; |
2711 | } | 2687 | } |
2712 | } | 2688 | } |
2713 | 2689 | ||
@@ -2726,7 +2702,7 @@ static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event, | |||
2726 | } else { | 2702 | } else { |
2727 | if (err == -EMSGSIZE) | 2703 | if (err == -EMSGSIZE) |
2728 | goto nla_put_failure; | 2704 | goto nla_put_failure; |
2729 | ci.rta_error = err; | 2705 | error = err; |
2730 | } | 2706 | } |
2731 | } | 2707 | } |
2732 | } else | 2708 | } else |
@@ -2734,7 +2710,9 @@ static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event, | |||
2734 | NLA_PUT_U32(skb, RTA_IIF, rt->fl.iif); | 2710 | NLA_PUT_U32(skb, RTA_IIF, rt->fl.iif); |
2735 | } | 2711 | } |
2736 | 2712 | ||
2737 | NLA_PUT(skb, RTA_CACHEINFO, sizeof(ci), &ci); | 2713 | if (rtnl_put_cacheinfo(skb, &rt->u.dst, id, ts, tsage, |
2714 | expires, error) < 0) | ||
2715 | goto nla_put_failure; | ||
2738 | 2716 | ||
2739 | return nlmsg_end(skb, nlh); | 2717 | return nlmsg_end(skb, nlh); |
2740 | 2718 | ||
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index 661e0a4bca72..6b19530905af 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c | |||
@@ -35,23 +35,23 @@ module_init(init_syncookies); | |||
35 | #define COOKIEBITS 24 /* Upper bits store count */ | 35 | #define COOKIEBITS 24 /* Upper bits store count */ |
36 | #define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1) | 36 | #define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1) |
37 | 37 | ||
38 | static u32 cookie_hash(u32 saddr, u32 daddr, u32 sport, u32 dport, | 38 | static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport, |
39 | u32 count, int c) | 39 | u32 count, int c) |
40 | { | 40 | { |
41 | __u32 tmp[16 + 5 + SHA_WORKSPACE_WORDS]; | 41 | __u32 tmp[16 + 5 + SHA_WORKSPACE_WORDS]; |
42 | 42 | ||
43 | memcpy(tmp + 3, syncookie_secret[c], sizeof(syncookie_secret[c])); | 43 | memcpy(tmp + 3, syncookie_secret[c], sizeof(syncookie_secret[c])); |
44 | tmp[0] = saddr; | 44 | tmp[0] = (__force u32)saddr; |
45 | tmp[1] = daddr; | 45 | tmp[1] = (__force u32)daddr; |
46 | tmp[2] = (sport << 16) + dport; | 46 | tmp[2] = ((__force u32)sport << 16) + (__force u32)dport; |
47 | tmp[3] = count; | 47 | tmp[3] = count; |
48 | sha_transform(tmp + 16, (__u8 *)tmp, tmp + 16 + 5); | 48 | sha_transform(tmp + 16, (__u8 *)tmp, tmp + 16 + 5); |
49 | 49 | ||
50 | return tmp[17]; | 50 | return tmp[17]; |
51 | } | 51 | } |
52 | 52 | ||
53 | static __u32 secure_tcp_syn_cookie(__u32 saddr, __u32 daddr, __u16 sport, | 53 | static __u32 secure_tcp_syn_cookie(__be32 saddr, __be32 daddr, __be16 sport, |
54 | __u16 dport, __u32 sseq, __u32 count, | 54 | __be16 dport, __u32 sseq, __u32 count, |
55 | __u32 data) | 55 | __u32 data) |
56 | { | 56 | { |
57 | /* | 57 | /* |
@@ -80,8 +80,8 @@ static __u32 secure_tcp_syn_cookie(__u32 saddr, __u32 daddr, __u16 sport, | |||
80 | * "maxdiff" if the current (passed-in) "count". The return value | 80 | * "maxdiff" if the current (passed-in) "count". The return value |
81 | * is (__u32)-1 if this test fails. | 81 | * is (__u32)-1 if this test fails. |
82 | */ | 82 | */ |
83 | static __u32 check_tcp_syn_cookie(__u32 cookie, __u32 saddr, __u32 daddr, | 83 | static __u32 check_tcp_syn_cookie(__u32 cookie, __be32 saddr, __be32 daddr, |
84 | __u16 sport, __u16 dport, __u32 sseq, | 84 | __be16 sport, __be16 dport, __u32 sseq, |
85 | __u32 count, __u32 maxdiff) | 85 | __u32 count, __u32 maxdiff) |
86 | { | 86 | { |
87 | __u32 diff; | 87 | __u32 diff; |
@@ -220,7 +220,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, | |||
220 | } | 220 | } |
221 | ireq = inet_rsk(req); | 221 | ireq = inet_rsk(req); |
222 | treq = tcp_rsk(req); | 222 | treq = tcp_rsk(req); |
223 | treq->rcv_isn = htonl(skb->h.th->seq) - 1; | 223 | treq->rcv_isn = ntohl(skb->h.th->seq) - 1; |
224 | treq->snt_isn = cookie; | 224 | treq->snt_isn = cookie; |
225 | req->mss = mss; | 225 | req->mss = mss; |
226 | ireq->rmt_port = skb->h.th->source; | 226 | ireq->rmt_port = skb->h.th->source; |
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index 15061b314411..dfcf47f10f88 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c | |||
@@ -129,6 +129,67 @@ static int sysctl_tcp_congestion_control(ctl_table *table, int __user *name, | |||
129 | return ret; | 129 | return ret; |
130 | } | 130 | } |
131 | 131 | ||
132 | static int proc_tcp_available_congestion_control(ctl_table *ctl, | ||
133 | int write, struct file * filp, | ||
134 | void __user *buffer, size_t *lenp, | ||
135 | loff_t *ppos) | ||
136 | { | ||
137 | ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX, }; | ||
138 | int ret; | ||
139 | |||
140 | tbl.data = kmalloc(tbl.maxlen, GFP_USER); | ||
141 | if (!tbl.data) | ||
142 | return -ENOMEM; | ||
143 | tcp_get_available_congestion_control(tbl.data, TCP_CA_BUF_MAX); | ||
144 | ret = proc_dostring(&tbl, write, filp, buffer, lenp, ppos); | ||
145 | kfree(tbl.data); | ||
146 | return ret; | ||
147 | } | ||
148 | |||
149 | static int proc_allowed_congestion_control(ctl_table *ctl, | ||
150 | int write, struct file * filp, | ||
151 | void __user *buffer, size_t *lenp, | ||
152 | loff_t *ppos) | ||
153 | { | ||
154 | ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX }; | ||
155 | int ret; | ||
156 | |||
157 | tbl.data = kmalloc(tbl.maxlen, GFP_USER); | ||
158 | if (!tbl.data) | ||
159 | return -ENOMEM; | ||
160 | |||
161 | tcp_get_allowed_congestion_control(tbl.data, tbl.maxlen); | ||
162 | ret = proc_dostring(&tbl, write, filp, buffer, lenp, ppos); | ||
163 | if (write && ret == 0) | ||
164 | ret = tcp_set_allowed_congestion_control(tbl.data); | ||
165 | kfree(tbl.data); | ||
166 | return ret; | ||
167 | } | ||
168 | |||
169 | static int strategy_allowed_congestion_control(ctl_table *table, int __user *name, | ||
170 | int nlen, void __user *oldval, | ||
171 | size_t __user *oldlenp, | ||
172 | void __user *newval, size_t newlen, | ||
173 | void **context) | ||
174 | { | ||
175 | ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX }; | ||
176 | int ret; | ||
177 | |||
178 | tbl.data = kmalloc(tbl.maxlen, GFP_USER); | ||
179 | if (!tbl.data) | ||
180 | return -ENOMEM; | ||
181 | |||
182 | tcp_get_available_congestion_control(tbl.data, tbl.maxlen); | ||
183 | ret = sysctl_string(&tbl, name, nlen, oldval, oldlenp, newval, newlen, | ||
184 | context); | ||
185 | if (ret == 0 && newval && newlen) | ||
186 | ret = tcp_set_allowed_congestion_control(tbl.data); | ||
187 | kfree(tbl.data); | ||
188 | |||
189 | return ret; | ||
190 | |||
191 | } | ||
192 | |||
132 | ctl_table ipv4_table[] = { | 193 | ctl_table ipv4_table[] = { |
133 | { | 194 | { |
134 | .ctl_name = NET_IPV4_TCP_TIMESTAMPS, | 195 | .ctl_name = NET_IPV4_TCP_TIMESTAMPS, |
@@ -731,6 +792,21 @@ ctl_table ipv4_table[] = { | |||
731 | .proc_handler = &proc_dointvec, | 792 | .proc_handler = &proc_dointvec, |
732 | }, | 793 | }, |
733 | #endif /* CONFIG_NETLABEL */ | 794 | #endif /* CONFIG_NETLABEL */ |
795 | { | ||
796 | .ctl_name = NET_TCP_AVAIL_CONG_CONTROL, | ||
797 | .procname = "tcp_available_congestion_control", | ||
798 | .maxlen = TCP_CA_BUF_MAX, | ||
799 | .mode = 0444, | ||
800 | .proc_handler = &proc_tcp_available_congestion_control, | ||
801 | }, | ||
802 | { | ||
803 | .ctl_name = NET_TCP_ALLOWED_CONG_CONTROL, | ||
804 | .procname = "tcp_allowed_congestion_control", | ||
805 | .maxlen = TCP_CA_BUF_MAX, | ||
806 | .mode = 0644, | ||
807 | .proc_handler = &proc_allowed_congestion_control, | ||
808 | .strategy = &strategy_allowed_congestion_control, | ||
809 | }, | ||
734 | { .ctl_name = 0 } | 810 | { .ctl_name = 0 } |
735 | }; | 811 | }; |
736 | 812 | ||
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index c05e8edaf544..090c690627e5 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -258,6 +258,7 @@ | |||
258 | #include <linux/bootmem.h> | 258 | #include <linux/bootmem.h> |
259 | #include <linux/cache.h> | 259 | #include <linux/cache.h> |
260 | #include <linux/err.h> | 260 | #include <linux/err.h> |
261 | #include <linux/crypto.h> | ||
261 | 262 | ||
262 | #include <net/icmp.h> | 263 | #include <net/icmp.h> |
263 | #include <net/tcp.h> | 264 | #include <net/tcp.h> |
@@ -462,11 +463,12 @@ static inline int forced_push(struct tcp_sock *tp) | |||
462 | static inline void skb_entail(struct sock *sk, struct tcp_sock *tp, | 463 | static inline void skb_entail(struct sock *sk, struct tcp_sock *tp, |
463 | struct sk_buff *skb) | 464 | struct sk_buff *skb) |
464 | { | 465 | { |
465 | skb->csum = 0; | 466 | struct tcp_skb_cb *tcb = TCP_SKB_CB(skb); |
466 | TCP_SKB_CB(skb)->seq = tp->write_seq; | 467 | |
467 | TCP_SKB_CB(skb)->end_seq = tp->write_seq; | 468 | skb->csum = 0; |
468 | TCP_SKB_CB(skb)->flags = TCPCB_FLAG_ACK; | 469 | tcb->seq = tcb->end_seq = tp->write_seq; |
469 | TCP_SKB_CB(skb)->sacked = 0; | 470 | tcb->flags = TCPCB_FLAG_ACK; |
471 | tcb->sacked = 0; | ||
470 | skb_header_release(skb); | 472 | skb_header_release(skb); |
471 | __skb_queue_tail(&sk->sk_write_queue, skb); | 473 | __skb_queue_tail(&sk->sk_write_queue, skb); |
472 | sk_charge_skb(sk, skb); | 474 | sk_charge_skb(sk, skb); |
@@ -1942,6 +1944,13 @@ static int do_tcp_setsockopt(struct sock *sk, int level, | |||
1942 | } | 1944 | } |
1943 | break; | 1945 | break; |
1944 | 1946 | ||
1947 | #ifdef CONFIG_TCP_MD5SIG | ||
1948 | case TCP_MD5SIG: | ||
1949 | /* Read the IP->Key mappings from userspace */ | ||
1950 | err = tp->af_specific->md5_parse(sk, optval, optlen); | ||
1951 | break; | ||
1952 | #endif | ||
1953 | |||
1945 | default: | 1954 | default: |
1946 | err = -ENOPROTOOPT; | 1955 | err = -ENOPROTOOPT; |
1947 | break; | 1956 | break; |
@@ -2154,7 +2163,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features) | |||
2154 | struct tcphdr *th; | 2163 | struct tcphdr *th; |
2155 | unsigned thlen; | 2164 | unsigned thlen; |
2156 | unsigned int seq; | 2165 | unsigned int seq; |
2157 | unsigned int delta; | 2166 | __be32 delta; |
2158 | unsigned int oldlen; | 2167 | unsigned int oldlen; |
2159 | unsigned int len; | 2168 | unsigned int len; |
2160 | 2169 | ||
@@ -2207,7 +2216,8 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features) | |||
2207 | do { | 2216 | do { |
2208 | th->fin = th->psh = 0; | 2217 | th->fin = th->psh = 0; |
2209 | 2218 | ||
2210 | th->check = ~csum_fold(th->check + delta); | 2219 | th->check = ~csum_fold((__force __wsum)((__force u32)th->check + |
2220 | (__force u32)delta)); | ||
2211 | if (skb->ip_summed != CHECKSUM_PARTIAL) | 2221 | if (skb->ip_summed != CHECKSUM_PARTIAL) |
2212 | th->check = csum_fold(csum_partial(skb->h.raw, thlen, | 2222 | th->check = csum_fold(csum_partial(skb->h.raw, thlen, |
2213 | skb->csum)); | 2223 | skb->csum)); |
@@ -2221,7 +2231,8 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features) | |||
2221 | } while (skb->next); | 2231 | } while (skb->next); |
2222 | 2232 | ||
2223 | delta = htonl(oldlen + (skb->tail - skb->h.raw) + skb->data_len); | 2233 | delta = htonl(oldlen + (skb->tail - skb->h.raw) + skb->data_len); |
2224 | th->check = ~csum_fold(th->check + delta); | 2234 | th->check = ~csum_fold((__force __wsum)((__force u32)th->check + |
2235 | (__force u32)delta)); | ||
2225 | if (skb->ip_summed != CHECKSUM_PARTIAL) | 2236 | if (skb->ip_summed != CHECKSUM_PARTIAL) |
2226 | th->check = csum_fold(csum_partial(skb->h.raw, thlen, | 2237 | th->check = csum_fold(csum_partial(skb->h.raw, thlen, |
2227 | skb->csum)); | 2238 | skb->csum)); |
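The new th->check expression above adds a 32-bit length delta into the stored checksum and folds it back down to 16 bits. A sketch of what csum_fold() does to the 32-bit partial sum; a userspace stand-in, not the arch-optimized kernel helper:

#include <stdint.h>
#include <stdio.h>

static uint16_t csum_fold32(uint32_t sum)
{
        sum = (sum & 0xffff) + (sum >> 16);     /* add carries back in */
        sum = (sum & 0xffff) + (sum >> 16);     /* once more for the last carry */
        return (uint16_t)~sum;                  /* complemented, as the kernel returns */
}

int main(void)
{
        uint32_t partial = 0x0001fffe;          /* arbitrary partial sum */
        printf("folded: 0x%04x\n", (unsigned)csum_fold32(partial));
        return 0;
}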
@@ -2231,6 +2242,135 @@ out: | |||
2231 | } | 2242 | } |
2232 | EXPORT_SYMBOL(tcp_tso_segment); | 2243 | EXPORT_SYMBOL(tcp_tso_segment); |
2233 | 2244 | ||
2245 | #ifdef CONFIG_TCP_MD5SIG | ||
2246 | static unsigned long tcp_md5sig_users; | ||
2247 | static struct tcp_md5sig_pool **tcp_md5sig_pool; | ||
2248 | static DEFINE_SPINLOCK(tcp_md5sig_pool_lock); | ||
2249 | |||
2250 | static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool **pool) | ||
2251 | { | ||
2252 | int cpu; | ||
2253 | for_each_possible_cpu(cpu) { | ||
2254 | struct tcp_md5sig_pool *p = *per_cpu_ptr(pool, cpu); | ||
2255 | if (p) { | ||
2256 | if (p->md5_desc.tfm) | ||
2257 | crypto_free_hash(p->md5_desc.tfm); | ||
2258 | kfree(p); | ||
2259 | p = NULL; | ||
2260 | } | ||
2261 | } | ||
2262 | free_percpu(pool); | ||
2263 | } | ||
2264 | |||
2265 | void tcp_free_md5sig_pool(void) | ||
2266 | { | ||
2267 | struct tcp_md5sig_pool **pool = NULL; | ||
2268 | |||
2269 | spin_lock(&tcp_md5sig_pool_lock); | ||
2270 | if (--tcp_md5sig_users == 0) { | ||
2271 | pool = tcp_md5sig_pool; | ||
2272 | tcp_md5sig_pool = NULL; | ||
2273 | } | ||
2274 | spin_unlock(&tcp_md5sig_pool_lock); | ||
2275 | if (pool) | ||
2276 | __tcp_free_md5sig_pool(pool); | ||
2277 | } | ||
2278 | |||
2279 | EXPORT_SYMBOL(tcp_free_md5sig_pool); | ||
2280 | |||
2281 | static struct tcp_md5sig_pool **__tcp_alloc_md5sig_pool(void) | ||
2282 | { | ||
2283 | int cpu; | ||
2284 | struct tcp_md5sig_pool **pool; | ||
2285 | |||
2286 | pool = alloc_percpu(struct tcp_md5sig_pool *); | ||
2287 | if (!pool) | ||
2288 | return NULL; | ||
2289 | |||
2290 | for_each_possible_cpu(cpu) { | ||
2291 | struct tcp_md5sig_pool *p; | ||
2292 | struct crypto_hash *hash; | ||
2293 | |||
2294 | p = kzalloc(sizeof(*p), GFP_KERNEL); | ||
2295 | if (!p) | ||
2296 | goto out_free; | ||
2297 | *per_cpu_ptr(pool, cpu) = p; | ||
2298 | |||
2299 | hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC); | ||
2300 | if (!hash || IS_ERR(hash)) | ||
2301 | goto out_free; | ||
2302 | |||
2303 | p->md5_desc.tfm = hash; | ||
2304 | } | ||
2305 | return pool; | ||
2306 | out_free: | ||
2307 | __tcp_free_md5sig_pool(pool); | ||
2308 | return NULL; | ||
2309 | } | ||
2310 | |||
2311 | struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(void) | ||
2312 | { | ||
2313 | struct tcp_md5sig_pool **pool; | ||
2314 | int alloc = 0; | ||
2315 | |||
2316 | retry: | ||
2317 | spin_lock(&tcp_md5sig_pool_lock); | ||
2318 | pool = tcp_md5sig_pool; | ||
2319 | if (tcp_md5sig_users++ == 0) { | ||
2320 | alloc = 1; | ||
2321 | spin_unlock(&tcp_md5sig_pool_lock); | ||
2322 | } else if (!pool) { | ||
2323 | tcp_md5sig_users--; | ||
2324 | spin_unlock(&tcp_md5sig_pool_lock); | ||
2325 | cpu_relax(); | ||
2326 | goto retry; | ||
2327 | } else | ||
2328 | spin_unlock(&tcp_md5sig_pool_lock); | ||
2329 | |||
2330 | if (alloc) { | ||
2331 | /* we cannot hold spinlock here because this may sleep. */ | ||
2332 | struct tcp_md5sig_pool **p = __tcp_alloc_md5sig_pool(); | ||
2333 | spin_lock(&tcp_md5sig_pool_lock); | ||
2334 | if (!p) { | ||
2335 | tcp_md5sig_users--; | ||
2336 | spin_unlock(&tcp_md5sig_pool_lock); | ||
2337 | return NULL; | ||
2338 | } | ||
2339 | pool = tcp_md5sig_pool; | ||
2340 | if (pool) { | ||
2341 | /* oops, it has already been assigned. */ | ||
2342 | spin_unlock(&tcp_md5sig_pool_lock); | ||
2343 | __tcp_free_md5sig_pool(p); | ||
2344 | } else { | ||
2345 | tcp_md5sig_pool = pool = p; | ||
2346 | spin_unlock(&tcp_md5sig_pool_lock); | ||
2347 | } | ||
2348 | } | ||
2349 | return pool; | ||
2350 | } | ||
2351 | |||
2352 | EXPORT_SYMBOL(tcp_alloc_md5sig_pool); | ||
2353 | |||
2354 | struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu) | ||
2355 | { | ||
2356 | struct tcp_md5sig_pool **p; | ||
2357 | spin_lock(&tcp_md5sig_pool_lock); | ||
2358 | p = tcp_md5sig_pool; | ||
2359 | if (p) | ||
2360 | tcp_md5sig_users++; | ||
2361 | spin_unlock(&tcp_md5sig_pool_lock); | ||
2362 | return (p ? *per_cpu_ptr(p, cpu) : NULL); | ||
2363 | } | ||
2364 | |||
2365 | EXPORT_SYMBOL(__tcp_get_md5sig_pool); | ||
2366 | |||
2367 | void __tcp_put_md5sig_pool(void) { | ||
2368 | __tcp_free_md5sig_pool(tcp_md5sig_pool); | ||
2369 | } | ||
2370 | |||
2371 | EXPORT_SYMBOL(__tcp_put_md5sig_pool); | ||
2372 | #endif | ||
2373 | |||
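tcp_alloc_md5sig_pool() above counts users under a spinlock but performs the possibly-sleeping allocation outside it, then re-checks before publishing so a racing caller's copy gets freed. A simplified pthreads sketch of the same publish-or-free pattern; it omits the cpu_relax() retry the kernel uses while the first allocation is still in flight:

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long pool_users;
static void *pool;

void *pool_get(void)
{
        pthread_mutex_lock(&pool_lock);
        if (pool_users++ == 0) {
                /* first user: allocate outside the lock (may sleep) */
                pthread_mutex_unlock(&pool_lock);
                void *p = malloc(4096);
                pthread_mutex_lock(&pool_lock);
                if (p == NULL)
                        pool_users--;           /* allocation failed, drop ref */
                else if (pool == NULL)
                        pool = p;               /* publish our copy            */
                else
                        free(p);                /* lost a race, keep published */
        }
        pthread_mutex_unlock(&pool_lock);
        return pool;
}

int main(void)
{
        return pool_get() ? 0 : 1;
}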
2234 | extern void __skb_cb_too_small_for_tcp(int, int); | 2374 | extern void __skb_cb_too_small_for_tcp(int, int); |
2235 | extern struct tcp_congestion_ops tcp_reno; | 2375 | extern struct tcp_congestion_ops tcp_reno; |
2236 | 2376 | ||
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c index 1e2982f4acd4..5ca7723d0798 100644 --- a/net/ipv4/tcp_cong.c +++ b/net/ipv4/tcp_cong.c | |||
@@ -113,7 +113,7 @@ int tcp_set_default_congestion_control(const char *name) | |||
113 | spin_lock(&tcp_cong_list_lock); | 113 | spin_lock(&tcp_cong_list_lock); |
114 | ca = tcp_ca_find(name); | 114 | ca = tcp_ca_find(name); |
115 | #ifdef CONFIG_KMOD | 115 | #ifdef CONFIG_KMOD |
116 | if (!ca) { | 116 | if (!ca && capable(CAP_SYS_MODULE)) { |
117 | spin_unlock(&tcp_cong_list_lock); | 117 | spin_unlock(&tcp_cong_list_lock); |
118 | 118 | ||
119 | request_module("tcp_%s", name); | 119 | request_module("tcp_%s", name); |
@@ -123,6 +123,7 @@ int tcp_set_default_congestion_control(const char *name) | |||
123 | #endif | 123 | #endif |
124 | 124 | ||
125 | if (ca) { | 125 | if (ca) { |
126 | ca->non_restricted = 1; /* default is always allowed */ | ||
126 | list_move(&ca->list, &tcp_cong_list); | 127 | list_move(&ca->list, &tcp_cong_list); |
127 | ret = 0; | 128 | ret = 0; |
128 | } | 129 | } |
@@ -139,6 +140,22 @@ static int __init tcp_congestion_default(void) | |||
139 | late_initcall(tcp_congestion_default); | 140 | late_initcall(tcp_congestion_default); |
140 | 141 | ||
141 | 142 | ||
143 | /* Build string with list of available congestion control values */ | ||
144 | void tcp_get_available_congestion_control(char *buf, size_t maxlen) | ||
145 | { | ||
146 | struct tcp_congestion_ops *ca; | ||
147 | size_t offs = 0; | ||
148 | |||
149 | rcu_read_lock(); | ||
150 | list_for_each_entry_rcu(ca, &tcp_cong_list, list) { | ||
151 | offs += snprintf(buf + offs, maxlen - offs, | ||
152 | "%s%s", | ||
153 | offs == 0 ? "" : " ", ca->name); | ||
154 | |||
155 | } | ||
156 | rcu_read_unlock(); | ||
157 | } | ||
158 | |||
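tcp_get_available_congestion_control() builds its space-separated name list by accumulating snprintf()'s return value as the write offset. A standalone sketch of that pattern; like the kernel code, it assumes the names fit in the buffer:

#include <stdio.h>

int main(void)
{
        const char *names[] = { "reno", "cubic", "htcp" };
        char buf[64];
        size_t offs = 0;

        buf[0] = '\0';
        for (int i = 0; i < 3; i++)
                offs += snprintf(buf + offs, sizeof(buf) - offs, "%s%s",
                                 offs == 0 ? "" : " ", names[i]);
        printf("%s\n", buf);    /* "reno cubic htcp" */
        return 0;
}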
142 | /* Get current default congestion control */ | 159 | /* Get current default congestion control */ |
143 | void tcp_get_default_congestion_control(char *name) | 160 | void tcp_get_default_congestion_control(char *name) |
144 | { | 161 | { |
@@ -152,6 +169,64 @@ void tcp_get_default_congestion_control(char *name) | |||
152 | rcu_read_unlock(); | 169 | rcu_read_unlock(); |
153 | } | 170 | } |
154 | 171 | ||
172 | /* Build list of non-restricted congestion control values */ | ||
173 | void tcp_get_allowed_congestion_control(char *buf, size_t maxlen) | ||
174 | { | ||
175 | struct tcp_congestion_ops *ca; | ||
176 | size_t offs = 0; | ||
177 | |||
178 | *buf = '\0'; | ||
179 | rcu_read_lock(); | ||
180 | list_for_each_entry_rcu(ca, &tcp_cong_list, list) { | ||
181 | if (!ca->non_restricted) | ||
182 | continue; | ||
183 | offs += snprintf(buf + offs, maxlen - offs, | ||
184 | "%s%s", | ||
185 | offs == 0 ? "" : " ", ca->name); | ||
186 | |||
187 | } | ||
188 | rcu_read_unlock(); | ||
189 | } | ||
190 | |||
191 | /* Change list of non-restricted congestion control */ | ||
192 | int tcp_set_allowed_congestion_control(char *val) | ||
193 | { | ||
194 | struct tcp_congestion_ops *ca; | ||
195 | char *clone, *name; | ||
196 | int ret = 0; | ||
197 | |||
198 | clone = kstrdup(val, GFP_USER); | ||
199 | if (!clone) | ||
200 | return -ENOMEM; | ||
201 | |||
202 | spin_lock(&tcp_cong_list_lock); | ||
203 | /* pass 1 check for bad entries */ | ||
204 | while ((name = strsep(&clone, " ")) && *name) { | ||
205 | ca = tcp_ca_find(name); | ||
206 | if (!ca) { | ||
207 | ret = -ENOENT; | ||
208 | goto out; | ||
209 | } | ||
210 | } | ||
211 | |||
212 | /* pass 2 clear */ | ||
213 | list_for_each_entry_rcu(ca, &tcp_cong_list, list) | ||
214 | ca->non_restricted = 0; | ||
215 | |||
216 | /* pass 3 mark as allowed */ | ||
217 | while ((name = strsep(&val, " ")) && *name) { | ||
218 | ca = tcp_ca_find(name); | ||
219 | WARN_ON(!ca); | ||
220 | if (ca) | ||
221 | ca->non_restricted = 1; | ||
222 | } | ||
223 | out: | ||
224 | spin_unlock(&tcp_cong_list_lock); | ||
225 | |||
226 | return ret; | ||
227 | } | ||
228 | |||
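tcp_set_allowed_congestion_control() walks the space-separated value twice with strsep(): once over a duplicated copy to validate every name, then over the caller's buffer to mark the survivors. A userspace sketch of the same two-pass strsep() walk (strsep()/strdup() are the POSIX/BSD routines):

#include <stdio.h>
#include <string.h>
#include <stdlib.h>

int main(void)
{
        char val[] = "reno cubic htcp";
        char *clone = strdup(val), *cursor = clone, *name;

        /* pass 1: validate against a throwaway copy */
        while ((name = strsep(&cursor, " ")) && *name)
                printf("validate: %s\n", name);
        free(clone);

        /* pass 2: consume the caller's buffer itself */
        cursor = val;
        while ((name = strsep(&cursor, " ")) && *name)
                printf("allow: %s\n", name);
        return 0;
}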
229 | |||
155 | /* Change congestion control for socket */ | 230 | /* Change congestion control for socket */ |
156 | int tcp_set_congestion_control(struct sock *sk, const char *name) | 231 | int tcp_set_congestion_control(struct sock *sk, const char *name) |
157 | { | 232 | { |
@@ -161,12 +236,25 @@ int tcp_set_congestion_control(struct sock *sk, const char *name) | |||
161 | 236 | ||
162 | rcu_read_lock(); | 237 | rcu_read_lock(); |
163 | ca = tcp_ca_find(name); | 238 | ca = tcp_ca_find(name); |
239 | /* no change when asking for the existing value */ | ||
164 | if (ca == icsk->icsk_ca_ops) | 240 | if (ca == icsk->icsk_ca_ops) |
165 | goto out; | 241 | goto out; |
166 | 242 | ||
243 | #ifdef CONFIG_KMOD | ||
244 | /* not found, attempt to autoload module */ | ||
245 | if (!ca && capable(CAP_SYS_MODULE)) { | ||
246 | rcu_read_unlock(); | ||
247 | request_module("tcp_%s", name); | ||
248 | rcu_read_lock(); | ||
249 | ca = tcp_ca_find(name); | ||
250 | } | ||
251 | #endif | ||
167 | if (!ca) | 252 | if (!ca) |
168 | err = -ENOENT; | 253 | err = -ENOENT; |
169 | 254 | ||
255 | else if (!(ca->non_restricted || capable(CAP_NET_ADMIN))) | ||
256 | err = -EPERM; | ||
257 | |||
170 | else if (!try_module_get(ca->owner)) | 258 | else if (!try_module_get(ca->owner)) |
171 | err = -EBUSY; | 259 | err = -EBUSY; |
172 | 260 | ||
@@ -268,6 +356,7 @@ EXPORT_SYMBOL_GPL(tcp_reno_min_cwnd); | |||
268 | 356 | ||
269 | struct tcp_congestion_ops tcp_reno = { | 357 | struct tcp_congestion_ops tcp_reno = { |
270 | .name = "reno", | 358 | .name = "reno", |
359 | .non_restricted = 1, | ||
271 | .owner = THIS_MODULE, | 360 | .owner = THIS_MODULE, |
272 | .ssthresh = tcp_reno_ssthresh, | 361 | .ssthresh = tcp_reno_ssthresh, |
273 | .cong_avoid = tcp_reno_cong_avoid, | 362 | .cong_avoid = tcp_reno_cong_avoid, |
diff --git a/net/ipv4/tcp_htcp.c b/net/ipv4/tcp_htcp.c index 283be3cb4667..753987a1048f 100644 --- a/net/ipv4/tcp_htcp.c +++ b/net/ipv4/tcp_htcp.c | |||
@@ -26,12 +26,12 @@ struct htcp { | |||
26 | u32 alpha; /* Fixed point arith, << 7 */ | 26 | u32 alpha; /* Fixed point arith, << 7 */ |
27 | u8 beta; /* Fixed point arith, << 7 */ | 27 | u8 beta; /* Fixed point arith, << 7 */ |
28 | u8 modeswitch; /* Delay modeswitch until we had at least one congestion event */ | 28 | u8 modeswitch; /* Delay modeswitch until we had at least one congestion event */ |
29 | u32 last_cong; /* Time since last congestion event end */ | ||
30 | u32 undo_last_cong; | ||
31 | u16 pkts_acked; | 29 | u16 pkts_acked; |
32 | u32 packetcount; | 30 | u32 packetcount; |
33 | u32 minRTT; | 31 | u32 minRTT; |
34 | u32 maxRTT; | 32 | u32 maxRTT; |
33 | u32 last_cong; /* Time since last congestion event end */ | ||
34 | u32 undo_last_cong; | ||
35 | 35 | ||
36 | u32 undo_maxRTT; | 36 | u32 undo_maxRTT; |
37 | u32 undo_old_maxB; | 37 | u32 undo_old_maxB; |
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index cf06accbe687..9304034c0c47 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -2677,6 +2677,14 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx, | |||
2677 | opt_rx->sack_ok) { | 2677 | opt_rx->sack_ok) { |
2678 | TCP_SKB_CB(skb)->sacked = (ptr - 2) - (unsigned char *)th; | 2678 | TCP_SKB_CB(skb)->sacked = (ptr - 2) - (unsigned char *)th; |
2679 | } | 2679 | } |
2680 | #ifdef CONFIG_TCP_MD5SIG | ||
2681 | case TCPOPT_MD5SIG: | ||
2682 | /* | ||
2683 | * The MD5 Hash has already been | ||
2684 | * checked (see tcp_v{4,6}_do_rcv()). | ||
2685 | */ | ||
2686 | break; | ||
2687 | #endif | ||
2680 | }; | 2688 | }; |
2681 | ptr+=opsize-2; | 2689 | ptr+=opsize-2; |
2682 | length-=opsize; | 2690 | length-=opsize; |
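The option being skipped here has the fixed RFC 2385 layout: kind 19 (TCPOPT_MD5SIG), length 18 (TCPOLEN_MD5SIG), then a 16-byte digest. The struct below is purely illustrative, not a type introduced by this patch; the parser can ignore the digest because it was already verified in tcp_v{4,6}_do_rcv().

    #include <linux/types.h>

    struct tcp_md5sig_opt {              /* illustrative layout only */
            __u8 kind;                   /* TCPOPT_MD5SIG == 19  */
            __u8 len;                    /* TCPOLEN_MD5SIG == 18 */
            __u8 digest[16];             /* MD5 over pseudo-header, TCP header,
                                          * payload and shared key */
    } __attribute__((packed));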
@@ -3782,9 +3790,9 @@ static int tcp_copy_to_iovec(struct sock *sk, struct sk_buff *skb, int hlen) | |||
3782 | return err; | 3790 | return err; |
3783 | } | 3791 | } |
3784 | 3792 | ||
3785 | static int __tcp_checksum_complete_user(struct sock *sk, struct sk_buff *skb) | 3793 | static __sum16 __tcp_checksum_complete_user(struct sock *sk, struct sk_buff *skb) |
3786 | { | 3794 | { |
3787 | int result; | 3795 | __sum16 result; |
3788 | 3796 | ||
3789 | if (sock_owned_by_user(sk)) { | 3797 | if (sock_owned_by_user(sk)) { |
3790 | local_bh_enable(); | 3798 | local_bh_enable(); |
@@ -4230,6 +4238,8 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, | |||
4230 | mb(); | 4238 | mb(); |
4231 | tcp_set_state(sk, TCP_ESTABLISHED); | 4239 | tcp_set_state(sk, TCP_ESTABLISHED); |
4232 | 4240 | ||
4241 | security_inet_conn_established(sk, skb); | ||
4242 | |||
4233 | /* Make sure socket is routed, for correct metrics. */ | 4243 | /* Make sure socket is routed, for correct metrics. */ |
4234 | icsk->icsk_af_ops->rebuild_header(sk); | 4244 | icsk->icsk_af_ops->rebuild_header(sk); |
4235 | 4245 | ||
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 22ef8bd26620..a1222d6968c4 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -78,6 +78,9 @@ | |||
78 | #include <linux/proc_fs.h> | 78 | #include <linux/proc_fs.h> |
79 | #include <linux/seq_file.h> | 79 | #include <linux/seq_file.h> |
80 | 80 | ||
81 | #include <linux/crypto.h> | ||
82 | #include <linux/scatterlist.h> | ||
83 | |||
81 | int sysctl_tcp_tw_reuse __read_mostly; | 84 | int sysctl_tcp_tw_reuse __read_mostly; |
82 | int sysctl_tcp_low_latency __read_mostly; | 85 | int sysctl_tcp_low_latency __read_mostly; |
83 | 86 | ||
@@ -89,10 +92,19 @@ static struct socket *tcp_socket; | |||
89 | 92 | ||
90 | void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb); | 93 | void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb); |
91 | 94 | ||
95 | #ifdef CONFIG_TCP_MD5SIG | ||
96 | static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, | ||
97 | __be32 addr); | ||
98 | static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key, | ||
99 | __be32 saddr, __be32 daddr, | ||
100 | struct tcphdr *th, int protocol, | ||
101 | int tcplen); | ||
102 | #endif | ||
103 | |||
92 | struct inet_hashinfo __cacheline_aligned tcp_hashinfo = { | 104 | struct inet_hashinfo __cacheline_aligned tcp_hashinfo = { |
93 | .lhash_lock = __RW_LOCK_UNLOCKED(tcp_hashinfo.lhash_lock), | 105 | .lhash_lock = __RW_LOCK_UNLOCKED(tcp_hashinfo.lhash_lock), |
94 | .lhash_users = ATOMIC_INIT(0), | 106 | .lhash_users = ATOMIC_INIT(0), |
95 | .lhash_wait = __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.lhash_wait), | 107 | .lhash_wait = __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.lhash_wait), |
96 | }; | 108 | }; |
97 | 109 | ||
98 | static int tcp_v4_get_port(struct sock *sk, unsigned short snum) | 110 | static int tcp_v4_get_port(struct sock *sk, unsigned short snum) |
@@ -111,7 +123,7 @@ void tcp_unhash(struct sock *sk) | |||
111 | inet_unhash(&tcp_hashinfo, sk); | 123 | inet_unhash(&tcp_hashinfo, sk); |
112 | } | 124 | } |
113 | 125 | ||
114 | static inline __u32 tcp_v4_init_sequence(struct sock *sk, struct sk_buff *skb) | 126 | static inline __u32 tcp_v4_init_sequence(struct sk_buff *skb) |
115 | { | 127 | { |
116 | return secure_tcp_sequence_number(skb->nh.iph->daddr, | 128 | return secure_tcp_sequence_number(skb->nh.iph->daddr, |
117 | skb->nh.iph->saddr, | 129 | skb->nh.iph->saddr, |
@@ -205,13 +217,14 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) | |||
205 | if (tcp_death_row.sysctl_tw_recycle && | 217 | if (tcp_death_row.sysctl_tw_recycle && |
206 | !tp->rx_opt.ts_recent_stamp && rt->rt_dst == daddr) { | 218 | !tp->rx_opt.ts_recent_stamp && rt->rt_dst == daddr) { |
207 | struct inet_peer *peer = rt_get_peer(rt); | 219 | struct inet_peer *peer = rt_get_peer(rt); |
208 | 220 | /* | |
209 | /* VJ's idea. We save last timestamp seen from | 221 | * VJ's idea. We save last timestamp seen from |
210 | * the destination in peer table, when entering state TIME-WAIT | 222 | * the destination in peer table, when entering state |
211 | * and initialize rx_opt.ts_recent from it, when trying new connection. | 223 | * TIME-WAIT, and initialize rx_opt.ts_recent from it, |
224 | * when trying new connection. | ||
212 | */ | 225 | */ |
213 | 226 | if (peer != NULL && | |
214 | if (peer && peer->tcp_ts_stamp + TCP_PAWS_MSL >= xtime.tv_sec) { | 227 | peer->tcp_ts_stamp + TCP_PAWS_MSL >= xtime.tv_sec) { |
215 | tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp; | 228 | tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp; |
216 | tp->rx_opt.ts_recent = peer->tcp_ts; | 229 | tp->rx_opt.ts_recent = peer->tcp_ts; |
217 | } | 230 | } |
@@ -236,7 +249,8 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) | |||
236 | if (err) | 249 | if (err) |
237 | goto failure; | 250 | goto failure; |
238 | 251 | ||
239 | err = ip_route_newports(&rt, IPPROTO_TCP, inet->sport, inet->dport, sk); | 252 | err = ip_route_newports(&rt, IPPROTO_TCP, |
253 | inet->sport, inet->dport, sk); | ||
240 | if (err) | 254 | if (err) |
241 | goto failure; | 255 | goto failure; |
242 | 256 | ||
@@ -260,7 +274,10 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) | |||
260 | return 0; | 274 | return 0; |
261 | 275 | ||
262 | failure: | 276 | failure: |
263 | /* This unhashes the socket and releases the local port, if necessary. */ | 277 | /* |
278 | * This unhashes the socket and releases the local port, | ||
279 | * if necessary. | ||
280 | */ | ||
264 | tcp_set_state(sk, TCP_CLOSE); | 281 | tcp_set_state(sk, TCP_CLOSE); |
265 | ip_rt_put(rt); | 282 | ip_rt_put(rt); |
266 | sk->sk_route_caps = 0; | 283 | sk->sk_route_caps = 0; |
@@ -485,8 +502,9 @@ void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb) | |||
485 | struct tcphdr *th = skb->h.th; | 502 | struct tcphdr *th = skb->h.th; |
486 | 503 | ||
487 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | 504 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
488 | th->check = ~tcp_v4_check(th, len, inet->saddr, inet->daddr, 0); | 505 | th->check = ~tcp_v4_check(th, len, |
489 | skb->csum = offsetof(struct tcphdr, check); | 506 | inet->saddr, inet->daddr, 0); |
507 | skb->csum_offset = offsetof(struct tcphdr, check); | ||
490 | } else { | 508 | } else { |
491 | th->check = tcp_v4_check(th, len, inet->saddr, inet->daddr, | 509 | th->check = tcp_v4_check(th, len, inet->saddr, inet->daddr, |
492 | csum_partial((char *)th, | 510 | csum_partial((char *)th, |
@@ -508,7 +526,7 @@ int tcp_v4_gso_send_check(struct sk_buff *skb) | |||
508 | 526 | ||
509 | th->check = 0; | 527 | th->check = 0; |
510 | th->check = ~tcp_v4_check(th, skb->len, iph->saddr, iph->daddr, 0); | 528 | th->check = ~tcp_v4_check(th, skb->len, iph->saddr, iph->daddr, 0); |
511 | skb->csum = offsetof(struct tcphdr, check); | 529 | skb->csum_offset = offsetof(struct tcphdr, check); |
512 | skb->ip_summed = CHECKSUM_PARTIAL; | 530 | skb->ip_summed = CHECKSUM_PARTIAL; |
513 | return 0; | 531 | return 0; |
514 | } | 532 | } |
@@ -526,11 +544,19 @@ int tcp_v4_gso_send_check(struct sk_buff *skb) | |||
526 | * Exception: precedence violation. We do not implement it in any case. | 544 | * Exception: precedence violation. We do not implement it in any case. |
527 | */ | 545 | */ |
528 | 546 | ||
529 | static void tcp_v4_send_reset(struct sk_buff *skb) | 547 | static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb) |
530 | { | 548 | { |
531 | struct tcphdr *th = skb->h.th; | 549 | struct tcphdr *th = skb->h.th; |
532 | struct tcphdr rth; | 550 | struct { |
551 | struct tcphdr th; | ||
552 | #ifdef CONFIG_TCP_MD5SIG | ||
553 | __be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)]; | ||
554 | #endif | ||
555 | } rep; | ||
533 | struct ip_reply_arg arg; | 556 | struct ip_reply_arg arg; |
557 | #ifdef CONFIG_TCP_MD5SIG | ||
558 | struct tcp_md5sig_key *key; | ||
559 | #endif | ||
534 | 560 | ||
535 | /* Never send a reset in response to a reset. */ | 561 | /* Never send a reset in response to a reset. */ |
536 | if (th->rst) | 562 | if (th->rst) |
@@ -540,29 +566,49 @@ static void tcp_v4_send_reset(struct sk_buff *skb) | |||
540 | return; | 566 | return; |
541 | 567 | ||
542 | /* Swap the send and the receive. */ | 568 | /* Swap the send and the receive. */ |
543 | memset(&rth, 0, sizeof(struct tcphdr)); | 569 | memset(&rep, 0, sizeof(rep)); |
544 | rth.dest = th->source; | 570 | rep.th.dest = th->source; |
545 | rth.source = th->dest; | 571 | rep.th.source = th->dest; |
546 | rth.doff = sizeof(struct tcphdr) / 4; | 572 | rep.th.doff = sizeof(struct tcphdr) / 4; |
547 | rth.rst = 1; | 573 | rep.th.rst = 1; |
548 | 574 | ||
549 | if (th->ack) { | 575 | if (th->ack) { |
550 | rth.seq = th->ack_seq; | 576 | rep.th.seq = th->ack_seq; |
551 | } else { | 577 | } else { |
552 | rth.ack = 1; | 578 | rep.th.ack = 1; |
553 | rth.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin + | 579 | rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin + |
554 | skb->len - (th->doff << 2)); | 580 | skb->len - (th->doff << 2)); |
555 | } | 581 | } |
556 | 582 | ||
557 | memset(&arg, 0, sizeof arg); | 583 | memset(&arg, 0, sizeof(arg)); |
558 | arg.iov[0].iov_base = (unsigned char *)&rth; | 584 | arg.iov[0].iov_base = (unsigned char *)&rep; |
559 | arg.iov[0].iov_len = sizeof rth; | 585 | arg.iov[0].iov_len = sizeof(rep.th); |
586 | |||
587 | #ifdef CONFIG_TCP_MD5SIG | ||
588 | key = sk ? tcp_v4_md5_do_lookup(sk, skb->nh.iph->daddr) : NULL; | ||
589 | if (key) { | ||
590 | rep.opt[0] = htonl((TCPOPT_NOP << 24) | | ||
591 | (TCPOPT_NOP << 16) | | ||
592 | (TCPOPT_MD5SIG << 8) | | ||
593 | TCPOLEN_MD5SIG); | ||
594 | /* Update length and the length the header thinks exists */ | ||
595 | arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED; | ||
596 | rep.th.doff = arg.iov[0].iov_len / 4; | ||
597 | |||
598 | tcp_v4_do_calc_md5_hash((__u8 *)&rep.opt[1], | ||
599 | key, | ||
600 | skb->nh.iph->daddr, | ||
601 | skb->nh.iph->saddr, | ||
602 | &rep.th, IPPROTO_TCP, | ||
603 | arg.iov[0].iov_len); | ||
604 | } | ||
605 | #endif | ||
560 | arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr, | 606 | arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr, |
561 | skb->nh.iph->saddr, /*XXX*/ | 607 | skb->nh.iph->saddr, /* XXX */ |
562 | sizeof(struct tcphdr), IPPROTO_TCP, 0); | 608 | sizeof(struct tcphdr), IPPROTO_TCP, 0); |
563 | arg.csumoffset = offsetof(struct tcphdr, check) / 2; | 609 | arg.csumoffset = offsetof(struct tcphdr, check) / 2; |
564 | 610 | ||
565 | ip_send_reply(tcp_socket->sk, skb, &arg, sizeof rth); | 611 | ip_send_reply(tcp_socket->sk, skb, &arg, arg.iov[0].iov_len); |
566 | 612 | ||
567 | TCP_INC_STATS_BH(TCP_MIB_OUTSEGS); | 613 | TCP_INC_STATS_BH(TCP_MIB_OUTSEGS); |
568 | TCP_INC_STATS_BH(TCP_MIB_OUTRSTS); | 614 | TCP_INC_STATS_BH(TCP_MIB_OUTRSTS); |
@@ -572,28 +618,37 @@ static void tcp_v4_send_reset(struct sk_buff *skb) | |||
572 | outside socket context is ugly, certainly. What can I do? | 618 | outside socket context is ugly, certainly. What can I do? |
573 | */ | 619 | */ |
574 | 620 | ||
575 | static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack, | 621 | static void tcp_v4_send_ack(struct tcp_timewait_sock *twsk, |
622 | struct sk_buff *skb, u32 seq, u32 ack, | ||
576 | u32 win, u32 ts) | 623 | u32 win, u32 ts) |
577 | { | 624 | { |
578 | struct tcphdr *th = skb->h.th; | 625 | struct tcphdr *th = skb->h.th; |
579 | struct { | 626 | struct { |
580 | struct tcphdr th; | 627 | struct tcphdr th; |
581 | u32 tsopt[TCPOLEN_TSTAMP_ALIGNED >> 2]; | 628 | __be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2) |
629 | #ifdef CONFIG_TCP_MD5SIG | ||
630 | + (TCPOLEN_MD5SIG_ALIGNED >> 2) | ||
631 | #endif | ||
632 | ]; | ||
582 | } rep; | 633 | } rep; |
583 | struct ip_reply_arg arg; | 634 | struct ip_reply_arg arg; |
635 | #ifdef CONFIG_TCP_MD5SIG | ||
636 | struct tcp_md5sig_key *key; | ||
637 | struct tcp_md5sig_key tw_key; | ||
638 | #endif | ||
584 | 639 | ||
585 | memset(&rep.th, 0, sizeof(struct tcphdr)); | 640 | memset(&rep.th, 0, sizeof(struct tcphdr)); |
586 | memset(&arg, 0, sizeof arg); | 641 | memset(&arg, 0, sizeof(arg)); |
587 | 642 | ||
588 | arg.iov[0].iov_base = (unsigned char *)&rep; | 643 | arg.iov[0].iov_base = (unsigned char *)&rep; |
589 | arg.iov[0].iov_len = sizeof(rep.th); | 644 | arg.iov[0].iov_len = sizeof(rep.th); |
590 | if (ts) { | 645 | if (ts) { |
591 | rep.tsopt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | | 646 | rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | |
592 | (TCPOPT_TIMESTAMP << 8) | | 647 | (TCPOPT_TIMESTAMP << 8) | |
593 | TCPOLEN_TIMESTAMP); | 648 | TCPOLEN_TIMESTAMP); |
594 | rep.tsopt[1] = htonl(tcp_time_stamp); | 649 | rep.opt[1] = htonl(tcp_time_stamp); |
595 | rep.tsopt[2] = htonl(ts); | 650 | rep.opt[2] = htonl(ts); |
596 | arg.iov[0].iov_len = sizeof(rep); | 651 | arg.iov[0].iov_len = TCPOLEN_TSTAMP_ALIGNED; |
597 | } | 652 | } |
598 | 653 | ||
599 | /* Swap the send and the receive. */ | 654 | /* Swap the send and the receive. */ |
@@ -605,8 +660,44 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack, | |||
605 | rep.th.ack = 1; | 660 | rep.th.ack = 1; |
606 | rep.th.window = htons(win); | 661 | rep.th.window = htons(win); |
607 | 662 | ||
663 | #ifdef CONFIG_TCP_MD5SIG | ||
664 | /* | ||
665 | * The SKB holds an incoming packet, but may not have a valid ->sk | ||
666 | * pointer. This is especially the case when we're dealing with a | ||
667 | * TIME_WAIT ack, because the sk structure is long gone, and only | ||
668 | * the tcp_timewait_sock remains. So the md5 key is stashed in that | ||
669 | * structure, and we use it in preference. I believe that (twsk || | ||
670 | * skb->sk) holds true, but we program defensively. | ||
671 | */ | ||
672 | if (!twsk && skb->sk) { | ||
673 | key = tcp_v4_md5_do_lookup(skb->sk, skb->nh.iph->daddr); | ||
674 | } else if (twsk && twsk->tw_md5_keylen) { | ||
675 | tw_key.key = twsk->tw_md5_key; | ||
676 | tw_key.keylen = twsk->tw_md5_keylen; | ||
677 | key = &tw_key; | ||
678 | } else | ||
679 | key = NULL; | ||
680 | |||
681 | if (key) { | ||
682 | int offset = (ts) ? 3 : 0; | ||
683 | |||
684 | rep.opt[offset++] = htonl((TCPOPT_NOP << 24) | | ||
685 | (TCPOPT_NOP << 16) | | ||
686 | (TCPOPT_MD5SIG << 8) | | ||
687 | TCPOLEN_MD5SIG); | ||
688 | arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED; | ||
689 | rep.th.doff = arg.iov[0].iov_len/4; | ||
690 | |||
691 | tcp_v4_do_calc_md5_hash((__u8 *)&rep.opt[offset], | ||
692 | key, | ||
693 | skb->nh.iph->daddr, | ||
694 | skb->nh.iph->saddr, | ||
695 | &rep.th, IPPROTO_TCP, | ||
696 | arg.iov[0].iov_len); | ||
697 | } | ||
698 | #endif | ||
608 | arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr, | 699 | arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr, |
609 | skb->nh.iph->saddr, /*XXX*/ | 700 | skb->nh.iph->saddr, /* XXX */ |
610 | arg.iov[0].iov_len, IPPROTO_TCP, 0); | 701 | arg.iov[0].iov_len, IPPROTO_TCP, 0); |
611 | arg.csumoffset = offsetof(struct tcphdr, check) / 2; | 702 | arg.csumoffset = offsetof(struct tcphdr, check) / 2; |
612 | 703 | ||
@@ -618,17 +709,20 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack, | |||
618 | static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb) | 709 | static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb) |
619 | { | 710 | { |
620 | struct inet_timewait_sock *tw = inet_twsk(sk); | 711 | struct inet_timewait_sock *tw = inet_twsk(sk); |
621 | const struct tcp_timewait_sock *tcptw = tcp_twsk(sk); | 712 | struct tcp_timewait_sock *tcptw = tcp_twsk(sk); |
622 | 713 | ||
623 | tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt, | 714 | tcp_v4_send_ack(tcptw, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt, |
624 | tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale, tcptw->tw_ts_recent); | 715 | tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale, |
716 | tcptw->tw_ts_recent); | ||
625 | 717 | ||
626 | inet_twsk_put(tw); | 718 | inet_twsk_put(tw); |
627 | } | 719 | } |
628 | 720 | ||
629 | static void tcp_v4_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req) | 721 | static void tcp_v4_reqsk_send_ack(struct sk_buff *skb, |
722 | struct request_sock *req) | ||
630 | { | 723 | { |
631 | tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, | 724 | tcp_v4_send_ack(NULL, skb, tcp_rsk(req)->snt_isn + 1, |
725 | tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, | ||
632 | req->ts_recent); | 726 | req->ts_recent); |
633 | } | 727 | } |
634 | 728 | ||
@@ -662,8 +756,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req, | |||
662 | err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr, | 756 | err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr, |
663 | ireq->rmt_addr, | 757 | ireq->rmt_addr, |
664 | ireq->opt); | 758 | ireq->opt); |
665 | if (err == NET_XMIT_CN) | 759 | err = net_xmit_eval(err); |
666 | err = 0; | ||
667 | } | 760 | } |
668 | 761 | ||
669 | out: | 762 | out: |
@@ -715,7 +808,423 @@ static struct ip_options *tcp_v4_save_options(struct sock *sk, | |||
715 | return dopt; | 808 | return dopt; |
716 | } | 809 | } |
717 | 810 | ||
718 | struct request_sock_ops tcp_request_sock_ops = { | 811 | #ifdef CONFIG_TCP_MD5SIG |
812 | /* | ||
813 | * RFC2385 MD5 checksumming requires a mapping of | ||
814 | * IP address->MD5 Key. | ||
815 | * We need to maintain these in the sk structure. | ||
816 | */ | ||
817 | |||
818 | /* Find the Key structure for an address. */ | ||
819 | static struct tcp_md5sig_key * | ||
820 | tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr) | ||
821 | { | ||
822 | struct tcp_sock *tp = tcp_sk(sk); | ||
823 | int i; | ||
824 | |||
825 | if (!tp->md5sig_info || !tp->md5sig_info->entries4) | ||
826 | return NULL; | ||
827 | for (i = 0; i < tp->md5sig_info->entries4; i++) { | ||
828 | if (tp->md5sig_info->keys4[i].addr == addr) | ||
829 | return (struct tcp_md5sig_key *) | ||
830 | &tp->md5sig_info->keys4[i]; | ||
831 | } | ||
832 | return NULL; | ||
833 | } | ||
834 | |||
835 | struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk, | ||
836 | struct sock *addr_sk) | ||
837 | { | ||
838 | return tcp_v4_md5_do_lookup(sk, inet_sk(addr_sk)->daddr); | ||
839 | } | ||
840 | |||
841 | EXPORT_SYMBOL(tcp_v4_md5_lookup); | ||
842 | |||
843 | static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk, | ||
844 | struct request_sock *req) | ||
845 | { | ||
846 | return tcp_v4_md5_do_lookup(sk, inet_rsk(req)->rmt_addr); | ||
847 | } | ||
848 | |||
849 | /* This can be called on a newly created socket, from other files */ | ||
850 | int tcp_v4_md5_do_add(struct sock *sk, __be32 addr, | ||
851 | u8 *newkey, u8 newkeylen) | ||
852 | { | ||
853 | /* Add Key to the list */ | ||
854 | struct tcp4_md5sig_key *key; | ||
855 | struct tcp_sock *tp = tcp_sk(sk); | ||
856 | struct tcp4_md5sig_key *keys; | ||
857 | |||
858 | key = (struct tcp4_md5sig_key *)tcp_v4_md5_do_lookup(sk, addr); | ||
859 | if (key) { | ||
860 | /* Pre-existing entry - just update that one. */ | ||
861 | kfree(key->key); | ||
862 | key->key = newkey; | ||
863 | key->keylen = newkeylen; | ||
864 | } else { | ||
865 | struct tcp_md5sig_info *md5sig; | ||
866 | |||
867 | if (!tp->md5sig_info) { | ||
868 | tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), | ||
869 | GFP_ATOMIC); | ||
870 | if (!tp->md5sig_info) { | ||
871 | kfree(newkey); | ||
872 | return -ENOMEM; | ||
873 | } | ||
874 | } | ||
875 | if (tcp_alloc_md5sig_pool() == NULL) { | ||
876 | kfree(newkey); | ||
877 | return -ENOMEM; | ||
878 | } | ||
879 | md5sig = tp->md5sig_info; | ||
880 | |||
881 | if (md5sig->alloced4 == md5sig->entries4) { | ||
882 | keys = kmalloc((sizeof(*keys) * | ||
883 | (md5sig->entries4 + 1)), GFP_ATOMIC); | ||
884 | if (!keys) { | ||
885 | kfree(newkey); | ||
886 | tcp_free_md5sig_pool(); | ||
887 | return -ENOMEM; | ||
888 | } | ||
889 | |||
890 | if (md5sig->entries4) | ||
891 | memcpy(keys, md5sig->keys4, | ||
892 | sizeof(*keys) * md5sig->entries4); | ||
893 | |||
894 | /* Free old key list, and reference new one */ | ||
895 | if (md5sig->keys4) | ||
896 | kfree(md5sig->keys4); | ||
897 | md5sig->keys4 = keys; | ||
898 | md5sig->alloced4++; | ||
899 | } | ||
900 | md5sig->entries4++; | ||
901 | md5sig->keys4[md5sig->entries4 - 1].addr = addr; | ||
902 | md5sig->keys4[md5sig->entries4 - 1].key = newkey; | ||
903 | md5sig->keys4[md5sig->entries4 - 1].keylen = newkeylen; | ||
904 | } | ||
905 | return 0; | ||
906 | } | ||
907 | |||
908 | EXPORT_SYMBOL(tcp_v4_md5_do_add); | ||
909 | |||
910 | static int tcp_v4_md5_add_func(struct sock *sk, struct sock *addr_sk, | ||
911 | u8 *newkey, u8 newkeylen) | ||
912 | { | ||
913 | return tcp_v4_md5_do_add(sk, inet_sk(addr_sk)->daddr, | ||
914 | newkey, newkeylen); | ||
915 | } | ||
916 | |||
917 | int tcp_v4_md5_do_del(struct sock *sk, __be32 addr) | ||
918 | { | ||
919 | struct tcp_sock *tp = tcp_sk(sk); | ||
920 | int i; | ||
921 | |||
922 | for (i = 0; i < tp->md5sig_info->entries4; i++) { | ||
923 | if (tp->md5sig_info->keys4[i].addr == addr) { | ||
924 | /* Free the key */ | ||
925 | kfree(tp->md5sig_info->keys4[i].key); | ||
926 | tp->md5sig_info->entries4--; | ||
927 | |||
928 | if (tp->md5sig_info->entries4 == 0) { | ||
929 | kfree(tp->md5sig_info->keys4); | ||
930 | tp->md5sig_info->keys4 = NULL; | ||
931 | } else if (tp->md5sig_info->entries4 != i) { | ||
932 | /* Need to do some manipulation */ | ||
933 | memcpy(&tp->md5sig_info->keys4[i], | ||
934 | &tp->md5sig_info->keys4[i+1], | ||
935 | (tp->md5sig_info->entries4 - i) * | ||
936 | sizeof(struct tcp4_md5sig_key)); | ||
937 | } | ||
938 | tcp_free_md5sig_pool(); | ||
939 | return 0; | ||
940 | } | ||
941 | } | ||
942 | return -ENOENT; | ||
943 | } | ||
944 | |||
945 | EXPORT_SYMBOL(tcp_v4_md5_do_del); | ||
946 | |||
947 | static void tcp_v4_clear_md5_list(struct sock *sk) | ||
948 | { | ||
949 | struct tcp_sock *tp = tcp_sk(sk); | ||
950 | |||
951 | /* Free each key, then the set of keys, | ||
952 | * the crypto element, and then decrement our | ||
953 | * hold on the last resort crypto. | ||
954 | */ | ||
955 | if (tp->md5sig_info->entries4) { | ||
956 | int i; | ||
957 | for (i = 0; i < tp->md5sig_info->entries4; i++) | ||
958 | kfree(tp->md5sig_info->keys4[i].key); | ||
959 | tp->md5sig_info->entries4 = 0; | ||
960 | tcp_free_md5sig_pool(); | ||
961 | } | ||
962 | if (tp->md5sig_info->keys4) { | ||
963 | kfree(tp->md5sig_info->keys4); | ||
964 | tp->md5sig_info->keys4 = NULL; | ||
965 | tp->md5sig_info->alloced4 = 0; | ||
966 | } | ||
967 | } | ||
968 | |||
969 | static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval, | ||
970 | int optlen) | ||
971 | { | ||
972 | struct tcp_md5sig cmd; | ||
973 | struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr; | ||
974 | u8 *newkey; | ||
975 | |||
976 | if (optlen < sizeof(cmd)) | ||
977 | return -EINVAL; | ||
978 | |||
979 | if (copy_from_user(&cmd, optval, sizeof(cmd))) | ||
980 | return -EFAULT; | ||
981 | |||
982 | if (sin->sin_family != AF_INET) | ||
983 | return -EINVAL; | ||
984 | |||
985 | if (!cmd.tcpm_key || !cmd.tcpm_keylen) { | ||
986 | if (!tcp_sk(sk)->md5sig_info) | ||
987 | return -ENOENT; | ||
988 | return tcp_v4_md5_do_del(sk, sin->sin_addr.s_addr); | ||
989 | } | ||
990 | |||
991 | if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN) | ||
992 | return -EINVAL; | ||
993 | |||
994 | if (!tcp_sk(sk)->md5sig_info) { | ||
995 | struct tcp_sock *tp = tcp_sk(sk); | ||
996 | struct tcp_md5sig_info *p = kzalloc(sizeof(*p), GFP_KERNEL); | ||
997 | |||
998 | if (!p) | ||
999 | return -EINVAL; | ||
1000 | |||
1001 | tp->md5sig_info = p; | ||
1002 | |||
1003 | } | ||
1004 | |||
1005 | newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL); | ||
1006 | if (!newkey) | ||
1007 | return -ENOMEM; | ||
1008 | return tcp_v4_md5_do_add(sk, sin->sin_addr.s_addr, | ||
1009 | newkey, cmd.tcpm_keylen); | ||
1010 | } | ||
1011 | |||
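tcp_v4_parse_md5_keys() above is the receive side of a new TCP_MD5SIG socket option. The user-space sketch below shows adding a key for one peer; the option number, field names and fallback definitions mirror what this series adds to include/linux/tcp.h, and the address and key values are placeholders.

    #include <stdio.h>
    #include <string.h>
    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <sys/socket.h>
    #include <linux/types.h>

    #ifndef TCP_MD5SIG
    #define TCP_MD5SIG 14                        /* socket option number */
    struct tcp_md5sig {
            struct sockaddr_storage tcpm_addr;   /* peer the key is bound to */
            __u16   __tcpm_pad1;
            __u16   tcpm_keylen;                 /* key length in bytes */
            __u32   __tcpm_pad2;
            __u8    tcpm_key[80];                /* TCP_MD5SIG_MAXKEYLEN */
    };
    #endif

    int main(void)
    {
            int fd = socket(AF_INET, SOCK_STREAM, 0);
            struct tcp_md5sig md5;
            struct sockaddr_in *sin = (struct sockaddr_in *)&md5.tcpm_addr;
            const char key[] = "example-secret";             /* placeholder key  */

            memset(&md5, 0, sizeof(md5));
            sin->sin_family = AF_INET;
            inet_pton(AF_INET, "192.0.2.1", &sin->sin_addr); /* placeholder peer */
            md5.tcpm_keylen = strlen(key);
            memcpy(md5.tcpm_key, key, md5.tcpm_keylen);

            /* A zero tcpm_keylen instead deletes the key for that peer
             * (tcp_v4_md5_do_del(); ENOENT if none was configured). */
            if (setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5)) < 0)
                    perror("setsockopt(TCP_MD5SIG)");
            return 0;
    }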
1012 | static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key, | ||
1013 | __be32 saddr, __be32 daddr, | ||
1014 | struct tcphdr *th, int protocol, | ||
1015 | int tcplen) | ||
1016 | { | ||
1017 | struct scatterlist sg[4]; | ||
1018 | __u16 data_len; | ||
1019 | int block = 0; | ||
1020 | __sum16 old_checksum; | ||
1021 | struct tcp_md5sig_pool *hp; | ||
1022 | struct tcp4_pseudohdr *bp; | ||
1023 | struct hash_desc *desc; | ||
1024 | int err; | ||
1025 | unsigned int nbytes = 0; | ||
1026 | |||
1027 | /* | ||
1028 | * Okay, so RFC2385 is turned on for this connection, | ||
1029 | * so we need to generate the MD5 hash for the packet now. | ||
1030 | */ | ||
1031 | |||
1032 | hp = tcp_get_md5sig_pool(); | ||
1033 | if (!hp) | ||
1034 | goto clear_hash_noput; | ||
1035 | |||
1036 | bp = &hp->md5_blk.ip4; | ||
1037 | desc = &hp->md5_desc; | ||
1038 | |||
1039 | /* | ||
1040 | * 1. the TCP pseudo-header (in the order: source IP address, | ||
1041 | * destination IP address, zero-padded protocol number, and | ||
1042 | * segment length) | ||
1043 | */ | ||
1044 | bp->saddr = saddr; | ||
1045 | bp->daddr = daddr; | ||
1046 | bp->pad = 0; | ||
1047 | bp->protocol = protocol; | ||
1048 | bp->len = htons(tcplen); | ||
1049 | sg_set_buf(&sg[block++], bp, sizeof(*bp)); | ||
1050 | nbytes += sizeof(*bp); | ||
1051 | |||
1052 | /* 2. the TCP header, excluding options, and assuming a | ||
1053 | * checksum of zero. | ||
1054 | */ | ||
1055 | old_checksum = th->check; | ||
1056 | th->check = 0; | ||
1057 | sg_set_buf(&sg[block++], th, sizeof(struct tcphdr)); | ||
1058 | nbytes += sizeof(struct tcphdr); | ||
1059 | |||
1060 | /* 3. the TCP segment data (if any) */ | ||
1061 | data_len = tcplen - (th->doff << 2); | ||
1062 | if (data_len > 0) { | ||
1063 | unsigned char *data = (unsigned char *)th + (th->doff << 2); | ||
1064 | sg_set_buf(&sg[block++], data, data_len); | ||
1065 | nbytes += data_len; | ||
1066 | } | ||
1067 | |||
1068 | /* 4. an independently-specified key or password, known to both | ||
1069 | * TCPs and presumably connection-specific | ||
1070 | */ | ||
1071 | sg_set_buf(&sg[block++], key->key, key->keylen); | ||
1072 | nbytes += key->keylen; | ||
1073 | |||
1074 | /* Now store the Hash into the packet */ | ||
1075 | err = crypto_hash_init(desc); | ||
1076 | if (err) | ||
1077 | goto clear_hash; | ||
1078 | err = crypto_hash_update(desc, sg, nbytes); | ||
1079 | if (err) | ||
1080 | goto clear_hash; | ||
1081 | err = crypto_hash_final(desc, md5_hash); | ||
1082 | if (err) | ||
1083 | goto clear_hash; | ||
1084 | |||
1085 | /* Reset header, and free up the crypto */ | ||
1086 | tcp_put_md5sig_pool(); | ||
1087 | th->check = old_checksum; | ||
1088 | |||
1089 | out: | ||
1090 | return 0; | ||
1091 | clear_hash: | ||
1092 | tcp_put_md5sig_pool(); | ||
1093 | clear_hash_noput: | ||
1094 | memset(md5_hash, 0, 16); | ||
1095 | goto out; | ||
1096 | } | ||
1097 | |||
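tcp_v4_do_calc_md5_hash() feeds four pieces to the kernel crypto API in the order mandated by RFC 2385. The user-space sketch below reproduces the same digest with OpenSSL's MD5 primitives purely for illustration; the caller is expected to pass the fixed TCP header with its checksum already zeroed, exactly as the kernel routine does before hashing.

    #include <stdint.h>
    #include <string.h>
    #include <openssl/md5.h>

    struct tcp4_pseudohdr {                 /* mirrors the kernel structure  */
            uint32_t saddr;
            uint32_t daddr;
            uint8_t  pad;                   /* zero                          */
            uint8_t  protocol;              /* IPPROTO_TCP                   */
            uint16_t len;                   /* TCP segment length, net order */
    };

    static void rfc2385_digest(uint8_t out[16],
                               const struct tcp4_pseudohdr *bp,
                               const uint8_t *th, size_t thlen,  /* header, check == 0 */
                               const uint8_t *data, size_t dlen,
                               const uint8_t *key, size_t keylen)
    {
            MD5_CTX ctx;

            MD5_Init(&ctx);
            MD5_Update(&ctx, bp, sizeof(*bp));      /* 1. pseudo-header       */
            MD5_Update(&ctx, th, thlen);            /* 2. TCP header, no opts */
            if (dlen)
                    MD5_Update(&ctx, data, dlen);   /* 3. segment payload     */
            MD5_Update(&ctx, key, keylen);          /* 4. shared secret       */
            MD5_Final(out, &ctx);
    }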
1098 | int tcp_v4_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key, | ||
1099 | struct sock *sk, | ||
1100 | struct dst_entry *dst, | ||
1101 | struct request_sock *req, | ||
1102 | struct tcphdr *th, int protocol, | ||
1103 | int tcplen) | ||
1104 | { | ||
1105 | __be32 saddr, daddr; | ||
1106 | |||
1107 | if (sk) { | ||
1108 | saddr = inet_sk(sk)->saddr; | ||
1109 | daddr = inet_sk(sk)->daddr; | ||
1110 | } else { | ||
1111 | struct rtable *rt = (struct rtable *)dst; | ||
1112 | BUG_ON(!rt); | ||
1113 | saddr = rt->rt_src; | ||
1114 | daddr = rt->rt_dst; | ||
1115 | } | ||
1116 | return tcp_v4_do_calc_md5_hash(md5_hash, key, | ||
1117 | saddr, daddr, | ||
1118 | th, protocol, tcplen); | ||
1119 | } | ||
1120 | |||
1121 | EXPORT_SYMBOL(tcp_v4_calc_md5_hash); | ||
1122 | |||
1123 | static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb) | ||
1124 | { | ||
1125 | /* | ||
1126 | * This gets called for each TCP segment that arrives | ||
1127 | * so we want to be efficient. | ||
1128 | * We have 3 drop cases: | ||
1129 | * o No MD5 hash and one expected. | ||
1130 | * o MD5 hash and we're not expecting one. | ||
1131 | * o MD5 hash and it's wrong. | ||
1132 | */ | ||
1133 | __u8 *hash_location = NULL; | ||
1134 | struct tcp_md5sig_key *hash_expected; | ||
1135 | struct iphdr *iph = skb->nh.iph; | ||
1136 | struct tcphdr *th = skb->h.th; | ||
1137 | int length = (th->doff << 2) - sizeof(struct tcphdr); | ||
1138 | int genhash; | ||
1139 | unsigned char *ptr; | ||
1140 | unsigned char newhash[16]; | ||
1141 | |||
1142 | hash_expected = tcp_v4_md5_do_lookup(sk, iph->saddr); | ||
1143 | |||
1144 | /* | ||
1145 | * If the TCP option length is less than the TCP_MD5SIG | ||
1146 | * option length, then we can shortcut | ||
1147 | */ | ||
1148 | if (length < TCPOLEN_MD5SIG) { | ||
1149 | if (hash_expected) | ||
1150 | return 1; | ||
1151 | else | ||
1152 | return 0; | ||
1153 | } | ||
1154 | |||
1155 | /* Okay, we can't shortcut - we have to grub through the options */ | ||
1156 | ptr = (unsigned char *)(th + 1); | ||
1157 | while (length > 0) { | ||
1158 | int opcode = *ptr++; | ||
1159 | int opsize; | ||
1160 | |||
1161 | switch (opcode) { | ||
1162 | case TCPOPT_EOL: | ||
1163 | goto done_opts; | ||
1164 | case TCPOPT_NOP: | ||
1165 | length--; | ||
1166 | continue; | ||
1167 | default: | ||
1168 | opsize = *ptr++; | ||
1169 | if (opsize < 2) | ||
1170 | goto done_opts; | ||
1171 | if (opsize > length) | ||
1172 | goto done_opts; | ||
1173 | |||
1174 | if (opcode == TCPOPT_MD5SIG) { | ||
1175 | hash_location = ptr; | ||
1176 | goto done_opts; | ||
1177 | } | ||
1178 | } | ||
1179 | ptr += opsize-2; | ||
1180 | length -= opsize; | ||
1181 | } | ||
1182 | done_opts: | ||
1183 | /* We've parsed the options - do we have a hash? */ | ||
1184 | if (!hash_expected && !hash_location) | ||
1185 | return 0; | ||
1186 | |||
1187 | if (hash_expected && !hash_location) { | ||
1188 | LIMIT_NETDEBUG(KERN_INFO "MD5 Hash expected but NOT found " | ||
1189 | "(" NIPQUAD_FMT ", %d)->(" NIPQUAD_FMT ", %d)\n", | ||
1190 | NIPQUAD(iph->saddr), ntohs(th->source), | ||
1191 | NIPQUAD(iph->daddr), ntohs(th->dest)); | ||
1192 | return 1; | ||
1193 | } | ||
1194 | |||
1195 | if (!hash_expected && hash_location) { | ||
1196 | LIMIT_NETDEBUG(KERN_INFO "MD5 Hash NOT expected but found " | ||
1197 | "(" NIPQUAD_FMT ", %d)->(" NIPQUAD_FMT ", %d)\n", | ||
1198 | NIPQUAD(iph->saddr), ntohs(th->source), | ||
1199 | NIPQUAD(iph->daddr), ntohs(th->dest)); | ||
1200 | return 1; | ||
1201 | } | ||
1202 | |||
1203 | /* Okay, so this is hash_expected and hash_location - | ||
1204 | * so we need to calculate the hash and compare. | ||
1205 | */ | ||
1206 | genhash = tcp_v4_do_calc_md5_hash(newhash, | ||
1207 | hash_expected, | ||
1208 | iph->saddr, iph->daddr, | ||
1209 | th, sk->sk_protocol, | ||
1210 | skb->len); | ||
1211 | |||
1212 | if (genhash || memcmp(hash_location, newhash, 16) != 0) { | ||
1213 | if (net_ratelimit()) { | ||
1214 | printk(KERN_INFO "MD5 Hash failed for " | ||
1215 | "(" NIPQUAD_FMT ", %d)->(" NIPQUAD_FMT ", %d)%s\n", | ||
1216 | NIPQUAD(iph->saddr), ntohs(th->source), | ||
1217 | NIPQUAD(iph->daddr), ntohs(th->dest), | ||
1218 | genhash ? " tcp_v4_calc_md5_hash failed" : ""); | ||
1219 | } | ||
1220 | return 1; | ||
1221 | } | ||
1222 | return 0; | ||
1223 | } | ||
1224 | |||
1225 | #endif | ||
1226 | |||
1227 | struct request_sock_ops tcp_request_sock_ops __read_mostly = { | ||
719 | .family = PF_INET, | 1228 | .family = PF_INET, |
720 | .obj_size = sizeof(struct tcp_request_sock), | 1229 | .obj_size = sizeof(struct tcp_request_sock), |
721 | .rtx_syn_ack = tcp_v4_send_synack, | 1230 | .rtx_syn_ack = tcp_v4_send_synack, |
@@ -724,9 +1233,16 @@ struct request_sock_ops tcp_request_sock_ops = { | |||
724 | .send_reset = tcp_v4_send_reset, | 1233 | .send_reset = tcp_v4_send_reset, |
725 | }; | 1234 | }; |
726 | 1235 | ||
1236 | #ifdef CONFIG_TCP_MD5SIG | ||
1237 | static struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = { | ||
1238 | .md5_lookup = tcp_v4_reqsk_md5_lookup, | ||
1239 | }; | ||
1240 | #endif | ||
1241 | |||
727 | static struct timewait_sock_ops tcp_timewait_sock_ops = { | 1242 | static struct timewait_sock_ops tcp_timewait_sock_ops = { |
728 | .twsk_obj_size = sizeof(struct tcp_timewait_sock), | 1243 | .twsk_obj_size = sizeof(struct tcp_timewait_sock), |
729 | .twsk_unique = tcp_twsk_unique, | 1244 | .twsk_unique = tcp_twsk_unique, |
1245 | .twsk_destructor= tcp_twsk_destructor, | ||
730 | }; | 1246 | }; |
731 | 1247 | ||
732 | int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) | 1248 | int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) |
@@ -774,6 +1290,10 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) | |||
774 | if (!req) | 1290 | if (!req) |
775 | goto drop; | 1291 | goto drop; |
776 | 1292 | ||
1293 | #ifdef CONFIG_TCP_MD5SIG | ||
1294 | tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops; | ||
1295 | #endif | ||
1296 | |||
777 | tcp_clear_options(&tmp_opt); | 1297 | tcp_clear_options(&tmp_opt); |
778 | tmp_opt.mss_clamp = 536; | 1298 | tmp_opt.mss_clamp = 536; |
779 | tmp_opt.user_mss = tcp_sk(sk)->rx_opt.user_mss; | 1299 | tmp_opt.user_mss = tcp_sk(sk)->rx_opt.user_mss; |
@@ -859,7 +1379,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) | |||
859 | goto drop_and_free; | 1379 | goto drop_and_free; |
860 | } | 1380 | } |
861 | 1381 | ||
862 | isn = tcp_v4_init_sequence(sk, skb); | 1382 | isn = tcp_v4_init_sequence(skb); |
863 | } | 1383 | } |
864 | tcp_rsk(req)->snt_isn = isn; | 1384 | tcp_rsk(req)->snt_isn = isn; |
865 | 1385 | ||
@@ -892,6 +1412,9 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb, | |||
892 | struct inet_sock *newinet; | 1412 | struct inet_sock *newinet; |
893 | struct tcp_sock *newtp; | 1413 | struct tcp_sock *newtp; |
894 | struct sock *newsk; | 1414 | struct sock *newsk; |
1415 | #ifdef CONFIG_TCP_MD5SIG | ||
1416 | struct tcp_md5sig_key *key; | ||
1417 | #endif | ||
895 | 1418 | ||
896 | if (sk_acceptq_is_full(sk)) | 1419 | if (sk_acceptq_is_full(sk)) |
897 | goto exit_overflow; | 1420 | goto exit_overflow; |
@@ -926,6 +1449,22 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb, | |||
926 | newtp->advmss = dst_metric(dst, RTAX_ADVMSS); | 1449 | newtp->advmss = dst_metric(dst, RTAX_ADVMSS); |
927 | tcp_initialize_rcv_mss(newsk); | 1450 | tcp_initialize_rcv_mss(newsk); |
928 | 1451 | ||
1452 | #ifdef CONFIG_TCP_MD5SIG | ||
1453 | /* Copy over the MD5 key from the original socket */ | ||
1454 | if ((key = tcp_v4_md5_do_lookup(sk, newinet->daddr)) != NULL) { | ||
1455 | /* | ||
1456 | * We're using one, so create a matching key | ||
1457 | * on the newsk structure. If we fail to get | ||
1458 | * memory, then we end up not copying the key | ||
1459 | * across. Shucks. | ||
1460 | */ | ||
1461 | char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC); | ||
1462 | if (newkey != NULL) | ||
1463 | tcp_v4_md5_do_add(newsk, inet_sk(sk)->daddr, | ||
1464 | newkey, key->keylen); | ||
1465 | } | ||
1466 | #endif | ||
1467 | |||
929 | __inet_hash(&tcp_hashinfo, newsk, 0); | 1468 | __inet_hash(&tcp_hashinfo, newsk, 0); |
930 | __inet_inherit_port(&tcp_hashinfo, sk, newsk); | 1469 | __inet_inherit_port(&tcp_hashinfo, sk, newsk); |
931 | 1470 | ||
@@ -971,7 +1510,7 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb) | |||
971 | return sk; | 1510 | return sk; |
972 | } | 1511 | } |
973 | 1512 | ||
974 | static int tcp_v4_checksum_init(struct sk_buff *skb) | 1513 | static __sum16 tcp_v4_checksum_init(struct sk_buff *skb) |
975 | { | 1514 | { |
976 | if (skb->ip_summed == CHECKSUM_COMPLETE) { | 1515 | if (skb->ip_summed == CHECKSUM_COMPLETE) { |
977 | if (!tcp_v4_check(skb->h.th, skb->len, skb->nh.iph->saddr, | 1516 | if (!tcp_v4_check(skb->h.th, skb->len, skb->nh.iph->saddr, |
@@ -1001,10 +1540,24 @@ static int tcp_v4_checksum_init(struct sk_buff *skb) | |||
1001 | */ | 1540 | */ |
1002 | int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) | 1541 | int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) |
1003 | { | 1542 | { |
1543 | struct sock *rsk; | ||
1544 | #ifdef CONFIG_TCP_MD5SIG | ||
1545 | /* | ||
1546 | * We really want to reject the packet as early as possible | ||
1547 | * if: | ||
1548 | * o We're expecting an MD5'd packet and there is no MD5 TCP option | ||
1549 | * o There is an MD5 option and we're not expecting one | ||
1550 | */ | ||
1551 | if (tcp_v4_inbound_md5_hash(sk, skb)) | ||
1552 | goto discard; | ||
1553 | #endif | ||
1554 | |||
1004 | if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */ | 1555 | if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */ |
1005 | TCP_CHECK_TIMER(sk); | 1556 | TCP_CHECK_TIMER(sk); |
1006 | if (tcp_rcv_established(sk, skb, skb->h.th, skb->len)) | 1557 | if (tcp_rcv_established(sk, skb, skb->h.th, skb->len)) { |
1558 | rsk = sk; | ||
1007 | goto reset; | 1559 | goto reset; |
1560 | } | ||
1008 | TCP_CHECK_TIMER(sk); | 1561 | TCP_CHECK_TIMER(sk); |
1009 | return 0; | 1562 | return 0; |
1010 | } | 1563 | } |
@@ -1018,20 +1571,24 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) | |||
1018 | goto discard; | 1571 | goto discard; |
1019 | 1572 | ||
1020 | if (nsk != sk) { | 1573 | if (nsk != sk) { |
1021 | if (tcp_child_process(sk, nsk, skb)) | 1574 | if (tcp_child_process(sk, nsk, skb)) { |
1575 | rsk = nsk; | ||
1022 | goto reset; | 1576 | goto reset; |
1577 | } | ||
1023 | return 0; | 1578 | return 0; |
1024 | } | 1579 | } |
1025 | } | 1580 | } |
1026 | 1581 | ||
1027 | TCP_CHECK_TIMER(sk); | 1582 | TCP_CHECK_TIMER(sk); |
1028 | if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len)) | 1583 | if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len)) { |
1584 | rsk = sk; | ||
1029 | goto reset; | 1585 | goto reset; |
1586 | } | ||
1030 | TCP_CHECK_TIMER(sk); | 1587 | TCP_CHECK_TIMER(sk); |
1031 | return 0; | 1588 | return 0; |
1032 | 1589 | ||
1033 | reset: | 1590 | reset: |
1034 | tcp_v4_send_reset(skb); | 1591 | tcp_v4_send_reset(rsk, skb); |
1035 | discard: | 1592 | discard: |
1036 | kfree_skb(skb); | 1593 | kfree_skb(skb); |
1037 | /* Be careful here. If this function gets more complicated and | 1594 | /* Be careful here. If this function gets more complicated and |
@@ -1140,7 +1697,7 @@ no_tcp_socket: | |||
1140 | bad_packet: | 1697 | bad_packet: |
1141 | TCP_INC_STATS_BH(TCP_MIB_INERRS); | 1698 | TCP_INC_STATS_BH(TCP_MIB_INERRS); |
1142 | } else { | 1699 | } else { |
1143 | tcp_v4_send_reset(skb); | 1700 | tcp_v4_send_reset(NULL, skb); |
1144 | } | 1701 | } |
1145 | 1702 | ||
1146 | discard_it: | 1703 | discard_it: |
@@ -1263,6 +1820,15 @@ struct inet_connection_sock_af_ops ipv4_specific = { | |||
1263 | #endif | 1820 | #endif |
1264 | }; | 1821 | }; |
1265 | 1822 | ||
1823 | #ifdef CONFIG_TCP_MD5SIG | ||
1824 | static struct tcp_sock_af_ops tcp_sock_ipv4_specific = { | ||
1825 | .md5_lookup = tcp_v4_md5_lookup, | ||
1826 | .calc_md5_hash = tcp_v4_calc_md5_hash, | ||
1827 | .md5_add = tcp_v4_md5_add_func, | ||
1828 | .md5_parse = tcp_v4_parse_md5_keys, | ||
1829 | }; | ||
1830 | #endif | ||
1831 | |||
1266 | /* NOTE: A lot of things set to zero explicitly by call to | 1832 | /* NOTE: A lot of things set to zero explicitly by call to |
1267 | * sk_alloc() so need not be done here. | 1833 | * sk_alloc() so need not be done here. |
1268 | */ | 1834 | */ |
@@ -1302,6 +1868,9 @@ static int tcp_v4_init_sock(struct sock *sk) | |||
1302 | 1868 | ||
1303 | icsk->icsk_af_ops = &ipv4_specific; | 1869 | icsk->icsk_af_ops = &ipv4_specific; |
1304 | icsk->icsk_sync_mss = tcp_sync_mss; | 1870 | icsk->icsk_sync_mss = tcp_sync_mss; |
1871 | #ifdef CONFIG_TCP_MD5SIG | ||
1872 | tp->af_specific = &tcp_sock_ipv4_specific; | ||
1873 | #endif | ||
1305 | 1874 | ||
1306 | sk->sk_sndbuf = sysctl_tcp_wmem[1]; | 1875 | sk->sk_sndbuf = sysctl_tcp_wmem[1]; |
1307 | sk->sk_rcvbuf = sysctl_tcp_rmem[1]; | 1876 | sk->sk_rcvbuf = sysctl_tcp_rmem[1]; |
@@ -1325,6 +1894,15 @@ int tcp_v4_destroy_sock(struct sock *sk) | |||
1325 | /* Cleans up our, hopefully empty, out_of_order_queue. */ | 1894 | /* Cleans up our, hopefully empty, out_of_order_queue. */ |
1326 | __skb_queue_purge(&tp->out_of_order_queue); | 1895 | __skb_queue_purge(&tp->out_of_order_queue); |
1327 | 1896 | ||
1897 | #ifdef CONFIG_TCP_MD5SIG | ||
1898 | /* Clean up the MD5 key list, if any */ | ||
1899 | if (tp->md5sig_info) { | ||
1900 | tcp_v4_clear_md5_list(sk); | ||
1901 | kfree(tp->md5sig_info); | ||
1902 | tp->md5sig_info = NULL; | ||
1903 | } | ||
1904 | #endif | ||
1905 | |||
1328 | #ifdef CONFIG_NET_DMA | 1906 | #ifdef CONFIG_NET_DMA |
1329 | /* Cleans up our sk_async_wait_queue */ | 1907 | /* Cleans up our sk_async_wait_queue */ |
1330 | __skb_queue_purge(&sk->sk_async_wait_queue); | 1908 | __skb_queue_purge(&sk->sk_async_wait_queue); |
@@ -1385,7 +1963,7 @@ static void *listening_get_next(struct seq_file *seq, void *cur) | |||
1385 | if (st->state == TCP_SEQ_STATE_OPENREQ) { | 1963 | if (st->state == TCP_SEQ_STATE_OPENREQ) { |
1386 | struct request_sock *req = cur; | 1964 | struct request_sock *req = cur; |
1387 | 1965 | ||
1388 | icsk = inet_csk(st->syn_wait_sk); | 1966 | icsk = inet_csk(st->syn_wait_sk); |
1389 | req = req->dl_next; | 1967 | req = req->dl_next; |
1390 | while (1) { | 1968 | while (1) { |
1391 | while (req) { | 1969 | while (req) { |
@@ -1395,7 +1973,7 @@ static void *listening_get_next(struct seq_file *seq, void *cur) | |||
1395 | } | 1973 | } |
1396 | req = req->dl_next; | 1974 | req = req->dl_next; |
1397 | } | 1975 | } |
1398 | if (++st->sbucket >= TCP_SYNQ_HSIZE) | 1976 | if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries) |
1399 | break; | 1977 | break; |
1400 | get_req: | 1978 | get_req: |
1401 | req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket]; | 1979 | req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket]; |
@@ -1543,7 +2121,7 @@ static void *established_get_idx(struct seq_file *seq, loff_t pos) | |||
1543 | while (rc && pos) { | 2121 | while (rc && pos) { |
1544 | rc = established_get_next(seq, rc); | 2122 | rc = established_get_next(seq, rc); |
1545 | --pos; | 2123 | --pos; |
1546 | } | 2124 | } |
1547 | return rc; | 2125 | return rc; |
1548 | } | 2126 | } |
1549 | 2127 | ||
@@ -1672,7 +2250,7 @@ int tcp_proc_register(struct tcp_seq_afinfo *afinfo) | |||
1672 | afinfo->seq_fops->read = seq_read; | 2250 | afinfo->seq_fops->read = seq_read; |
1673 | afinfo->seq_fops->llseek = seq_lseek; | 2251 | afinfo->seq_fops->llseek = seq_lseek; |
1674 | afinfo->seq_fops->release = seq_release_private; | 2252 | afinfo->seq_fops->release = seq_release_private; |
1675 | 2253 | ||
1676 | p = proc_net_fops_create(afinfo->name, S_IRUGO, afinfo->seq_fops); | 2254 | p = proc_net_fops_create(afinfo->name, S_IRUGO, afinfo->seq_fops); |
1677 | if (p) | 2255 | if (p) |
1678 | p->data = afinfo; | 2256 | p->data = afinfo; |
@@ -1686,7 +2264,7 @@ void tcp_proc_unregister(struct tcp_seq_afinfo *afinfo) | |||
1686 | if (!afinfo) | 2264 | if (!afinfo) |
1687 | return; | 2265 | return; |
1688 | proc_net_remove(afinfo->name); | 2266 | proc_net_remove(afinfo->name); |
1689 | memset(afinfo->seq_fops, 0, sizeof(*afinfo->seq_fops)); | 2267 | memset(afinfo->seq_fops, 0, sizeof(*afinfo->seq_fops)); |
1690 | } | 2268 | } |
1691 | 2269 | ||
1692 | static void get_openreq4(struct sock *sk, struct request_sock *req, | 2270 | static void get_openreq4(struct sock *sk, struct request_sock *req, |
@@ -1721,8 +2299,8 @@ static void get_tcp4_sock(struct sock *sp, char *tmpbuf, int i) | |||
1721 | struct tcp_sock *tp = tcp_sk(sp); | 2299 | struct tcp_sock *tp = tcp_sk(sp); |
1722 | const struct inet_connection_sock *icsk = inet_csk(sp); | 2300 | const struct inet_connection_sock *icsk = inet_csk(sp); |
1723 | struct inet_sock *inet = inet_sk(sp); | 2301 | struct inet_sock *inet = inet_sk(sp); |
1724 | unsigned int dest = inet->daddr; | 2302 | __be32 dest = inet->daddr; |
1725 | unsigned int src = inet->rcv_saddr; | 2303 | __be32 src = inet->rcv_saddr; |
1726 | __u16 destp = ntohs(inet->dport); | 2304 | __u16 destp = ntohs(inet->dport); |
1727 | __u16 srcp = ntohs(inet->sport); | 2305 | __u16 srcp = ntohs(inet->sport); |
1728 | 2306 | ||
@@ -1744,7 +2322,8 @@ static void get_tcp4_sock(struct sock *sp, char *tmpbuf, int i) | |||
1744 | "%08X %5d %8d %lu %d %p %u %u %u %u %d", | 2322 | "%08X %5d %8d %lu %d %p %u %u %u %u %d", |
1745 | i, src, srcp, dest, destp, sp->sk_state, | 2323 | i, src, srcp, dest, destp, sp->sk_state, |
1746 | tp->write_seq - tp->snd_una, | 2324 | tp->write_seq - tp->snd_una, |
1747 | (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq), | 2325 | sp->sk_state == TCP_LISTEN ? sp->sk_ack_backlog : |
2326 | (tp->rcv_nxt - tp->copied_seq), | ||
1748 | timer_active, | 2327 | timer_active, |
1749 | jiffies_to_clock_t(timer_expires - jiffies), | 2328 | jiffies_to_clock_t(timer_expires - jiffies), |
1750 | icsk->icsk_retransmits, | 2329 | icsk->icsk_retransmits, |
@@ -1759,7 +2338,8 @@ static void get_tcp4_sock(struct sock *sp, char *tmpbuf, int i) | |||
1759 | tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh); | 2338 | tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh); |
1760 | } | 2339 | } |
1761 | 2340 | ||
1762 | static void get_timewait4_sock(struct inet_timewait_sock *tw, char *tmpbuf, int i) | 2341 | static void get_timewait4_sock(struct inet_timewait_sock *tw, |
2342 | char *tmpbuf, int i) | ||
1763 | { | 2343 | { |
1764 | __be32 dest, src; | 2344 | __be32 dest, src; |
1765 | __u16 destp, srcp; | 2345 | __u16 destp, srcp; |
@@ -1872,7 +2452,8 @@ struct proto tcp_prot = { | |||
1872 | 2452 | ||
1873 | void __init tcp_v4_init(struct net_proto_family *ops) | 2453 | void __init tcp_v4_init(struct net_proto_family *ops) |
1874 | { | 2454 | { |
1875 | if (inet_csk_ctl_sock_create(&tcp_socket, PF_INET, SOCK_RAW, IPPROTO_TCP) < 0) | 2455 | if (inet_csk_ctl_sock_create(&tcp_socket, PF_INET, SOCK_RAW, |
2456 | IPPROTO_TCP) < 0) | ||
1876 | panic("Failed to create the TCP control socket.\n"); | 2457 | panic("Failed to create the TCP control socket.\n"); |
1877 | } | 2458 | } |
1878 | 2459 | ||
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index af7b2c986b1f..4a3889dd1943 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c | |||
@@ -305,6 +305,28 @@ void tcp_time_wait(struct sock *sk, int state, int timeo) | |||
305 | tw->tw_ipv6only = np->ipv6only; | 305 | tw->tw_ipv6only = np->ipv6only; |
306 | } | 306 | } |
307 | #endif | 307 | #endif |
308 | |||
309 | #ifdef CONFIG_TCP_MD5SIG | ||
310 | /* | ||
311 | * The timewait bucket does not have the key DB from the | ||
312 | * sock structure. We just make a quick copy of the | ||
313 | * md5 key being used (if indeed we are using one) | ||
314 | * so the timewait ack generating code has the key. | ||
315 | */ | ||
316 | do { | ||
317 | struct tcp_md5sig_key *key; | ||
318 | memset(tcptw->tw_md5_key, 0, sizeof(tcptw->tw_md5_key)); | ||
319 | tcptw->tw_md5_keylen = 0; | ||
320 | key = tp->af_specific->md5_lookup(sk, sk); | ||
321 | if (key != NULL) { | ||
322 | memcpy(&tcptw->tw_md5_key, key->key, key->keylen); | ||
323 | tcptw->tw_md5_keylen = key->keylen; | ||
324 | if (tcp_alloc_md5sig_pool() == NULL) | ||
325 | BUG(); | ||
326 | } | ||
327 | } while(0); | ||
328 | #endif | ||
329 | |||
308 | /* Linkage updates. */ | 330 | /* Linkage updates. */ |
309 | __inet_twsk_hashdance(tw, sk, &tcp_hashinfo); | 331 | __inet_twsk_hashdance(tw, sk, &tcp_hashinfo); |
310 | 332 | ||
@@ -328,14 +350,24 @@ void tcp_time_wait(struct sock *sk, int state, int timeo) | |||
328 | * socket up. We've got bigger problems than | 350 | * socket up. We've got bigger problems than |
329 | * non-graceful socket closings. | 351 | * non-graceful socket closings. |
330 | */ | 352 | */ |
331 | if (net_ratelimit()) | 353 | LIMIT_NETDEBUG(KERN_INFO "TCP: time wait bucket table overflow\n"); |
332 | printk(KERN_INFO "TCP: time wait bucket table overflow\n"); | ||
333 | } | 354 | } |
334 | 355 | ||
335 | tcp_update_metrics(sk); | 356 | tcp_update_metrics(sk); |
336 | tcp_done(sk); | 357 | tcp_done(sk); |
337 | } | 358 | } |
338 | 359 | ||
360 | void tcp_twsk_destructor(struct sock *sk) | ||
361 | { | ||
362 | #ifdef CONFIG_TCP_MD5SIG | ||
363 | struct tcp_timewait_sock *twsk = tcp_twsk(sk); | ||
364 | if (twsk->tw_md5_keylen) | ||
365 | tcp_put_md5sig_pool(); | ||
366 | #endif | ||
367 | } | ||
368 | |||
369 | EXPORT_SYMBOL_GPL(tcp_twsk_destructor); | ||
370 | |||
339 | /* This is not only more efficient than what we used to do, it eliminates | 371 | /* This is not only more efficient than what we used to do, it eliminates |
340 | * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM | 372 | * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM |
341 | * | 373 | * |
@@ -434,6 +466,11 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, | |||
434 | newtp->rx_opt.ts_recent_stamp = 0; | 466 | newtp->rx_opt.ts_recent_stamp = 0; |
435 | newtp->tcp_header_len = sizeof(struct tcphdr); | 467 | newtp->tcp_header_len = sizeof(struct tcphdr); |
436 | } | 468 | } |
469 | #ifdef CONFIG_TCP_MD5SIG | ||
470 | newtp->md5sig_info = NULL; /*XXX*/ | ||
471 | if (newtp->af_specific->md5_lookup(sk, newsk)) | ||
472 | newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED; | ||
473 | #endif | ||
437 | if (skb->len >= TCP_MIN_RCVMSS+newtp->tcp_header_len) | 474 | if (skb->len >= TCP_MIN_RCVMSS+newtp->tcp_header_len) |
438 | newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len; | 475 | newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len; |
439 | newtp->rx_opt.mss_clamp = req->mss; | 476 | newtp->rx_opt.mss_clamp = req->mss; |
@@ -454,7 +491,7 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb, | |||
454 | struct request_sock **prev) | 491 | struct request_sock **prev) |
455 | { | 492 | { |
456 | struct tcphdr *th = skb->h.th; | 493 | struct tcphdr *th = skb->h.th; |
457 | u32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK); | 494 | __be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK); |
458 | int paws_reject = 0; | 495 | int paws_reject = 0; |
459 | struct tcp_options_received tmp_opt; | 496 | struct tcp_options_received tmp_opt; |
460 | struct sock *child; | 497 | struct sock *child; |
@@ -616,6 +653,30 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb, | |||
616 | req, NULL); | 653 | req, NULL); |
617 | if (child == NULL) | 654 | if (child == NULL) |
618 | goto listen_overflow; | 655 | goto listen_overflow; |
656 | #ifdef CONFIG_TCP_MD5SIG | ||
657 | else { | ||
658 | /* Copy over the MD5 key from the original socket */ | ||
659 | struct tcp_md5sig_key *key; | ||
660 | struct tcp_sock *tp = tcp_sk(sk); | ||
661 | key = tp->af_specific->md5_lookup(sk, child); | ||
662 | if (key != NULL) { | ||
663 | /* | ||
664 | * We're using one, so create a matching key on the | ||
665 | * newsk structure. If we fail to get memory then we | ||
666 | * end up not copying the key across. Shucks. | ||
667 | */ | ||
668 | char *newkey = kmemdup(key->key, key->keylen, | ||
669 | GFP_ATOMIC); | ||
670 | if (newkey) { | ||
671 | if (!tcp_alloc_md5sig_pool()) | ||
672 | BUG(); | ||
673 | tp->af_specific->md5_add(child, child, | ||
674 | newkey, | ||
675 | key->keylen); | ||
676 | } | ||
677 | } | ||
678 | } | ||
679 | #endif | ||
619 | 680 | ||
620 | inet_csk_reqsk_queue_unlink(sk, req, prev); | 681 | inet_csk_reqsk_queue_unlink(sk, req, prev); |
621 | inet_csk_reqsk_queue_removed(sk, req); | 682 | inet_csk_reqsk_queue_removed(sk, req); |
@@ -632,7 +693,7 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb, | |||
632 | embryonic_reset: | 693 | embryonic_reset: |
633 | NET_INC_STATS_BH(LINUX_MIB_EMBRYONICRSTS); | 694 | NET_INC_STATS_BH(LINUX_MIB_EMBRYONICRSTS); |
634 | if (!(flg & TCP_FLAG_RST)) | 695 | if (!(flg & TCP_FLAG_RST)) |
635 | req->rsk_ops->send_reset(skb); | 696 | req->rsk_ops->send_reset(sk, skb); |
636 | 697 | ||
637 | inet_csk_reqsk_queue_drop(sk, req, prev); | 698 | inet_csk_reqsk_queue_drop(sk, req, prev); |
638 | return NULL; | 699 | return NULL; |
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index ca406157724c..32c1a972fa31 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
@@ -270,7 +270,7 @@ static u16 tcp_select_window(struct sock *sk) | |||
270 | } | 270 | } |
271 | 271 | ||
272 | static void tcp_build_and_update_options(__be32 *ptr, struct tcp_sock *tp, | 272 | static void tcp_build_and_update_options(__be32 *ptr, struct tcp_sock *tp, |
273 | __u32 tstamp) | 273 | __u32 tstamp, __u8 **md5_hash) |
274 | { | 274 | { |
275 | if (tp->rx_opt.tstamp_ok) { | 275 | if (tp->rx_opt.tstamp_ok) { |
276 | *ptr++ = htonl((TCPOPT_NOP << 24) | | 276 | *ptr++ = htonl((TCPOPT_NOP << 24) | |
@@ -298,16 +298,29 @@ static void tcp_build_and_update_options(__be32 *ptr, struct tcp_sock *tp, | |||
298 | tp->rx_opt.eff_sacks--; | 298 | tp->rx_opt.eff_sacks--; |
299 | } | 299 | } |
300 | } | 300 | } |
301 | #ifdef CONFIG_TCP_MD5SIG | ||
302 | if (md5_hash) { | ||
303 | *ptr++ = htonl((TCPOPT_NOP << 24) | | ||
304 | (TCPOPT_NOP << 16) | | ||
305 | (TCPOPT_MD5SIG << 8) | | ||
306 | TCPOLEN_MD5SIG); | ||
307 | *md5_hash = (__u8 *)ptr; | ||
308 | } | ||
309 | #endif | ||
301 | } | 310 | } |
302 | 311 | ||
303 | /* Construct a tcp options header for a SYN or SYN_ACK packet. | 312 | /* Construct a tcp options header for a SYN or SYN_ACK packet. |
304 | * If this is ever changed make sure to change the definition of | 313 | * If this is ever changed make sure to change the definition of |
305 | * MAX_SYN_SIZE to match the new maximum number of options that you | 314 | * MAX_SYN_SIZE to match the new maximum number of options that you |
306 | * can generate. | 315 | * can generate. |
316 | * | ||
317 | * Note that with the RFC2385 TCP option, we make room for the | ||
318 | * 16 byte MD5 hash. This will be filled in later, so the pointer for the | ||
319 | * location to be filled is passed back up. | ||
307 | */ | 320 | */ |
308 | static void tcp_syn_build_options(__be32 *ptr, int mss, int ts, int sack, | 321 | static void tcp_syn_build_options(__be32 *ptr, int mss, int ts, int sack, |
309 | int offer_wscale, int wscale, __u32 tstamp, | 322 | int offer_wscale, int wscale, __u32 tstamp, |
310 | __u32 ts_recent) | 323 | __u32 ts_recent, __u8 **md5_hash) |
311 | { | 324 | { |
312 | /* We always get an MSS option. | 325 | /* We always get an MSS option. |
313 | * The option bytes which will be seen in normal data | 326 | * The option bytes which will be seen in normal data |
@@ -346,6 +359,20 @@ static void tcp_syn_build_options(__be32 *ptr, int mss, int ts, int sack, | |||
346 | (TCPOPT_WINDOW << 16) | | 359 | (TCPOPT_WINDOW << 16) | |
347 | (TCPOLEN_WINDOW << 8) | | 360 | (TCPOLEN_WINDOW << 8) | |
348 | (wscale)); | 361 | (wscale)); |
362 | #ifdef CONFIG_TCP_MD5SIG | ||
363 | /* | ||
364 | * If MD5 is enabled, then we set the option, and include the size | ||
365 | * (always 18). The actual MD5 hash is added just before the | ||
366 | * packet is sent. | ||
367 | */ | ||
368 | if (md5_hash) { | ||
369 | *ptr++ = htonl((TCPOPT_NOP << 24) | | ||
370 | (TCPOPT_NOP << 16) | | ||
371 | (TCPOPT_MD5SIG << 8) | | ||
372 | TCPOLEN_MD5SIG); | ||
373 | *md5_hash = (__u8 *) ptr; | ||
374 | } | ||
375 | #endif | ||
349 | } | 376 | } |
350 | 377 | ||
351 | /* This routine actually transmits TCP packets queued in by | 378 | /* This routine actually transmits TCP packets queued in by |
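[Editor's note: the RFC 2385 handling above only reserves option space; the digest itself is written later through the pointer passed back in *md5_hash. A minimal sketch of the on-the-wire layout the builders emit, assuming the usual constants (option kind 19, TCPOLEN_MD5SIG = 18, padded to 20 bytes with two leading NOPs):

#include <stdint.h>
#include <arpa/inet.h>          /* htonl() */

#define TCPOPT_NOP              1
#define TCPOPT_MD5SIG           19  /* RFC 2385 option kind */
#define TCPOLEN_MD5SIG          18  /* kind + length + 16-byte digest */
#define TCPOLEN_MD5SIG_ALIGNED  20  /* 18 rounded up by the two NOPs */

/* Reserve the option word the same way the builders above do and hand
 * back the address where the 16-byte digest will be written later. */
static uint8_t *reserve_md5_option(uint32_t *ptr)
{
        *ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
                       (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
        return (uint8_t *)ptr;  /* digest occupies the next 16 bytes */
}

Once the rest of the header is final, the caller computes the keyed hash over pseudo-header, TCP header and payload and copies the 16-byte result to the returned address.]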
@@ -366,6 +393,10 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, | |||
366 | struct tcp_sock *tp; | 393 | struct tcp_sock *tp; |
367 | struct tcp_skb_cb *tcb; | 394 | struct tcp_skb_cb *tcb; |
368 | int tcp_header_size; | 395 | int tcp_header_size; |
396 | #ifdef CONFIG_TCP_MD5SIG | ||
397 | struct tcp_md5sig_key *md5; | ||
398 | __u8 *md5_hash_location; | ||
399 | #endif | ||
369 | struct tcphdr *th; | 400 | struct tcphdr *th; |
370 | int sysctl_flags; | 401 | int sysctl_flags; |
371 | int err; | 402 | int err; |
@@ -424,9 +455,18 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, | |||
424 | if (tcp_packets_in_flight(tp) == 0) | 455 | if (tcp_packets_in_flight(tp) == 0) |
425 | tcp_ca_event(sk, CA_EVENT_TX_START); | 456 | tcp_ca_event(sk, CA_EVENT_TX_START); |
426 | 457 | ||
458 | #ifdef CONFIG_TCP_MD5SIG | ||
459 | /* | ||
460 | * Are we doing MD5 on this segment? If so - make | ||
461 | * room for it. | ||
462 | */ | ||
463 | md5 = tp->af_specific->md5_lookup(sk, sk); | ||
464 | if (md5) | ||
465 | tcp_header_size += TCPOLEN_MD5SIG_ALIGNED; | ||
466 | #endif | ||
467 | |||
427 | th = (struct tcphdr *) skb_push(skb, tcp_header_size); | 468 | th = (struct tcphdr *) skb_push(skb, tcp_header_size); |
428 | skb->h.th = th; | 469 | skb->h.th = th; |
429 | skb_set_owner_w(skb, sk); | ||
430 | 470 | ||
431 | /* Build TCP header and checksum it. */ | 471 | /* Build TCP header and checksum it. */ |
432 | th->source = inet->sport; | 472 | th->source = inet->sport; |
@@ -461,13 +501,34 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, | |||
461 | (sysctl_flags & SYSCTL_FLAG_WSCALE), | 501 | (sysctl_flags & SYSCTL_FLAG_WSCALE), |
462 | tp->rx_opt.rcv_wscale, | 502 | tp->rx_opt.rcv_wscale, |
463 | tcb->when, | 503 | tcb->when, |
464 | tp->rx_opt.ts_recent); | 504 | tp->rx_opt.ts_recent, |
505 | |||
506 | #ifdef CONFIG_TCP_MD5SIG | ||
507 | md5 ? &md5_hash_location : | ||
508 | #endif | ||
509 | NULL); | ||
465 | } else { | 510 | } else { |
466 | tcp_build_and_update_options((__be32 *)(th + 1), | 511 | tcp_build_and_update_options((__be32 *)(th + 1), |
467 | tp, tcb->when); | 512 | tp, tcb->when, |
513 | #ifdef CONFIG_TCP_MD5SIG | ||
514 | md5 ? &md5_hash_location : | ||
515 | #endif | ||
516 | NULL); | ||
468 | TCP_ECN_send(sk, tp, skb, tcp_header_size); | 517 | TCP_ECN_send(sk, tp, skb, tcp_header_size); |
469 | } | 518 | } |
470 | 519 | ||
520 | #ifdef CONFIG_TCP_MD5SIG | ||
521 | /* Calculate the MD5 hash, as we have all we need now */ | ||
522 | if (md5) { | ||
523 | tp->af_specific->calc_md5_hash(md5_hash_location, | ||
524 | md5, | ||
525 | sk, NULL, NULL, | ||
526 | skb->h.th, | ||
527 | sk->sk_protocol, | ||
528 | skb->len); | ||
529 | } | ||
530 | #endif | ||
531 | |||
471 | icsk->icsk_af_ops->send_check(sk, skb->len, skb); | 532 | icsk->icsk_af_ops->send_check(sk, skb->len, skb); |
472 | 533 | ||
473 | if (likely(tcb->flags & TCPCB_FLAG_ACK)) | 534 | if (likely(tcb->flags & TCPCB_FLAG_ACK)) |
@@ -479,19 +540,13 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, | |||
479 | if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq) | 540 | if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq) |
480 | TCP_INC_STATS(TCP_MIB_OUTSEGS); | 541 | TCP_INC_STATS(TCP_MIB_OUTSEGS); |
481 | 542 | ||
482 | err = icsk->icsk_af_ops->queue_xmit(skb, 0); | 543 | err = icsk->icsk_af_ops->queue_xmit(skb, sk, 0); |
483 | if (likely(err <= 0)) | 544 | if (likely(err <= 0)) |
484 | return err; | 545 | return err; |
485 | 546 | ||
486 | tcp_enter_cwr(sk); | 547 | tcp_enter_cwr(sk); |
487 | 548 | ||
488 | /* NET_XMIT_CN is special. It does not guarantee, | 549 | return net_xmit_eval(err); |
489 | * that this packet is lost. It tells that device | ||
490 | * is about to start to drop packets or already | ||
491 | * drops some packets of the same priority and | ||
492 | * invokes us to send less aggressively. | ||
493 | */ | ||
494 | return err == NET_XMIT_CN ? 0 : err; | ||
495 | 550 | ||
496 | #undef SYSCTL_FLAG_TSTAMPS | 551 | #undef SYSCTL_FLAG_TSTAMPS |
497 | #undef SYSCTL_FLAG_WSCALE | 552 | #undef SYSCTL_FLAG_WSCALE |
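[Editor's note: the deleted comment explained why NET_XMIT_CN is not treated as a loss; that mapping now lives in the net_xmit_eval() helper. A sketch of what such a helper boils down to -- the exact definition and constant values live in the networking headers, this only illustrates the mapping:

/* Congestion notification (NET_XMIT_CN) does not mean this packet was
 * lost, only that the device is starting to drop and the caller should
 * back off, so it is reported upward as success. */
#define NET_XMIT_SUCCESS 0
#define NET_XMIT_CN      2          /* illustrative value */

static inline int net_xmit_eval_sketch(int err)
{
        return err == NET_XMIT_CN ? NET_XMIT_SUCCESS : err;
}]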
@@ -847,6 +902,11 @@ unsigned int tcp_current_mss(struct sock *sk, int large_allowed) | |||
847 | mss_now -= (TCPOLEN_SACK_BASE_ALIGNED + | 902 | mss_now -= (TCPOLEN_SACK_BASE_ALIGNED + |
848 | (tp->rx_opt.eff_sacks * TCPOLEN_SACK_PERBLOCK)); | 903 | (tp->rx_opt.eff_sacks * TCPOLEN_SACK_PERBLOCK)); |
849 | 904 | ||
905 | #ifdef CONFIG_TCP_MD5SIG | ||
906 | if (tp->af_specific->md5_lookup(sk, sk)) | ||
907 | mss_now -= TCPOLEN_MD5SIG_ALIGNED; | ||
908 | #endif | ||
909 | |||
850 | xmit_size_goal = mss_now; | 910 | xmit_size_goal = mss_now; |
851 | 911 | ||
852 | if (doing_tso) { | 912 | if (doing_tso) { |
@@ -2040,6 +2100,10 @@ struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst, | |||
2040 | struct tcphdr *th; | 2100 | struct tcphdr *th; |
2041 | int tcp_header_size; | 2101 | int tcp_header_size; |
2042 | struct sk_buff *skb; | 2102 | struct sk_buff *skb; |
2103 | #ifdef CONFIG_TCP_MD5SIG | ||
2104 | struct tcp_md5sig_key *md5; | ||
2105 | __u8 *md5_hash_location; | ||
2106 | #endif | ||
2043 | 2107 | ||
2044 | skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC); | 2108 | skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC); |
2045 | if (skb == NULL) | 2109 | if (skb == NULL) |
@@ -2055,6 +2119,13 @@ struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst, | |||
2055 | (ireq->wscale_ok ? TCPOLEN_WSCALE_ALIGNED : 0) + | 2119 | (ireq->wscale_ok ? TCPOLEN_WSCALE_ALIGNED : 0) + |
2056 | /* SACK_PERM is in the place of NOP NOP of TS */ | 2120 | /* SACK_PERM is in the place of NOP NOP of TS */ |
2057 | ((ireq->sack_ok && !ireq->tstamp_ok) ? TCPOLEN_SACKPERM_ALIGNED : 0)); | 2121 | ((ireq->sack_ok && !ireq->tstamp_ok) ? TCPOLEN_SACKPERM_ALIGNED : 0)); |
2122 | |||
2123 | #ifdef CONFIG_TCP_MD5SIG | ||
2124 | /* Are we doing MD5 on this segment? If so - make room for it */ | ||
2125 | md5 = tcp_rsk(req)->af_specific->md5_lookup(sk, req); | ||
2126 | if (md5) | ||
2127 | tcp_header_size += TCPOLEN_MD5SIG_ALIGNED; | ||
2128 | #endif | ||
2058 | skb->h.th = th = (struct tcphdr *) skb_push(skb, tcp_header_size); | 2129 | skb->h.th = th = (struct tcphdr *) skb_push(skb, tcp_header_size); |
2059 | 2130 | ||
2060 | memset(th, 0, sizeof(struct tcphdr)); | 2131 | memset(th, 0, sizeof(struct tcphdr)); |
@@ -2092,11 +2163,29 @@ struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst, | |||
2092 | tcp_syn_build_options((__be32 *)(th + 1), dst_metric(dst, RTAX_ADVMSS), ireq->tstamp_ok, | 2163 | tcp_syn_build_options((__be32 *)(th + 1), dst_metric(dst, RTAX_ADVMSS), ireq->tstamp_ok, |
2093 | ireq->sack_ok, ireq->wscale_ok, ireq->rcv_wscale, | 2164 | ireq->sack_ok, ireq->wscale_ok, ireq->rcv_wscale, |
2094 | TCP_SKB_CB(skb)->when, | 2165 | TCP_SKB_CB(skb)->when, |
2095 | req->ts_recent); | 2166 | req->ts_recent, |
2167 | ( | ||
2168 | #ifdef CONFIG_TCP_MD5SIG | ||
2169 | md5 ? &md5_hash_location : | ||
2170 | #endif | ||
2171 | NULL) | ||
2172 | ); | ||
2096 | 2173 | ||
2097 | skb->csum = 0; | 2174 | skb->csum = 0; |
2098 | th->doff = (tcp_header_size >> 2); | 2175 | th->doff = (tcp_header_size >> 2); |
2099 | TCP_INC_STATS(TCP_MIB_OUTSEGS); | 2176 | TCP_INC_STATS(TCP_MIB_OUTSEGS); |
2177 | |||
2178 | #ifdef CONFIG_TCP_MD5SIG | ||
2179 | /* Okay, we have all we need - do the md5 hash if needed */ | ||
2180 | if (md5) { | ||
2181 | tp->af_specific->calc_md5_hash(md5_hash_location, | ||
2182 | md5, | ||
2183 | NULL, dst, req, | ||
2184 | skb->h.th, sk->sk_protocol, | ||
2185 | skb->len); | ||
2186 | } | ||
2187 | #endif | ||
2188 | |||
2100 | return skb; | 2189 | return skb; |
2101 | } | 2190 | } |
2102 | 2191 | ||
@@ -2115,6 +2204,11 @@ static void tcp_connect_init(struct sock *sk) | |||
2115 | tp->tcp_header_len = sizeof(struct tcphdr) + | 2204 | tp->tcp_header_len = sizeof(struct tcphdr) + |
2116 | (sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0); | 2205 | (sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0); |
2117 | 2206 | ||
2207 | #ifdef CONFIG_TCP_MD5SIG | ||
2208 | if (tp->af_specific->md5_lookup(sk, sk) != NULL) | ||
2209 | tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED; | ||
2210 | #endif | ||
2211 | |||
2118 | /* If user gave his TCP_MAXSEG, record it to clamp */ | 2212 | /* If user gave his TCP_MAXSEG, record it to clamp */ |
2119 | if (tp->rx_opt.user_mss) | 2213 | if (tp->rx_opt.user_mss) |
2120 | tp->rx_opt.mss_clamp = tp->rx_opt.user_mss; | 2214 | tp->rx_opt.mss_clamp = tp->rx_opt.user_mss; |
diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c index 4be336f17883..f230eeecf092 100644 --- a/net/ipv4/tcp_probe.c +++ b/net/ipv4/tcp_probe.c | |||
@@ -156,6 +156,8 @@ static __init int tcpprobe_init(void) | |||
156 | init_waitqueue_head(&tcpw.wait); | 156 | init_waitqueue_head(&tcpw.wait); |
157 | spin_lock_init(&tcpw.lock); | 157 | spin_lock_init(&tcpw.lock); |
158 | tcpw.fifo = kfifo_alloc(bufsize, GFP_KERNEL, &tcpw.lock); | 158 | tcpw.fifo = kfifo_alloc(bufsize, GFP_KERNEL, &tcpw.lock); |
159 | if (IS_ERR(tcpw.fifo)) | ||
160 | return PTR_ERR(tcpw.fifo); | ||
159 | 161 | ||
160 | if (!proc_net_fops_create(procname, S_IRUSR, &tcpprobe_fops)) | 162 | if (!proc_net_fops_create(procname, S_IRUSR, &tcpprobe_fops)) |
161 | goto err0; | 163 | goto err0; |
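[Editor's note: kfifo_alloc() reports failure through an error-valued pointer rather than NULL, which is why the added check uses IS_ERR()/PTR_ERR(). A condensed sketch of that convention, with a made-up type and allocator standing in for the kfifo:

#include <linux/err.h>     /* IS_ERR(), PTR_ERR(), ERR_PTR() */
#include <linux/slab.h>    /* kzalloc(), kfree() */

struct foo { int x; };     /* made-up type, purely for illustration */

static struct foo *foo_alloc(void)
{
        struct foo *p = kzalloc(sizeof(*p), GFP_KERNEL);

        /* Failure is reported as a pointer with an errno folded in,
         * not as NULL -- the same convention kfifo_alloc() uses. */
        return p ? p : ERR_PTR(-ENOMEM);
}

static int foo_init(void)
{
        struct foo *p = foo_alloc();

        if (IS_ERR(p))              /* detect the error-valued pointer */
                return PTR_ERR(p);  /* recover the negative errno */
        kfree(p);
        return 0;
}]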
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index fb09ade5897b..3355c276b611 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c | |||
@@ -297,7 +297,7 @@ static void tcp_retransmit_timer(struct sock *sk) | |||
297 | if (net_ratelimit()) { | 297 | if (net_ratelimit()) { |
298 | struct inet_sock *inet = inet_sk(sk); | 298 | struct inet_sock *inet = inet_sk(sk); |
299 | printk(KERN_DEBUG "TCP: Treason uncloaked! Peer %u.%u.%u.%u:%u/%u shrinks window %u:%u. Repaired.\n", | 299 | printk(KERN_DEBUG "TCP: Treason uncloaked! Peer %u.%u.%u.%u:%u/%u shrinks window %u:%u. Repaired.\n", |
300 | NIPQUAD(inet->daddr), htons(inet->dport), | 300 | NIPQUAD(inet->daddr), ntohs(inet->dport), |
301 | inet->num, tp->snd_una, tp->snd_nxt); | 301 | inet->num, tp->snd_una, tp->snd_nxt); |
302 | } | 302 | } |
303 | #endif | 303 | #endif |
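[Editor's note: the one-character fix above is semantic: inet->dport is stored in network byte order, and converting it for display is the job of ntohs(), not htons(); the two happen to perform the same byte swap on common machines, which is why the old code still printed the right number. A userspace illustration:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

int main(void)
{
        uint16_t dport_net = htons(80);        /* as stored in the socket */

        /* Correct direction: network -> host for human-readable output. */
        printf("port %u\n", ntohs(dport_net)); /* prints 80 */
        return 0;
}]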
diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c index a3b7aa015a2f..ddc4bcc5785e 100644 --- a/net/ipv4/tcp_vegas.c +++ b/net/ipv4/tcp_vegas.c | |||
@@ -42,8 +42,8 @@ | |||
42 | * with V_PARAM_SHIFT bits to the right of the binary point. | 42 | * with V_PARAM_SHIFT bits to the right of the binary point. |
43 | */ | 43 | */ |
44 | #define V_PARAM_SHIFT 1 | 44 | #define V_PARAM_SHIFT 1 |
45 | static int alpha = 1<<V_PARAM_SHIFT; | 45 | static int alpha = 2<<V_PARAM_SHIFT; |
46 | static int beta = 3<<V_PARAM_SHIFT; | 46 | static int beta = 4<<V_PARAM_SHIFT; |
47 | static int gamma = 1<<V_PARAM_SHIFT; | 47 | static int gamma = 1<<V_PARAM_SHIFT; |
48 | 48 | ||
49 | module_param(alpha, int, 0644); | 49 | module_param(alpha, int, 0644); |
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 865d75214a9a..035915fc9ed3 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c | |||
@@ -92,22 +92,16 @@ | |||
92 | #include <linux/timer.h> | 92 | #include <linux/timer.h> |
93 | #include <linux/mm.h> | 93 | #include <linux/mm.h> |
94 | #include <linux/inet.h> | 94 | #include <linux/inet.h> |
95 | #include <linux/ipv6.h> | ||
96 | #include <linux/netdevice.h> | 95 | #include <linux/netdevice.h> |
97 | #include <net/snmp.h> | ||
98 | #include <net/ip.h> | ||
99 | #include <net/tcp_states.h> | 96 | #include <net/tcp_states.h> |
100 | #include <net/protocol.h> | ||
101 | #include <linux/skbuff.h> | 97 | #include <linux/skbuff.h> |
102 | #include <linux/proc_fs.h> | 98 | #include <linux/proc_fs.h> |
103 | #include <linux/seq_file.h> | 99 | #include <linux/seq_file.h> |
104 | #include <net/sock.h> | ||
105 | #include <net/udp.h> | ||
106 | #include <net/icmp.h> | 100 | #include <net/icmp.h> |
107 | #include <net/route.h> | 101 | #include <net/route.h> |
108 | #include <net/inet_common.h> | ||
109 | #include <net/checksum.h> | 102 | #include <net/checksum.h> |
110 | #include <net/xfrm.h> | 103 | #include <net/xfrm.h> |
104 | #include "udp_impl.h" | ||
111 | 105 | ||
112 | /* | 106 | /* |
113 | * Snmp MIB for the UDP layer | 107 | * Snmp MIB for the UDP layer |
@@ -120,26 +114,30 @@ DEFINE_RWLOCK(udp_hash_lock); | |||
120 | 114 | ||
121 | static int udp_port_rover; | 115 | static int udp_port_rover; |
122 | 116 | ||
123 | static inline int udp_lport_inuse(u16 num) | 117 | static inline int __udp_lib_lport_inuse(__u16 num, struct hlist_head udptable[]) |
124 | { | 118 | { |
125 | struct sock *sk; | 119 | struct sock *sk; |
126 | struct hlist_node *node; | 120 | struct hlist_node *node; |
127 | 121 | ||
128 | sk_for_each(sk, node, &udp_hash[num & (UDP_HTABLE_SIZE - 1)]) | 122 | sk_for_each(sk, node, &udptable[num & (UDP_HTABLE_SIZE - 1)]) |
129 | if (inet_sk(sk)->num == num) | 123 | if (inet_sk(sk)->num == num) |
130 | return 1; | 124 | return 1; |
131 | return 0; | 125 | return 0; |
132 | } | 126 | } |
133 | 127 | ||
134 | /** | 128 | /** |
135 | * udp_get_port - common port lookup for IPv4 and IPv6 | 129 | * __udp_lib_get_port - UDP/-Lite port lookup for IPv4 and IPv6 |
136 | * | 130 | * |
137 | * @sk: socket struct in question | 131 | * @sk: socket struct in question |
138 | * @snum: port number to look up | 132 | * @snum: port number to look up |
133 | * @udptable: hash list table, must be of UDP_HTABLE_SIZE | ||
134 | * @port_rover: pointer to record of last unallocated port | ||
139 | * @saddr_comp: AF-dependent comparison of bound local IP addresses | 135 | * @saddr_comp: AF-dependent comparison of bound local IP addresses |
140 | */ | 136 | */ |
141 | int udp_get_port(struct sock *sk, unsigned short snum, | 137 | int __udp_lib_get_port(struct sock *sk, unsigned short snum, |
142 | int (*saddr_cmp)(const struct sock *sk1, const struct sock *sk2)) | 138 | struct hlist_head udptable[], int *port_rover, |
139 | int (*saddr_comp)(const struct sock *sk1, | ||
140 | const struct sock *sk2 ) ) | ||
143 | { | 141 | { |
144 | struct hlist_node *node; | 142 | struct hlist_node *node; |
145 | struct hlist_head *head; | 143 | struct hlist_head *head; |
@@ -150,15 +148,15 @@ int udp_get_port(struct sock *sk, unsigned short snum, | |||
150 | if (snum == 0) { | 148 | if (snum == 0) { |
151 | int best_size_so_far, best, result, i; | 149 | int best_size_so_far, best, result, i; |
152 | 150 | ||
153 | if (udp_port_rover > sysctl_local_port_range[1] || | 151 | if (*port_rover > sysctl_local_port_range[1] || |
154 | udp_port_rover < sysctl_local_port_range[0]) | 152 | *port_rover < sysctl_local_port_range[0]) |
155 | udp_port_rover = sysctl_local_port_range[0]; | 153 | *port_rover = sysctl_local_port_range[0]; |
156 | best_size_so_far = 32767; | 154 | best_size_so_far = 32767; |
157 | best = result = udp_port_rover; | 155 | best = result = *port_rover; |
158 | for (i = 0; i < UDP_HTABLE_SIZE; i++, result++) { | 156 | for (i = 0; i < UDP_HTABLE_SIZE; i++, result++) { |
159 | int size; | 157 | int size; |
160 | 158 | ||
161 | head = &udp_hash[result & (UDP_HTABLE_SIZE - 1)]; | 159 | head = &udptable[result & (UDP_HTABLE_SIZE - 1)]; |
162 | if (hlist_empty(head)) { | 160 | if (hlist_empty(head)) { |
163 | if (result > sysctl_local_port_range[1]) | 161 | if (result > sysctl_local_port_range[1]) |
164 | result = sysctl_local_port_range[0] + | 162 | result = sysctl_local_port_range[0] + |
@@ -179,15 +177,15 @@ int udp_get_port(struct sock *sk, unsigned short snum, | |||
179 | result = sysctl_local_port_range[0] | 177 | result = sysctl_local_port_range[0] |
180 | + ((result - sysctl_local_port_range[0]) & | 178 | + ((result - sysctl_local_port_range[0]) & |
181 | (UDP_HTABLE_SIZE - 1)); | 179 | (UDP_HTABLE_SIZE - 1)); |
182 | if (!udp_lport_inuse(result)) | 180 | if (! __udp_lib_lport_inuse(result, udptable)) |
183 | break; | 181 | break; |
184 | } | 182 | } |
185 | if (i >= (1 << 16) / UDP_HTABLE_SIZE) | 183 | if (i >= (1 << 16) / UDP_HTABLE_SIZE) |
186 | goto fail; | 184 | goto fail; |
187 | gotit: | 185 | gotit: |
188 | udp_port_rover = snum = result; | 186 | *port_rover = snum = result; |
189 | } else { | 187 | } else { |
190 | head = &udp_hash[snum & (UDP_HTABLE_SIZE - 1)]; | 188 | head = &udptable[snum & (UDP_HTABLE_SIZE - 1)]; |
191 | 189 | ||
192 | sk_for_each(sk2, node, head) | 190 | sk_for_each(sk2, node, head) |
193 | if (inet_sk(sk2)->num == snum && | 191 | if (inet_sk(sk2)->num == snum && |
@@ -195,12 +193,12 @@ gotit: | |||
195 | (!sk2->sk_reuse || !sk->sk_reuse) && | 193 | (!sk2->sk_reuse || !sk->sk_reuse) && |
196 | (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if | 194 | (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if |
197 | || sk2->sk_bound_dev_if == sk->sk_bound_dev_if) && | 195 | || sk2->sk_bound_dev_if == sk->sk_bound_dev_if) && |
198 | (*saddr_cmp)(sk, sk2) ) | 196 | (*saddr_comp)(sk, sk2) ) |
199 | goto fail; | 197 | goto fail; |
200 | } | 198 | } |
201 | inet_sk(sk)->num = snum; | 199 | inet_sk(sk)->num = snum; |
202 | if (sk_unhashed(sk)) { | 200 | if (sk_unhashed(sk)) { |
203 | head = &udp_hash[snum & (UDP_HTABLE_SIZE - 1)]; | 201 | head = &udptable[snum & (UDP_HTABLE_SIZE - 1)]; |
204 | sk_add_node(sk, head); | 202 | sk_add_node(sk, head); |
205 | sock_prot_inc_use(sk->sk_prot); | 203 | sock_prot_inc_use(sk->sk_prot); |
206 | } | 204 | } |
@@ -210,7 +208,13 @@ fail: | |||
210 | return error; | 208 | return error; |
211 | } | 209 | } |
212 | 210 | ||
213 | static inline int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2) | 211 | __inline__ int udp_get_port(struct sock *sk, unsigned short snum, |
212 | int (*scmp)(const struct sock *, const struct sock *)) | ||
213 | { | ||
214 | return __udp_lib_get_port(sk, snum, udp_hash, &udp_port_rover, scmp); | ||
215 | } | ||
216 | |||
217 | inline int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2) | ||
214 | { | 218 | { |
215 | struct inet_sock *inet1 = inet_sk(sk1), *inet2 = inet_sk(sk2); | 219 | struct inet_sock *inet1 = inet_sk(sk1), *inet2 = inet_sk(sk2); |
216 | 220 | ||
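[Editor's note: factoring the allocator out as __udp_lib_get_port() lets another datagram protocol share it by passing its own hash table, rover and address comparator, which is what UDP-Lite needs. A hedged sketch of how a second user would wrap it -- udplite_hash and udplite_port_rover are assumed names, not quoted from this diff:

/* Sketch: a second protocol reuses the generic allocator by supplying
 * its own hash table, its own port rover and its own comparator. */
static struct hlist_head udplite_hash[UDP_HTABLE_SIZE];  /* assumed */
static int udplite_port_rover;                           /* assumed */

static int udplite_v4_get_port(struct sock *sk, unsigned short snum)
{
        return __udp_lib_get_port(sk, snum, udplite_hash,
                                  &udplite_port_rover,
                                  ipv4_rcv_saddr_equal);
}]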
@@ -224,34 +228,20 @@ static inline int udp_v4_get_port(struct sock *sk, unsigned short snum) | |||
224 | return udp_get_port(sk, snum, ipv4_rcv_saddr_equal); | 228 | return udp_get_port(sk, snum, ipv4_rcv_saddr_equal); |
225 | } | 229 | } |
226 | 230 | ||
227 | |||
228 | static void udp_v4_hash(struct sock *sk) | ||
229 | { | ||
230 | BUG(); | ||
231 | } | ||
232 | |||
233 | static void udp_v4_unhash(struct sock *sk) | ||
234 | { | ||
235 | write_lock_bh(&udp_hash_lock); | ||
236 | if (sk_del_node_init(sk)) { | ||
237 | inet_sk(sk)->num = 0; | ||
238 | sock_prot_dec_use(sk->sk_prot); | ||
239 | } | ||
240 | write_unlock_bh(&udp_hash_lock); | ||
241 | } | ||
242 | |||
243 | /* UDP is nearly always wildcards out the wazoo, it makes no sense to try | 231 | /* UDP is nearly always wildcards out the wazoo, it makes no sense to try |
244 | * harder than this. -DaveM | 232 | * harder than this. -DaveM |
245 | */ | 233 | */ |
246 | static struct sock *udp_v4_lookup_longway(__be32 saddr, __be16 sport, | 234 | static struct sock *__udp4_lib_lookup(__be32 saddr, __be16 sport, |
247 | __be32 daddr, __be16 dport, int dif) | 235 | __be32 daddr, __be16 dport, |
236 | int dif, struct hlist_head udptable[]) | ||
248 | { | 237 | { |
249 | struct sock *sk, *result = NULL; | 238 | struct sock *sk, *result = NULL; |
250 | struct hlist_node *node; | 239 | struct hlist_node *node; |
251 | unsigned short hnum = ntohs(dport); | 240 | unsigned short hnum = ntohs(dport); |
252 | int badness = -1; | 241 | int badness = -1; |
253 | 242 | ||
254 | sk_for_each(sk, node, &udp_hash[hnum & (UDP_HTABLE_SIZE - 1)]) { | 243 | read_lock(&udp_hash_lock); |
244 | sk_for_each(sk, node, &udptable[hnum & (UDP_HTABLE_SIZE - 1)]) { | ||
255 | struct inet_sock *inet = inet_sk(sk); | 245 | struct inet_sock *inet = inet_sk(sk); |
256 | 246 | ||
257 | if (inet->num == hnum && !ipv6_only_sock(sk)) { | 247 | if (inet->num == hnum && !ipv6_only_sock(sk)) { |
@@ -285,20 +275,10 @@ static struct sock *udp_v4_lookup_longway(__be32 saddr, __be16 sport, | |||
285 | } | 275 | } |
286 | } | 276 | } |
287 | } | 277 | } |
288 | return result; | 278 | if (result) |
289 | } | 279 | sock_hold(result); |
290 | |||
291 | static __inline__ struct sock *udp_v4_lookup(__be32 saddr, __be16 sport, | ||
292 | __be32 daddr, __be16 dport, int dif) | ||
293 | { | ||
294 | struct sock *sk; | ||
295 | |||
296 | read_lock(&udp_hash_lock); | ||
297 | sk = udp_v4_lookup_longway(saddr, sport, daddr, dport, dif); | ||
298 | if (sk) | ||
299 | sock_hold(sk); | ||
300 | read_unlock(&udp_hash_lock); | 280 | read_unlock(&udp_hash_lock); |
301 | return sk; | 281 | return result; |
302 | } | 282 | } |
303 | 283 | ||
304 | static inline struct sock *udp_v4_mcast_next(struct sock *sk, | 284 | static inline struct sock *udp_v4_mcast_next(struct sock *sk, |
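[Editor's note: __udp4_lib_lookup() keeps the "badness" best-match search: a socket scores for every bound field (local address, remote address, remote port, device) that matches the packet exactly, a mismatch on a bound field disqualifies it, and the highest score wins, so a connected socket beats a wildcard one. Simplified, the idea reduces to a scoring function like this (field names follow struct inet_sock as used above; the real code weights the score differently):

/* More exactly-matched fields => better candidate; a wildcard (zero)
 * field neither helps nor disqualifies. */
static int udp_match_score(const struct inet_sock *inet,
                           __be32 saddr, __be16 sport,
                           __be32 daddr, int dif, int bound_dev_if)
{
        int score = 0;

        if (inet->rcv_saddr) {
                if (inet->rcv_saddr != daddr)
                        return -1;      /* bound to another local address */
                score++;
        }
        if (inet->daddr) {
                if (inet->daddr != saddr)
                        return -1;      /* connected to someone else */
                score++;
        }
        if (inet->dport) {
                if (inet->dport != sport)
                        return -1;
                score++;
        }
        if (bound_dev_if) {
                if (bound_dev_if != dif)
                        return -1;
                score++;
        }
        return score;
}]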
@@ -340,7 +320,7 @@ found: | |||
340 | * to find the appropriate port. | 320 | * to find the appropriate port. |
341 | */ | 321 | */ |
342 | 322 | ||
343 | void udp_err(struct sk_buff *skb, u32 info) | 323 | void __udp4_lib_err(struct sk_buff *skb, u32 info, struct hlist_head udptable[]) |
344 | { | 324 | { |
345 | struct inet_sock *inet; | 325 | struct inet_sock *inet; |
346 | struct iphdr *iph = (struct iphdr*)skb->data; | 326 | struct iphdr *iph = (struct iphdr*)skb->data; |
@@ -351,7 +331,8 @@ void udp_err(struct sk_buff *skb, u32 info) | |||
351 | int harderr; | 331 | int harderr; |
352 | int err; | 332 | int err; |
353 | 333 | ||
354 | sk = udp_v4_lookup(iph->daddr, uh->dest, iph->saddr, uh->source, skb->dev->ifindex); | 334 | sk = __udp4_lib_lookup(iph->daddr, uh->dest, iph->saddr, uh->source, |
335 | skb->dev->ifindex, udptable ); | ||
355 | if (sk == NULL) { | 336 | if (sk == NULL) { |
356 | ICMP_INC_STATS_BH(ICMP_MIB_INERRORS); | 337 | ICMP_INC_STATS_BH(ICMP_MIB_INERRORS); |
357 | return; /* No socket for error */ | 338 | return; /* No socket for error */ |
@@ -405,6 +386,11 @@ out: | |||
405 | sock_put(sk); | 386 | sock_put(sk); |
406 | } | 387 | } |
407 | 388 | ||
389 | __inline__ void udp_err(struct sk_buff *skb, u32 info) | ||
390 | { | ||
391 | return __udp4_lib_err(skb, info, udp_hash); | ||
392 | } | ||
393 | |||
408 | /* | 394 | /* |
409 | * Throw away all pending data and cancel the corking. Socket is locked. | 395 | * Throw away all pending data and cancel the corking. Socket is locked. |
410 | */ | 396 | */ |
@@ -419,16 +405,58 @@ static void udp_flush_pending_frames(struct sock *sk) | |||
419 | } | 405 | } |
420 | } | 406 | } |
421 | 407 | ||
408 | /** | ||
409 | * udp4_hwcsum_outgoing - handle outgoing HW checksumming | ||
410 | * @sk: socket we are sending on | ||
411 | * @skb: sk_buff containing the filled-in UDP header | ||
412 | * (checksum field must be zeroed out) | ||
413 | */ | ||
414 | static void udp4_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb, | ||
415 | __be32 src, __be32 dst, int len ) | ||
416 | { | ||
417 | unsigned int offset; | ||
418 | struct udphdr *uh = skb->h.uh; | ||
419 | __wsum csum = 0; | ||
420 | |||
421 | if (skb_queue_len(&sk->sk_write_queue) == 1) { | ||
422 | /* | ||
423 | * Only one fragment on the socket. | ||
424 | */ | ||
425 | skb->csum_offset = offsetof(struct udphdr, check); | ||
426 | uh->check = ~csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, 0); | ||
427 | } else { | ||
428 | /* | ||
429 | * HW-checksum won't work as there are two or more | ||
430 | * fragments on the socket so that all csums of sk_buffs | ||
431 | * should be together | ||
432 | */ | ||
433 | offset = skb->h.raw - skb->data; | ||
434 | skb->csum = skb_checksum(skb, offset, skb->len - offset, 0); | ||
435 | |||
436 | skb->ip_summed = CHECKSUM_NONE; | ||
437 | |||
438 | skb_queue_walk(&sk->sk_write_queue, skb) { | ||
439 | csum = csum_add(csum, skb->csum); | ||
440 | } | ||
441 | |||
442 | uh->check = csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, csum); | ||
443 | if (uh->check == 0) | ||
444 | uh->check = CSUM_MANGLED_0; | ||
445 | } | ||
446 | } | ||
447 | |||
422 | /* | 448 | /* |
423 | * Push out all pending data as one UDP datagram. Socket is locked. | 449 | * Push out all pending data as one UDP datagram. Socket is locked. |
424 | */ | 450 | */ |
425 | static int udp_push_pending_frames(struct sock *sk, struct udp_sock *up) | 451 | static int udp_push_pending_frames(struct sock *sk) |
426 | { | 452 | { |
453 | struct udp_sock *up = udp_sk(sk); | ||
427 | struct inet_sock *inet = inet_sk(sk); | 454 | struct inet_sock *inet = inet_sk(sk); |
428 | struct flowi *fl = &inet->cork.fl; | 455 | struct flowi *fl = &inet->cork.fl; |
429 | struct sk_buff *skb; | 456 | struct sk_buff *skb; |
430 | struct udphdr *uh; | 457 | struct udphdr *uh; |
431 | int err = 0; | 458 | int err = 0; |
459 | __wsum csum = 0; | ||
432 | 460 | ||
433 | /* Grab the skbuff where UDP header space exists. */ | 461 | /* Grab the skbuff where UDP header space exists. */ |
434 | if ((skb = skb_peek(&sk->sk_write_queue)) == NULL) | 462 | if ((skb = skb_peek(&sk->sk_write_queue)) == NULL) |
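[Editor's note: udp4_hwcsum_outgoing() leans on the standard UDP pseudo-header: what goes into uh->check is derived from source address, destination address, UDP length and protocol, so the NIC (or the software path) only has to add the payload sum and fold. A userspace sketch of that arithmetic, assuming host-byte-order inputs -- it is not the kernel's csum_tcpudp_magic(), which additionally mixes in the payload sum and returns the complement:

#include <stdint.h>

/* Ones'-complement sum of the UDP pseudo-header, folded to 16 bits. */
static uint16_t pseudo_hdr_sum(uint32_t saddr, uint32_t daddr,
                               uint16_t len, uint8_t proto)
{
        uint64_t sum = 0;

        sum += (saddr >> 16) + (saddr & 0xffff);
        sum += (daddr >> 16) + (daddr & 0xffff);
        sum += proto;        /* zero byte + protocol number field */
        sum += len;          /* UDP length, header included */

        while (sum >> 16)    /* fold the carries back in */
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)sum;
}]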
@@ -443,52 +471,28 @@ static int udp_push_pending_frames(struct sock *sk, struct udp_sock *up) | |||
443 | uh->len = htons(up->len); | 471 | uh->len = htons(up->len); |
444 | uh->check = 0; | 472 | uh->check = 0; |
445 | 473 | ||
446 | if (sk->sk_no_check == UDP_CSUM_NOXMIT) { | 474 | if (up->pcflag) /* UDP-Lite */ |
475 | csum = udplite_csum_outgoing(sk, skb); | ||
476 | |||
477 | else if (sk->sk_no_check == UDP_CSUM_NOXMIT) { /* UDP csum disabled */ | ||
478 | |||
447 | skb->ip_summed = CHECKSUM_NONE; | 479 | skb->ip_summed = CHECKSUM_NONE; |
448 | goto send; | 480 | goto send; |
449 | } | ||
450 | 481 | ||
451 | if (skb_queue_len(&sk->sk_write_queue) == 1) { | 482 | } else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */ |
452 | /* | ||
453 | * Only one fragment on the socket. | ||
454 | */ | ||
455 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | ||
456 | skb->csum = offsetof(struct udphdr, check); | ||
457 | uh->check = ~csum_tcpudp_magic(fl->fl4_src, fl->fl4_dst, | ||
458 | up->len, IPPROTO_UDP, 0); | ||
459 | } else { | ||
460 | skb->csum = csum_partial((char *)uh, | ||
461 | sizeof(struct udphdr), skb->csum); | ||
462 | uh->check = csum_tcpudp_magic(fl->fl4_src, fl->fl4_dst, | ||
463 | up->len, IPPROTO_UDP, skb->csum); | ||
464 | if (uh->check == 0) | ||
465 | uh->check = -1; | ||
466 | } | ||
467 | } else { | ||
468 | unsigned int csum = 0; | ||
469 | /* | ||
470 | * HW-checksum won't work as there are two or more | ||
471 | * fragments on the socket so that all csums of sk_buffs | ||
472 | * should be together. | ||
473 | */ | ||
474 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | ||
475 | int offset = (unsigned char *)uh - skb->data; | ||
476 | skb->csum = skb_checksum(skb, offset, skb->len - offset, 0); | ||
477 | 483 | ||
478 | skb->ip_summed = CHECKSUM_NONE; | 484 | udp4_hwcsum_outgoing(sk, skb, fl->fl4_src,fl->fl4_dst, up->len); |
479 | } else { | 485 | goto send; |
480 | skb->csum = csum_partial((char *)uh, | 486 | |
481 | sizeof(struct udphdr), skb->csum); | 487 | } else /* `normal' UDP */ |
482 | } | 488 | csum = udp_csum_outgoing(sk, skb); |
489 | |||
490 | /* add protocol-dependent pseudo-header */ | ||
491 | uh->check = csum_tcpudp_magic(fl->fl4_src, fl->fl4_dst, up->len, | ||
492 | sk->sk_protocol, csum ); | ||
493 | if (uh->check == 0) | ||
494 | uh->check = CSUM_MANGLED_0; | ||
483 | 495 | ||
484 | skb_queue_walk(&sk->sk_write_queue, skb) { | ||
485 | csum = csum_add(csum, skb->csum); | ||
486 | } | ||
487 | uh->check = csum_tcpudp_magic(fl->fl4_src, fl->fl4_dst, | ||
488 | up->len, IPPROTO_UDP, csum); | ||
489 | if (uh->check == 0) | ||
490 | uh->check = -1; | ||
491 | } | ||
492 | send: | 496 | send: |
493 | err = ip_push_pending_frames(sk); | 497 | err = ip_push_pending_frames(sk); |
494 | out: | 498 | out: |
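[Editor's note: the rewritten send path also makes the zero-checksum special case explicit: on the wire a UDP checksum field of 0 means "no checksum was generated", so a result that genuinely folds to 0 must be transmitted as all-ones instead, which is what CSUM_MANGLED_0 (numerically 0xffff) stands for; UDP-Lite keeps the same substitution because its checksum is mandatory. In plain C:

#include <stdint.h>

#define CSUM_MANGLED_0 ((uint16_t)0xffff)  /* same value as the kernel macro */

/* Final fix-up after the ones'-complement checksum is computed:
 * 0 is reserved to mean "sender did not compute a checksum". */
static uint16_t udp_finalize_check(uint16_t computed)
{
        return computed == 0 ? CSUM_MANGLED_0 : computed;
}]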
@@ -497,12 +501,6 @@ out: | |||
497 | return err; | 501 | return err; |
498 | } | 502 | } |
499 | 503 | ||
500 | |||
501 | static unsigned short udp_check(struct udphdr *uh, int len, __be32 saddr, __be32 daddr, unsigned long base) | ||
502 | { | ||
503 | return(csum_tcpudp_magic(saddr, daddr, len, IPPROTO_UDP, base)); | ||
504 | } | ||
505 | |||
506 | int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | 504 | int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, |
507 | size_t len) | 505 | size_t len) |
508 | { | 506 | { |
@@ -516,8 +514,9 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
516 | __be32 daddr, faddr, saddr; | 514 | __be32 daddr, faddr, saddr; |
517 | __be16 dport; | 515 | __be16 dport; |
518 | u8 tos; | 516 | u8 tos; |
519 | int err; | 517 | int err, is_udplite = up->pcflag; |
520 | int corkreq = up->corkflag || msg->msg_flags&MSG_MORE; | 518 | int corkreq = up->corkflag || msg->msg_flags&MSG_MORE; |
519 | int (*getfrag)(void *, char *, int, int, int, struct sk_buff *); | ||
521 | 520 | ||
522 | if (len > 0xFFFF) | 521 | if (len > 0xFFFF) |
523 | return -EMSGSIZE; | 522 | return -EMSGSIZE; |
@@ -622,7 +621,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
622 | { .daddr = faddr, | 621 | { .daddr = faddr, |
623 | .saddr = saddr, | 622 | .saddr = saddr, |
624 | .tos = tos } }, | 623 | .tos = tos } }, |
625 | .proto = IPPROTO_UDP, | 624 | .proto = sk->sk_protocol, |
626 | .uli_u = { .ports = | 625 | .uli_u = { .ports = |
627 | { .sport = inet->sport, | 626 | { .sport = inet->sport, |
628 | .dport = dport } } }; | 627 | .dport = dport } } }; |
@@ -668,13 +667,14 @@ back_from_confirm: | |||
668 | 667 | ||
669 | do_append_data: | 668 | do_append_data: |
670 | up->len += ulen; | 669 | up->len += ulen; |
671 | err = ip_append_data(sk, ip_generic_getfrag, msg->msg_iov, ulen, | 670 | getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag; |
672 | sizeof(struct udphdr), &ipc, rt, | 671 | err = ip_append_data(sk, getfrag, msg->msg_iov, ulen, |
672 | sizeof(struct udphdr), &ipc, rt, | ||
673 | corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags); | 673 | corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags); |
674 | if (err) | 674 | if (err) |
675 | udp_flush_pending_frames(sk); | 675 | udp_flush_pending_frames(sk); |
676 | else if (!corkreq) | 676 | else if (!corkreq) |
677 | err = udp_push_pending_frames(sk, up); | 677 | err = udp_push_pending_frames(sk); |
678 | else if (unlikely(skb_queue_empty(&sk->sk_write_queue))) | 678 | else if (unlikely(skb_queue_empty(&sk->sk_write_queue))) |
679 | up->pending = 0; | 679 | up->pending = 0; |
680 | release_sock(sk); | 680 | release_sock(sk); |
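[Editor's note: ip_append_data() copies the user's iovec through the getfrag callback declared above; the callback copies 'len' bytes of payload at 'offset' into the frame and, on the plain-UDP path, also accumulates the running checksum, which is why UDP-Lite substitutes its own udplite_getfrag to honour partial coverage. A toy callback with the same prototype, copying from a flat buffer instead of an iovec, purely illustrative:

#include <linux/string.h>
#include <linux/skbuff.h>

/* Matches the pointer type used above:
 * int (*getfrag)(void *from, char *to, int offset, int len, int odd,
 *                struct sk_buff *skb);
 * 'from' is caller context, 'to' points into the frame being built. */
static int flatbuf_getfrag(void *from, char *to, int offset, int len,
                           int odd, struct sk_buff *skb)
{
        memcpy(to, (char *)from + offset, len);
        /* A real UDP getfrag would also fold the copied bytes into
         * skb->csum here when software checksumming is in use. */
        return 0;
}]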
@@ -684,7 +684,7 @@ out: | |||
684 | if (free) | 684 | if (free) |
685 | kfree(ipc.opt); | 685 | kfree(ipc.opt); |
686 | if (!err) { | 686 | if (!err) { |
687 | UDP_INC_STATS_USER(UDP_MIB_OUTDATAGRAMS); | 687 | UDP_INC_STATS_USER(UDP_MIB_OUTDATAGRAMS, is_udplite); |
688 | return len; | 688 | return len; |
689 | } | 689 | } |
690 | /* | 690 | /* |
@@ -695,7 +695,7 @@ out: | |||
695 | * seems like overkill. | 695 | * seems like overkill. |
696 | */ | 696 | */ |
697 | if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { | 697 | if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { |
698 | UDP_INC_STATS_USER(UDP_MIB_SNDBUFERRORS); | 698 | UDP_INC_STATS_USER(UDP_MIB_SNDBUFERRORS, is_udplite); |
699 | } | 699 | } |
700 | return err; | 700 | return err; |
701 | 701 | ||
@@ -707,8 +707,8 @@ do_confirm: | |||
707 | goto out; | 707 | goto out; |
708 | } | 708 | } |
709 | 709 | ||
710 | static int udp_sendpage(struct sock *sk, struct page *page, int offset, | 710 | int udp_sendpage(struct sock *sk, struct page *page, int offset, |
711 | size_t size, int flags) | 711 | size_t size, int flags) |
712 | { | 712 | { |
713 | struct udp_sock *up = udp_sk(sk); | 713 | struct udp_sock *up = udp_sk(sk); |
714 | int ret; | 714 | int ret; |
@@ -747,7 +747,7 @@ static int udp_sendpage(struct sock *sk, struct page *page, int offset, | |||
747 | 747 | ||
748 | up->len += size; | 748 | up->len += size; |
749 | if (!(up->corkflag || (flags&MSG_MORE))) | 749 | if (!(up->corkflag || (flags&MSG_MORE))) |
750 | ret = udp_push_pending_frames(sk, up); | 750 | ret = udp_push_pending_frames(sk); |
751 | if (!ret) | 751 | if (!ret) |
752 | ret = size; | 752 | ret = size; |
753 | out: | 753 | out: |
@@ -795,29 +795,18 @@ int udp_ioctl(struct sock *sk, int cmd, unsigned long arg) | |||
795 | return(0); | 795 | return(0); |
796 | } | 796 | } |
797 | 797 | ||
798 | static __inline__ int __udp_checksum_complete(struct sk_buff *skb) | ||
799 | { | ||
800 | return __skb_checksum_complete(skb); | ||
801 | } | ||
802 | |||
803 | static __inline__ int udp_checksum_complete(struct sk_buff *skb) | ||
804 | { | ||
805 | return skb->ip_summed != CHECKSUM_UNNECESSARY && | ||
806 | __udp_checksum_complete(skb); | ||
807 | } | ||
808 | |||
809 | /* | 798 | /* |
810 | * This should be easy, if there is something there we | 799 | * This should be easy, if there is something there we |
811 | * return it, otherwise we block. | 800 | * return it, otherwise we block. |
812 | */ | 801 | */ |
813 | 802 | ||
814 | static int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | 803 | int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, |
815 | size_t len, int noblock, int flags, int *addr_len) | 804 | size_t len, int noblock, int flags, int *addr_len) |
816 | { | 805 | { |
817 | struct inet_sock *inet = inet_sk(sk); | 806 | struct inet_sock *inet = inet_sk(sk); |
818 | struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name; | 807 | struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name; |
819 | struct sk_buff *skb; | 808 | struct sk_buff *skb; |
820 | int copied, err; | 809 | int copied, err, copy_only, is_udplite = IS_UDPLITE(sk); |
821 | 810 | ||
822 | /* | 811 | /* |
823 | * Check any passed addresses | 812 | * Check any passed addresses |
@@ -839,15 +828,25 @@ try_again: | |||
839 | msg->msg_flags |= MSG_TRUNC; | 828 | msg->msg_flags |= MSG_TRUNC; |
840 | } | 829 | } |
841 | 830 | ||
842 | if (skb->ip_summed==CHECKSUM_UNNECESSARY) { | 831 | /* |
843 | err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov, | 832 | * Decide whether to checksum and/or copy data. |
844 | copied); | 833 | * |
845 | } else if (msg->msg_flags&MSG_TRUNC) { | 834 | * UDP: checksum may have been computed in HW, |
846 | if (__udp_checksum_complete(skb)) | 835 | * (re-)compute it if message is truncated. |
836 | * UDP-Lite: always needs to checksum, no HW support. | ||
837 | */ | ||
838 | copy_only = (skb->ip_summed==CHECKSUM_UNNECESSARY); | ||
839 | |||
840 | if (is_udplite || (!copy_only && msg->msg_flags&MSG_TRUNC)) { | ||
841 | if (__udp_lib_checksum_complete(skb)) | ||
847 | goto csum_copy_err; | 842 | goto csum_copy_err; |
848 | err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov, | 843 | copy_only = 1; |
849 | copied); | 844 | } |
850 | } else { | 845 | |
846 | if (copy_only) | ||
847 | err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), | ||
848 | msg->msg_iov, copied ); | ||
849 | else { | ||
851 | err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov); | 850 | err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov); |
852 | 851 | ||
853 | if (err == -EINVAL) | 852 | if (err == -EINVAL) |
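[Editor's note: the receive-side logic above boils down to: verify the checksum up front whenever a combined copy-and-verify pass is impossible (always for UDP-Lite, or when the read is truncated), otherwise let skb_copy_and_csum_datagram_iovec() validate while it copies. A compact restatement of that decision:

/* Sketch of the choice made in udp_recvmsg() above. */
static int must_checksum_first(int is_udplite, int copy_only, int truncated)
{
        /* UDP-Lite has no HW assist; truncation defeats copy-and-csum. */
        return is_udplite || (!copy_only && truncated);
}]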
@@ -880,7 +879,7 @@ out: | |||
880 | return err; | 879 | return err; |
881 | 880 | ||
882 | csum_copy_err: | 881 | csum_copy_err: |
883 | UDP_INC_STATS_BH(UDP_MIB_INERRORS); | 882 | UDP_INC_STATS_BH(UDP_MIB_INERRORS, is_udplite); |
884 | 883 | ||
885 | skb_kill_datagram(sk, skb, flags); | 884 | skb_kill_datagram(sk, skb, flags); |
886 | 885 | ||
@@ -912,11 +911,6 @@ int udp_disconnect(struct sock *sk, int flags) | |||
912 | return 0; | 911 | return 0; |
913 | } | 912 | } |
914 | 913 | ||
915 | static void udp_close(struct sock *sk, long timeout) | ||
916 | { | ||
917 | sk_common_release(sk); | ||
918 | } | ||
919 | |||
920 | /* return: | 914 | /* return: |
921 | * 1 if the UDP system should process it | 915 | * 1 if the UDP system should process it |
922 | * 0 if we should drop this packet | 916 | * 0 if we should drop this packet |
@@ -928,23 +922,32 @@ static int udp_encap_rcv(struct sock * sk, struct sk_buff *skb) | |||
928 | return 1; | 922 | return 1; |
929 | #else | 923 | #else |
930 | struct udp_sock *up = udp_sk(sk); | 924 | struct udp_sock *up = udp_sk(sk); |
931 | struct udphdr *uh = skb->h.uh; | 925 | struct udphdr *uh; |
932 | struct iphdr *iph; | 926 | struct iphdr *iph; |
933 | int iphlen, len; | 927 | int iphlen, len; |
934 | 928 | ||
935 | __u8 *udpdata = (__u8 *)uh + sizeof(struct udphdr); | 929 | __u8 *udpdata; |
936 | __be32 *udpdata32 = (__be32 *)udpdata; | 930 | __be32 *udpdata32; |
937 | __u16 encap_type = up->encap_type; | 931 | __u16 encap_type = up->encap_type; |
938 | 932 | ||
939 | /* if we're overly short, let UDP handle it */ | 933 | /* if we're overly short, let UDP handle it */ |
940 | if (udpdata > skb->tail) | 934 | len = skb->len - sizeof(struct udphdr); |
935 | if (len <= 0) | ||
941 | return 1; | 936 | return 1; |
942 | 937 | ||
943 | /* if this is not encapsulated socket, then just return now */ | 938 | /* if this is not encapsulated socket, then just return now */ |
944 | if (!encap_type) | 939 | if (!encap_type) |
945 | return 1; | 940 | return 1; |
946 | 941 | ||
947 | len = skb->tail - udpdata; | 942 | /* If this is a paged skb, make sure we pull up |
943 | * whatever data we need to look at. */ | ||
944 | if (!pskb_may_pull(skb, sizeof(struct udphdr) + min(len, 8))) | ||
945 | return 1; | ||
946 | |||
947 | /* Now we can get the pointers */ | ||
948 | uh = skb->h.uh; | ||
949 | udpdata = (__u8 *)uh + sizeof(struct udphdr); | ||
950 | udpdata32 = (__be32 *)udpdata; | ||
948 | 951 | ||
949 | switch (encap_type) { | 952 | switch (encap_type) { |
950 | default: | 953 | default: |
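[Editor's note: the encap_rcv fix is the usual paged-skb discipline: header bytes may live in page fragments, so pskb_may_pull() must guarantee the first N bytes are linear before any pointer into them is dereferenced. The pattern in isolation (my_hdr is a made-up header type for illustration):

#include <linux/skbuff.h>

struct my_hdr { __be16 a, b; };         /* made-up header */

static int parse_my_header(struct sk_buff *skb)
{
        struct my_hdr *h;

        /* Make sure the bytes we are about to read are in the linear
         * area; on a paged skb they may not be until pulled. */
        if (!pskb_may_pull(skb, sizeof(struct my_hdr)))
                return -EINVAL;

        h = (struct my_hdr *)skb->data; /* now safe to dereference */
        return ntohs(h->a);             /* ... inspect the fields ... */
}]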
@@ -1013,7 +1016,7 @@ static int udp_encap_rcv(struct sock * sk, struct sk_buff *skb) | |||
1013 | * Note that in the success and error cases, the skb is assumed to | 1016 | * Note that in the success and error cases, the skb is assumed to |
1014 | * have either been requeued or freed. | 1017 | * have either been requeued or freed. |
1015 | */ | 1018 | */ |
1016 | static int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb) | 1019 | int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb) |
1017 | { | 1020 | { |
1018 | struct udp_sock *up = udp_sk(sk); | 1021 | struct udp_sock *up = udp_sk(sk); |
1019 | int rc; | 1022 | int rc; |
@@ -1021,10 +1024,8 @@ static int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb) | |||
1021 | /* | 1024 | /* |
1022 | * Charge it to the socket, dropping if the queue is full. | 1025 | * Charge it to the socket, dropping if the queue is full. |
1023 | */ | 1026 | */ |
1024 | if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) { | 1027 | if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) |
1025 | kfree_skb(skb); | 1028 | goto drop; |
1026 | return -1; | ||
1027 | } | ||
1028 | nf_reset(skb); | 1029 | nf_reset(skb); |
1029 | 1030 | ||
1030 | if (up->encap_type) { | 1031 | if (up->encap_type) { |
@@ -1048,31 +1049,68 @@ static int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb) | |||
1048 | if (ret < 0) { | 1049 | if (ret < 0) { |
1049 | /* process the ESP packet */ | 1050 | /* process the ESP packet */ |
1050 | ret = xfrm4_rcv_encap(skb, up->encap_type); | 1051 | ret = xfrm4_rcv_encap(skb, up->encap_type); |
1051 | UDP_INC_STATS_BH(UDP_MIB_INDATAGRAMS); | 1052 | UDP_INC_STATS_BH(UDP_MIB_INDATAGRAMS, up->pcflag); |
1052 | return -ret; | 1053 | return -ret; |
1053 | } | 1054 | } |
1054 | /* FALLTHROUGH -- it's a UDP Packet */ | 1055 | /* FALLTHROUGH -- it's a UDP Packet */ |
1055 | } | 1056 | } |
1056 | 1057 | ||
1057 | if (sk->sk_filter && skb->ip_summed != CHECKSUM_UNNECESSARY) { | 1058 | /* |
1058 | if (__udp_checksum_complete(skb)) { | 1059 | * UDP-Lite specific tests, ignored on UDP sockets |
1059 | UDP_INC_STATS_BH(UDP_MIB_INERRORS); | 1060 | */ |
1060 | kfree_skb(skb); | 1061 | if ((up->pcflag & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) { |
1061 | return -1; | 1062 | |
1063 | /* | ||
1064 | * MIB statistics other than incrementing the error count are | ||
1065 | * disabled for the following two types of errors: these depend | ||
1066 | * on the application settings, not on the functioning of the | ||
1067 | * protocol stack as such. | ||
1068 | * | ||
1069 | * RFC 3828 here recommends (sec 3.3): "There should also be a | ||
1070 | * way ... to ... at least let the receiving application block | ||
1071 | * delivery of packets with coverage values less than a value | ||
1072 | * provided by the application." | ||
1073 | */ | ||
1074 | if (up->pcrlen == 0) { /* full coverage was set */ | ||
1075 | LIMIT_NETDEBUG(KERN_WARNING "UDPLITE: partial coverage " | ||
1076 | "%d while full coverage %d requested\n", | ||
1077 | UDP_SKB_CB(skb)->cscov, skb->len); | ||
1078 | goto drop; | ||
1062 | } | 1079 | } |
1080 | /* The next case involves violating the min. coverage requested | ||
1081 | * by the receiver. This is subtle: if receiver wants x and x is | ||
1082 | * greater than the buffersize/MTU then receiver will complain | ||
1083 | * that it wants x while sender emits packets of smaller size y. | ||
1084 | * Therefore the above ...()->partial_cov statement is essential. | ||
1085 | */ | ||
1086 | if (UDP_SKB_CB(skb)->cscov < up->pcrlen) { | ||
1087 | LIMIT_NETDEBUG(KERN_WARNING | ||
1088 | "UDPLITE: coverage %d too small, need min %d\n", | ||
1089 | UDP_SKB_CB(skb)->cscov, up->pcrlen); | ||
1090 | goto drop; | ||
1091 | } | ||
1092 | } | ||
1093 | |||
1094 | if (sk->sk_filter && skb->ip_summed != CHECKSUM_UNNECESSARY) { | ||
1095 | if (__udp_lib_checksum_complete(skb)) | ||
1096 | goto drop; | ||
1063 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 1097 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
1064 | } | 1098 | } |
1065 | 1099 | ||
1066 | if ((rc = sock_queue_rcv_skb(sk,skb)) < 0) { | 1100 | if ((rc = sock_queue_rcv_skb(sk,skb)) < 0) { |
1067 | /* Note that an ENOMEM error is charged twice */ | 1101 | /* Note that an ENOMEM error is charged twice */ |
1068 | if (rc == -ENOMEM) | 1102 | if (rc == -ENOMEM) |
1069 | UDP_INC_STATS_BH(UDP_MIB_RCVBUFERRORS); | 1103 | UDP_INC_STATS_BH(UDP_MIB_RCVBUFERRORS, up->pcflag); |
1070 | UDP_INC_STATS_BH(UDP_MIB_INERRORS); | 1104 | goto drop; |
1071 | kfree_skb(skb); | ||
1072 | return -1; | ||
1073 | } | 1105 | } |
1074 | UDP_INC_STATS_BH(UDP_MIB_INDATAGRAMS); | 1106 | |
1107 | UDP_INC_STATS_BH(UDP_MIB_INDATAGRAMS, up->pcflag); | ||
1075 | return 0; | 1108 | return 0; |
1109 | |||
1110 | drop: | ||
1111 | UDP_INC_STATS_BH(UDP_MIB_INERRORS, up->pcflag); | ||
1112 | kfree_skb(skb); | ||
1113 | return -1; | ||
1076 | } | 1114 | } |
1077 | 1115 | ||
1078 | /* | 1116 | /* |
@@ -1081,14 +1119,16 @@ static int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb) | |||
1081 | * Note: called only from the BH handler context, | 1119 | * Note: called only from the BH handler context, |
1082 | * so we don't need to lock the hashes. | 1120 | * so we don't need to lock the hashes. |
1083 | */ | 1121 | */ |
1084 | static int udp_v4_mcast_deliver(struct sk_buff *skb, struct udphdr *uh, | 1122 | static int __udp4_lib_mcast_deliver(struct sk_buff *skb, |
1085 | __be32 saddr, __be32 daddr) | 1123 | struct udphdr *uh, |
1124 | __be32 saddr, __be32 daddr, | ||
1125 | struct hlist_head udptable[]) | ||
1086 | { | 1126 | { |
1087 | struct sock *sk; | 1127 | struct sock *sk; |
1088 | int dif; | 1128 | int dif; |
1089 | 1129 | ||
1090 | read_lock(&udp_hash_lock); | 1130 | read_lock(&udp_hash_lock); |
1091 | sk = sk_head(&udp_hash[ntohs(uh->dest) & (UDP_HTABLE_SIZE - 1)]); | 1131 | sk = sk_head(&udptable[ntohs(uh->dest) & (UDP_HTABLE_SIZE - 1)]); |
1092 | dif = skb->dev->ifindex; | 1132 | dif = skb->dev->ifindex; |
1093 | sk = udp_v4_mcast_next(sk, uh->dest, daddr, uh->source, saddr, dif); | 1133 | sk = udp_v4_mcast_next(sk, uh->dest, daddr, uh->source, saddr, dif); |
1094 | if (sk) { | 1134 | if (sk) { |
@@ -1122,65 +1162,75 @@ static int udp_v4_mcast_deliver(struct sk_buff *skb, struct udphdr *uh, | |||
1122 | * Otherwise, csum completion requires checksumming packet body, | 1162 | * Otherwise, csum completion requires checksumming packet body, |
1123 | * including udp header and folding it to skb->csum. | 1163 | * including udp header and folding it to skb->csum. |
1124 | */ | 1164 | */ |
1125 | static void udp_checksum_init(struct sk_buff *skb, struct udphdr *uh, | 1165 | static inline void udp4_csum_init(struct sk_buff *skb, struct udphdr *uh) |
1126 | unsigned short ulen, __be32 saddr, __be32 daddr) | ||
1127 | { | 1166 | { |
1128 | if (uh->check == 0) { | 1167 | if (uh->check == 0) { |
1129 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 1168 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
1130 | } else if (skb->ip_summed == CHECKSUM_COMPLETE) { | 1169 | } else if (skb->ip_summed == CHECKSUM_COMPLETE) { |
1131 | if (!udp_check(uh, ulen, saddr, daddr, skb->csum)) | 1170 | if (!csum_tcpudp_magic(skb->nh.iph->saddr, skb->nh.iph->daddr, |
1171 | skb->len, IPPROTO_UDP, skb->csum )) | ||
1132 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 1172 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
1133 | } | 1173 | } |
1134 | if (skb->ip_summed != CHECKSUM_UNNECESSARY) | 1174 | if (skb->ip_summed != CHECKSUM_UNNECESSARY) |
1135 | skb->csum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0); | 1175 | skb->csum = csum_tcpudp_nofold(skb->nh.iph->saddr, |
1176 | skb->nh.iph->daddr, | ||
1177 | skb->len, IPPROTO_UDP, 0); | ||
1136 | /* Probably, we should checksum udp header (it should be in cache | 1178 | /* Probably, we should checksum udp header (it should be in cache |
1137 | * in any case) and data in tiny packets (< rx copybreak). | 1179 | * in any case) and data in tiny packets (< rx copybreak). |
1138 | */ | 1180 | */ |
1181 | |||
1182 | /* UDP = UDP-Lite with a non-partial checksum coverage */ | ||
1183 | UDP_SKB_CB(skb)->partial_cov = 0; | ||
1139 | } | 1184 | } |
1140 | 1185 | ||
1141 | /* | 1186 | /* |
1142 | * All we need to do is get the socket, and then do a checksum. | 1187 | * All we need to do is get the socket, and then do a checksum. |
1143 | */ | 1188 | */ |
1144 | 1189 | ||
1145 | int udp_rcv(struct sk_buff *skb) | 1190 | int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[], |
1191 | int is_udplite) | ||
1146 | { | 1192 | { |
1147 | struct sock *sk; | 1193 | struct sock *sk; |
1148 | struct udphdr *uh; | 1194 | struct udphdr *uh = skb->h.uh; |
1149 | unsigned short ulen; | 1195 | unsigned short ulen; |
1150 | struct rtable *rt = (struct rtable*)skb->dst; | 1196 | struct rtable *rt = (struct rtable*)skb->dst; |
1151 | __be32 saddr = skb->nh.iph->saddr; | 1197 | __be32 saddr = skb->nh.iph->saddr; |
1152 | __be32 daddr = skb->nh.iph->daddr; | 1198 | __be32 daddr = skb->nh.iph->daddr; |
1153 | int len = skb->len; | ||
1154 | 1199 | ||
1155 | /* | 1200 | /* |
1156 | * Validate the packet and the UDP length. | 1201 | * Validate the packet. |
1157 | */ | 1202 | */ |
1158 | if (!pskb_may_pull(skb, sizeof(struct udphdr))) | 1203 | if (!pskb_may_pull(skb, sizeof(struct udphdr))) |
1159 | goto no_header; | 1204 | goto drop; /* No space for header. */ |
1160 | |||
1161 | uh = skb->h.uh; | ||
1162 | 1205 | ||
1163 | ulen = ntohs(uh->len); | 1206 | ulen = ntohs(uh->len); |
1164 | 1207 | if (ulen > skb->len) | |
1165 | if (ulen > len || ulen < sizeof(*uh)) | ||
1166 | goto short_packet; | 1208 | goto short_packet; |
1167 | 1209 | ||
1168 | if (pskb_trim_rcsum(skb, ulen)) | 1210 | if(! is_udplite ) { /* UDP validates ulen. */ |
1169 | goto short_packet; | 1211 | |
1212 | if (ulen < sizeof(*uh) || pskb_trim_rcsum(skb, ulen)) | ||
1213 | goto short_packet; | ||
1170 | 1214 | ||
1171 | udp_checksum_init(skb, uh, ulen, saddr, daddr); | 1215 | udp4_csum_init(skb, uh); |
1216 | |||
1217 | } else { /* UDP-Lite validates cscov. */ | ||
1218 | if (udplite4_csum_init(skb, uh)) | ||
1219 | goto csum_error; | ||
1220 | } | ||
1172 | 1221 | ||
1173 | if(rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST)) | 1222 | if(rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST)) |
1174 | return udp_v4_mcast_deliver(skb, uh, saddr, daddr); | 1223 | return __udp4_lib_mcast_deliver(skb, uh, saddr, daddr, udptable); |
1175 | 1224 | ||
1176 | sk = udp_v4_lookup(saddr, uh->source, daddr, uh->dest, skb->dev->ifindex); | 1225 | sk = __udp4_lib_lookup(saddr, uh->source, daddr, uh->dest, |
1226 | skb->dev->ifindex, udptable ); | ||
1177 | 1227 | ||
1178 | if (sk != NULL) { | 1228 | if (sk != NULL) { |
1179 | int ret = udp_queue_rcv_skb(sk, skb); | 1229 | int ret = udp_queue_rcv_skb(sk, skb); |
1180 | sock_put(sk); | 1230 | sock_put(sk); |
1181 | 1231 | ||
1182 | /* a return value > 0 means to resubmit the input, but | 1232 | /* a return value > 0 means to resubmit the input, but |
1183 | * it it wants the return to be -protocol, or 0 | 1233 | * it wants the return to be -protocol, or 0 |
1184 | */ | 1234 | */ |
1185 | if (ret > 0) | 1235 | if (ret > 0) |
1186 | return -ret; | 1236 | return -ret; |
@@ -1192,10 +1242,10 @@ int udp_rcv(struct sk_buff *skb) | |||
1192 | nf_reset(skb); | 1242 | nf_reset(skb); |
1193 | 1243 | ||
1194 | /* No socket. Drop packet silently, if checksum is wrong */ | 1244 | /* No socket. Drop packet silently, if checksum is wrong */ |
1195 | if (udp_checksum_complete(skb)) | 1245 | if (udp_lib_checksum_complete(skb)) |
1196 | goto csum_error; | 1246 | goto csum_error; |
1197 | 1247 | ||
1198 | UDP_INC_STATS_BH(UDP_MIB_NOPORTS); | 1248 | UDP_INC_STATS_BH(UDP_MIB_NOPORTS, is_udplite); |
1199 | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); | 1249 | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); |
1200 | 1250 | ||
1201 | /* | 1251 | /* |
@@ -1206,36 +1256,40 @@ int udp_rcv(struct sk_buff *skb) | |||
1206 | return(0); | 1256 | return(0); |
1207 | 1257 | ||
1208 | short_packet: | 1258 | short_packet: |
1209 | LIMIT_NETDEBUG(KERN_DEBUG "UDP: short packet: From %u.%u.%u.%u:%u %d/%d to %u.%u.%u.%u:%u\n", | 1259 | LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: short packet: From %u.%u.%u.%u:%u %d/%d to %u.%u.%u.%u:%u\n", |
1260 | is_udplite? "-Lite" : "", | ||
1210 | NIPQUAD(saddr), | 1261 | NIPQUAD(saddr), |
1211 | ntohs(uh->source), | 1262 | ntohs(uh->source), |
1212 | ulen, | 1263 | ulen, |
1213 | len, | 1264 | skb->len, |
1214 | NIPQUAD(daddr), | 1265 | NIPQUAD(daddr), |
1215 | ntohs(uh->dest)); | 1266 | ntohs(uh->dest)); |
1216 | no_header: | 1267 | goto drop; |
1217 | UDP_INC_STATS_BH(UDP_MIB_INERRORS); | ||
1218 | kfree_skb(skb); | ||
1219 | return(0); | ||
1220 | 1268 | ||
1221 | csum_error: | 1269 | csum_error: |
1222 | /* | 1270 | /* |
1223 | * RFC1122: OK. Discards the bad packet silently (as far as | 1271 | * RFC1122: OK. Discards the bad packet silently (as far as |
1224 | * the network is concerned, anyway) as per 4.1.3.4 (MUST). | 1272 | * the network is concerned, anyway) as per 4.1.3.4 (MUST). |
1225 | */ | 1273 | */ |
1226 | LIMIT_NETDEBUG(KERN_DEBUG "UDP: bad checksum. From %d.%d.%d.%d:%d to %d.%d.%d.%d:%d ulen %d\n", | 1274 | LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: bad checksum. From %d.%d.%d.%d:%d to %d.%d.%d.%d:%d ulen %d\n", |
1275 | is_udplite? "-Lite" : "", | ||
1227 | NIPQUAD(saddr), | 1276 | NIPQUAD(saddr), |
1228 | ntohs(uh->source), | 1277 | ntohs(uh->source), |
1229 | NIPQUAD(daddr), | 1278 | NIPQUAD(daddr), |
1230 | ntohs(uh->dest), | 1279 | ntohs(uh->dest), |
1231 | ulen); | 1280 | ulen); |
1232 | drop: | 1281 | drop: |
1233 | UDP_INC_STATS_BH(UDP_MIB_INERRORS); | 1282 | UDP_INC_STATS_BH(UDP_MIB_INERRORS, is_udplite); |
1234 | kfree_skb(skb); | 1283 | kfree_skb(skb); |
1235 | return(0); | 1284 | return(0); |
1236 | } | 1285 | } |
1237 | 1286 | ||
1238 | static int udp_destroy_sock(struct sock *sk) | 1287 | __inline__ int udp_rcv(struct sk_buff *skb) |
1288 | { | ||
1289 | return __udp4_lib_rcv(skb, udp_hash, 0); | ||
1290 | } | ||
1291 | |||
1292 | int udp_destroy_sock(struct sock *sk) | ||
1239 | { | 1293 | { |
1240 | lock_sock(sk); | 1294 | lock_sock(sk); |
1241 | udp_flush_pending_frames(sk); | 1295 | udp_flush_pending_frames(sk); |
@@ -1246,8 +1300,9 @@ static int udp_destroy_sock(struct sock *sk) | |||
1246 | /* | 1300 | /* |
1247 | * Socket option code for UDP | 1301 | * Socket option code for UDP |
1248 | */ | 1302 | */ |
1249 | static int do_udp_setsockopt(struct sock *sk, int level, int optname, | 1303 | int udp_lib_setsockopt(struct sock *sk, int level, int optname, |
1250 | char __user *optval, int optlen) | 1304 | char __user *optval, int optlen, |
1305 | int (*push_pending_frames)(struct sock *)) | ||
1251 | { | 1306 | { |
1252 | struct udp_sock *up = udp_sk(sk); | 1307 | struct udp_sock *up = udp_sk(sk); |
1253 | int val; | 1308 | int val; |
@@ -1266,7 +1321,7 @@ static int do_udp_setsockopt(struct sock *sk, int level, int optname, | |||
1266 | } else { | 1321 | } else { |
1267 | up->corkflag = 0; | 1322 | up->corkflag = 0; |
1268 | lock_sock(sk); | 1323 | lock_sock(sk); |
1269 | udp_push_pending_frames(sk, up); | 1324 | (*push_pending_frames)(sk); |
1270 | release_sock(sk); | 1325 | release_sock(sk); |
1271 | } | 1326 | } |
1272 | break; | 1327 | break; |
@@ -1284,6 +1339,32 @@ static int do_udp_setsockopt(struct sock *sk, int level, int optname, | |||
1284 | } | 1339 | } |
1285 | break; | 1340 | break; |
1286 | 1341 | ||
1342 | /* | ||
1343 | * UDP-Lite's partial checksum coverage (RFC 3828). | ||
1344 | */ | ||
1345 | /* The sender sets actual checksum coverage length via this option. | ||
1346 | * The case coverage > packet length is handled by send module. */ | ||
1347 | case UDPLITE_SEND_CSCOV: | ||
1348 | if (!up->pcflag) /* Disable the option on UDP sockets */ | ||
1349 | return -ENOPROTOOPT; | ||
1350 | if (val != 0 && val < 8) /* Illegal coverage: use default (8) */ | ||
1351 | val = 8; | ||
1352 | up->pcslen = val; | ||
1353 | up->pcflag |= UDPLITE_SEND_CC; | ||
1354 | break; | ||
1355 | |||
1356 | /* The receiver specifies a minimum checksum coverage value. To make | ||
1357 | * sense, this should be set to at least 8 (as done below). If zero is | ||
1358 | * used, this again means full checksum coverage. */ | ||
1359 | case UDPLITE_RECV_CSCOV: | ||
1360 | if (!up->pcflag) /* Disable the option on UDP sockets */ | ||
1361 | return -ENOPROTOOPT; | ||
1362 | if (val != 0 && val < 8) /* Avoid silly minimal values. */ | ||
1363 | val = 8; | ||
1364 | up->pcrlen = val; | ||
1365 | up->pcflag |= UDPLITE_RECV_CC; | ||
1366 | break; | ||
1367 | |||
1287 | default: | 1368 | default: |
1288 | err = -ENOPROTOOPT; | 1369 | err = -ENOPROTOOPT; |
1289 | break; | 1370 | break; |
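[Editor's note: for context, this is how an application would drive the two new socket options. The numeric constants are guarded because older libc headers will not carry them; the values shown are assumptions based on this series, not quoted from the diff. A hedged userspace usage sketch:

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>

#ifndef IPPROTO_UDPLITE
#define IPPROTO_UDPLITE     136    /* assumed value */
#endif
#ifndef SOL_UDPLITE
#define SOL_UDPLITE         136    /* option level, assumed value */
#endif
#ifndef UDPLITE_SEND_CSCOV
#define UDPLITE_SEND_CSCOV  10     /* assumed value */
#endif
#ifndef UDPLITE_RECV_CSCOV
#define UDPLITE_RECV_CSCOV  11     /* assumed value */
#endif

int main(void)
{
        int cov = 20;  /* checksum covers the first 20 bytes of each datagram */
        int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDPLITE);

        if (fd < 0) {
                perror("socket");
                return 1;
        }
        /* Sender-side coverage; values 1..7 are rounded up to 8 above. */
        if (setsockopt(fd, SOL_UDPLITE, UDPLITE_SEND_CSCOV, &cov, sizeof(cov)))
                perror("UDPLITE_SEND_CSCOV");
        /* Receiver-side minimum acceptable coverage. */
        if (setsockopt(fd, SOL_UDPLITE, UDPLITE_RECV_CSCOV, &cov, sizeof(cov)))
                perror("UDPLITE_RECV_CSCOV");
        /* fd is left for process exit to close, to keep the sketch short. */
        return 0;
}]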
@@ -1292,26 +1373,28 @@ static int do_udp_setsockopt(struct sock *sk, int level, int optname, | |||
1292 | return err; | 1373 | return err; |
1293 | } | 1374 | } |
1294 | 1375 | ||
1295 | static int udp_setsockopt(struct sock *sk, int level, int optname, | 1376 | int udp_setsockopt(struct sock *sk, int level, int optname, |
1296 | char __user *optval, int optlen) | 1377 | char __user *optval, int optlen) |
1297 | { | 1378 | { |
1298 | if (level != SOL_UDP) | 1379 | if (level == SOL_UDP || level == SOL_UDPLITE) |
1299 | return ip_setsockopt(sk, level, optname, optval, optlen); | 1380 | return udp_lib_setsockopt(sk, level, optname, optval, optlen, |
1300 | return do_udp_setsockopt(sk, level, optname, optval, optlen); | 1381 | udp_push_pending_frames); |
1382 | return ip_setsockopt(sk, level, optname, optval, optlen); | ||
1301 | } | 1383 | } |
1302 | 1384 | ||
1303 | #ifdef CONFIG_COMPAT | 1385 | #ifdef CONFIG_COMPAT |
1304 | static int compat_udp_setsockopt(struct sock *sk, int level, int optname, | 1386 | int compat_udp_setsockopt(struct sock *sk, int level, int optname, |
1305 | char __user *optval, int optlen) | 1387 | char __user *optval, int optlen) |
1306 | { | 1388 | { |
1307 | if (level != SOL_UDP) | 1389 | if (level == SOL_UDP || level == SOL_UDPLITE) |
1308 | return compat_ip_setsockopt(sk, level, optname, optval, optlen); | 1390 | return udp_lib_setsockopt(sk, level, optname, optval, optlen, |
1309 | return do_udp_setsockopt(sk, level, optname, optval, optlen); | 1391 | udp_push_pending_frames); |
1392 | return compat_ip_setsockopt(sk, level, optname, optval, optlen); | ||
1310 | } | 1393 | } |
1311 | #endif | 1394 | #endif |
1312 | 1395 | ||
1313 | static int do_udp_getsockopt(struct sock *sk, int level, int optname, | 1396 | int udp_lib_getsockopt(struct sock *sk, int level, int optname, |
1314 | char __user *optval, int __user *optlen) | 1397 | char __user *optval, int __user *optlen) |
1315 | { | 1398 | { |
1316 | struct udp_sock *up = udp_sk(sk); | 1399 | struct udp_sock *up = udp_sk(sk); |
1317 | int val, len; | 1400 | int val, len; |
@@ -1333,6 +1416,16 @@ static int do_udp_getsockopt(struct sock *sk, int level, int optname, | |||
1333 | val = up->encap_type; | 1416 | val = up->encap_type; |
1334 | break; | 1417 | break; |
1335 | 1418 | ||
1419 | /* The following two cannot be changed on UDP sockets, the return is | ||
1420 | * always 0 (which corresponds to the full checksum coverage of UDP). */ | ||
1421 | case UDPLITE_SEND_CSCOV: | ||
1422 | val = up->pcslen; | ||
1423 | break; | ||
1424 | |||
1425 | case UDPLITE_RECV_CSCOV: | ||
1426 | val = up->pcrlen; | ||
1427 | break; | ||
1428 | |||
1336 | default: | 1429 | default: |
1337 | return -ENOPROTOOPT; | 1430 | return -ENOPROTOOPT; |
1338 | }; | 1431 | }; |
@@ -1344,21 +1437,21 @@ static int do_udp_getsockopt(struct sock *sk, int level, int optname, | |||
1344 | return 0; | 1437 | return 0; |
1345 | } | 1438 | } |
1346 | 1439 | ||
1347 | static int udp_getsockopt(struct sock *sk, int level, int optname, | 1440 | int udp_getsockopt(struct sock *sk, int level, int optname, |
1348 | char __user *optval, int __user *optlen) | 1441 | char __user *optval, int __user *optlen) |
1349 | { | 1442 | { |
1350 | if (level != SOL_UDP) | 1443 | if (level == SOL_UDP || level == SOL_UDPLITE) |
1351 | return ip_getsockopt(sk, level, optname, optval, optlen); | 1444 | return udp_lib_getsockopt(sk, level, optname, optval, optlen); |
1352 | return do_udp_getsockopt(sk, level, optname, optval, optlen); | 1445 | return ip_getsockopt(sk, level, optname, optval, optlen); |
1353 | } | 1446 | } |
1354 | 1447 | ||
1355 | #ifdef CONFIG_COMPAT | 1448 | #ifdef CONFIG_COMPAT |
1356 | static int compat_udp_getsockopt(struct sock *sk, int level, int optname, | 1449 | int compat_udp_getsockopt(struct sock *sk, int level, int optname, |
1357 | char __user *optval, int __user *optlen) | 1450 | char __user *optval, int __user *optlen) |
1358 | { | 1451 | { |
1359 | if (level != SOL_UDP) | 1452 | if (level == SOL_UDP || level == SOL_UDPLITE) |
1360 | return compat_ip_getsockopt(sk, level, optname, optval, optlen); | 1453 | return udp_lib_getsockopt(sk, level, optname, optval, optlen); |
1361 | return do_udp_getsockopt(sk, level, optname, optval, optlen); | 1454 | return compat_ip_getsockopt(sk, level, optname, optval, optlen); |
1362 | } | 1455 | } |
1363 | #endif | 1456 | #endif |
1364 | /** | 1457 | /** |
@@ -1378,7 +1471,8 @@ unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait) | |||
1378 | { | 1471 | { |
1379 | unsigned int mask = datagram_poll(file, sock, wait); | 1472 | unsigned int mask = datagram_poll(file, sock, wait); |
1380 | struct sock *sk = sock->sk; | 1473 | struct sock *sk = sock->sk; |
1381 | 1474 | int is_lite = IS_UDPLITE(sk); | |
1475 | |||
1382 | /* Check for false positives due to checksum errors */ | 1476 | /* Check for false positives due to checksum errors */ |
1383 | if ( (mask & POLLRDNORM) && | 1477 | if ( (mask & POLLRDNORM) && |
1384 | !(file->f_flags & O_NONBLOCK) && | 1478 | !(file->f_flags & O_NONBLOCK) && |
@@ -1388,8 +1482,8 @@ unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait) | |||
1388 | 1482 | ||
1389 | spin_lock_bh(&rcvq->lock); | 1483 | spin_lock_bh(&rcvq->lock); |
1390 | while ((skb = skb_peek(rcvq)) != NULL) { | 1484 | while ((skb = skb_peek(rcvq)) != NULL) { |
1391 | if (udp_checksum_complete(skb)) { | 1485 | if (udp_lib_checksum_complete(skb)) { |
1392 | UDP_INC_STATS_BH(UDP_MIB_INERRORS); | 1486 | UDP_INC_STATS_BH(UDP_MIB_INERRORS, is_lite); |
1393 | __skb_unlink(skb, rcvq); | 1487 | __skb_unlink(skb, rcvq); |
1394 | kfree_skb(skb); | 1488 | kfree_skb(skb); |
1395 | } else { | 1489 | } else { |
@@ -1411,7 +1505,7 @@ unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait) | |||
1411 | struct proto udp_prot = { | 1505 | struct proto udp_prot = { |
1412 | .name = "UDP", | 1506 | .name = "UDP", |
1413 | .owner = THIS_MODULE, | 1507 | .owner = THIS_MODULE, |
1414 | .close = udp_close, | 1508 | .close = udp_lib_close, |
1415 | .connect = ip4_datagram_connect, | 1509 | .connect = ip4_datagram_connect, |
1416 | .disconnect = udp_disconnect, | 1510 | .disconnect = udp_disconnect, |
1417 | .ioctl = udp_ioctl, | 1511 | .ioctl = udp_ioctl, |
@@ -1422,8 +1516,8 @@ struct proto udp_prot = { | |||
1422 | .recvmsg = udp_recvmsg, | 1516 | .recvmsg = udp_recvmsg, |
1423 | .sendpage = udp_sendpage, | 1517 | .sendpage = udp_sendpage, |
1424 | .backlog_rcv = udp_queue_rcv_skb, | 1518 | .backlog_rcv = udp_queue_rcv_skb, |
1425 | .hash = udp_v4_hash, | 1519 | .hash = udp_lib_hash, |
1426 | .unhash = udp_v4_unhash, | 1520 | .unhash = udp_lib_unhash, |
1427 | .get_port = udp_v4_get_port, | 1521 | .get_port = udp_v4_get_port, |
1428 | .obj_size = sizeof(struct udp_sock), | 1522 | .obj_size = sizeof(struct udp_sock), |
1429 | #ifdef CONFIG_COMPAT | 1523 | #ifdef CONFIG_COMPAT |
@@ -1442,7 +1536,7 @@ static struct sock *udp_get_first(struct seq_file *seq) | |||
1442 | 1536 | ||
1443 | for (state->bucket = 0; state->bucket < UDP_HTABLE_SIZE; ++state->bucket) { | 1537 | for (state->bucket = 0; state->bucket < UDP_HTABLE_SIZE; ++state->bucket) { |
1444 | struct hlist_node *node; | 1538 | struct hlist_node *node; |
1445 | sk_for_each(sk, node, &udp_hash[state->bucket]) { | 1539 | sk_for_each(sk, node, state->hashtable + state->bucket) { |
1446 | if (sk->sk_family == state->family) | 1540 | if (sk->sk_family == state->family) |
1447 | goto found; | 1541 | goto found; |
1448 | } | 1542 | } |
@@ -1463,7 +1557,7 @@ try_again: | |||
1463 | } while (sk && sk->sk_family != state->family); | 1557 | } while (sk && sk->sk_family != state->family); |
1464 | 1558 | ||
1465 | if (!sk && ++state->bucket < UDP_HTABLE_SIZE) { | 1559 | if (!sk && ++state->bucket < UDP_HTABLE_SIZE) { |
1466 | sk = sk_head(&udp_hash[state->bucket]); | 1560 | sk = sk_head(state->hashtable + state->bucket); |
1467 | goto try_again; | 1561 | goto try_again; |
1468 | } | 1562 | } |
1469 | return sk; | 1563 | return sk; |
@@ -1513,6 +1607,7 @@ static int udp_seq_open(struct inode *inode, struct file *file) | |||
1513 | if (!s) | 1607 | if (!s) |
1514 | goto out; | 1608 | goto out; |
1515 | s->family = afinfo->family; | 1609 | s->family = afinfo->family; |
1610 | s->hashtable = afinfo->hashtable; | ||
1516 | s->seq_ops.start = udp_seq_start; | 1611 | s->seq_ops.start = udp_seq_start; |
1517 | s->seq_ops.next = udp_seq_next; | 1612 | s->seq_ops.next = udp_seq_next; |
1518 | s->seq_ops.show = afinfo->seq_show; | 1613 | s->seq_ops.show = afinfo->seq_show; |
@@ -1579,7 +1674,7 @@ static void udp4_format_sock(struct sock *sp, char *tmpbuf, int bucket) | |||
1579 | atomic_read(&sp->sk_refcnt), sp); | 1674 | atomic_read(&sp->sk_refcnt), sp); |
1580 | } | 1675 | } |
1581 | 1676 | ||
1582 | static int udp4_seq_show(struct seq_file *seq, void *v) | 1677 | int udp4_seq_show(struct seq_file *seq, void *v) |
1583 | { | 1678 | { |
1584 | if (v == SEQ_START_TOKEN) | 1679 | if (v == SEQ_START_TOKEN) |
1585 | seq_printf(seq, "%-127s\n", | 1680 | seq_printf(seq, "%-127s\n", |
@@ -1602,6 +1697,7 @@ static struct udp_seq_afinfo udp4_seq_afinfo = { | |||
1602 | .owner = THIS_MODULE, | 1697 | .owner = THIS_MODULE, |
1603 | .name = "udp", | 1698 | .name = "udp", |
1604 | .family = AF_INET, | 1699 | .family = AF_INET, |
1700 | .hashtable = udp_hash, | ||
1605 | .seq_show = udp4_seq_show, | 1701 | .seq_show = udp4_seq_show, |
1606 | .seq_fops = &udp4_seq_fops, | 1702 | .seq_fops = &udp4_seq_fops, |
1607 | }; | 1703 | }; |
@@ -1624,6 +1720,8 @@ EXPORT_SYMBOL(udp_ioctl); | |||
1624 | EXPORT_SYMBOL(udp_get_port); | 1720 | EXPORT_SYMBOL(udp_get_port); |
1625 | EXPORT_SYMBOL(udp_prot); | 1721 | EXPORT_SYMBOL(udp_prot); |
1626 | EXPORT_SYMBOL(udp_sendmsg); | 1722 | EXPORT_SYMBOL(udp_sendmsg); |
1723 | EXPORT_SYMBOL(udp_lib_getsockopt); | ||
1724 | EXPORT_SYMBOL(udp_lib_setsockopt); | ||
1627 | EXPORT_SYMBOL(udp_poll); | 1725 | EXPORT_SYMBOL(udp_poll); |
1628 | 1726 | ||
1629 | #ifdef CONFIG_PROC_FS | 1727 | #ifdef CONFIG_PROC_FS |
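[Editor's note, not part of the patch] The SOL_UDP/SOL_UDPLITE dispatch and the new UDPLITE_SEND_CSCOV / UDPLITE_RECV_CSCOV cases in udp_lib_setsockopt() above are driven from user space roughly as follows. This is a hedged sketch only; the fallback constants (IPPROTO_UDPLITE = 136, SOL_UDPLITE = IPPROTO_UDPLITE, option numbers 10 and 11) are assumptions for hosts whose installed headers predate UDP-Lite.

    /* Minimal userspace sketch: open a UDP-Lite socket and set partial
     * checksum coverage on both the send and receive side. */
    #include <stdio.h>
    #include <sys/socket.h>
    #include <netinet/in.h>

    #ifndef IPPROTO_UDPLITE
    #define IPPROTO_UDPLITE 136          /* assumed, matches RFC 3828 assignment */
    #endif
    #ifndef SOL_UDPLITE
    #define SOL_UDPLITE IPPROTO_UDPLITE  /* assumed, option level = protocol number */
    #endif
    #ifndef UDPLITE_SEND_CSCOV
    #define UDPLITE_SEND_CSCOV 10        /* assumed option numbers */
    #endif
    #ifndef UDPLITE_RECV_CSCOV
    #define UDPLITE_RECV_CSCOV 11
    #endif

    int main(void)
    {
        int cov = 20;   /* checksum covers only the first 20 octets of payload+header */
        int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDPLITE);

        if (fd < 0) {
            perror("socket");            /* kernel built without UDP-Lite */
            return 1;
        }
        /* Sender coverage; the kernel rounds values 1..7 up to the legal minimum of 8. */
        if (setsockopt(fd, SOL_UDPLITE, UDPLITE_SEND_CSCOV, &cov, sizeof(cov)) < 0)
            perror("UDPLITE_SEND_CSCOV");
        /* Receiver minimum coverage; datagrams covering less than this are dropped. */
        if (setsockopt(fd, SOL_UDPLITE, UDPLITE_RECV_CSCOV, &cov, sizeof(cov)) < 0)
            perror("UDPLITE_RECV_CSCOV");
        return 0;
    }

On a plain UDP socket the same calls return ENOPROTOOPT, which is exactly the !up->pcflag check in the hunks above.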
diff --git a/net/ipv4/udp_impl.h b/net/ipv4/udp_impl.h new file mode 100644 index 000000000000..f6f4277ba6dc --- /dev/null +++ b/net/ipv4/udp_impl.h | |||
@@ -0,0 +1,38 @@ | |||
1 | #ifndef _UDP4_IMPL_H | ||
2 | #define _UDP4_IMPL_H | ||
3 | #include <net/udp.h> | ||
4 | #include <net/udplite.h> | ||
5 | #include <net/protocol.h> | ||
6 | #include <net/inet_common.h> | ||
7 | |||
8 | extern int __udp4_lib_rcv(struct sk_buff *, struct hlist_head [], int ); | ||
9 | extern void __udp4_lib_err(struct sk_buff *, u32, struct hlist_head []); | ||
10 | |||
11 | extern int __udp_lib_get_port(struct sock *sk, unsigned short snum, | ||
12 | struct hlist_head udptable[], int *port_rover, | ||
13 | int (*)(const struct sock*,const struct sock*)); | ||
14 | extern int ipv4_rcv_saddr_equal(const struct sock *, const struct sock *); | ||
15 | |||
16 | |||
17 | extern int udp_setsockopt(struct sock *sk, int level, int optname, | ||
18 | char __user *optval, int optlen); | ||
19 | extern int udp_getsockopt(struct sock *sk, int level, int optname, | ||
20 | char __user *optval, int __user *optlen); | ||
21 | |||
22 | #ifdef CONFIG_COMPAT | ||
23 | extern int compat_udp_setsockopt(struct sock *sk, int level, int optname, | ||
24 | char __user *optval, int optlen); | ||
25 | extern int compat_udp_getsockopt(struct sock *sk, int level, int optname, | ||
26 | char __user *optval, int __user *optlen); | ||
27 | #endif | ||
28 | extern int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | ||
29 | size_t len, int noblock, int flags, int *addr_len); | ||
30 | extern int udp_sendpage(struct sock *sk, struct page *page, int offset, | ||
31 | size_t size, int flags); | ||
32 | extern int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb); | ||
33 | extern int udp_destroy_sock(struct sock *sk); | ||
34 | |||
35 | #ifdef CONFIG_PROC_FS | ||
36 | extern int udp4_seq_show(struct seq_file *seq, void *v); | ||
37 | #endif | ||
38 | #endif /* _UDP4_IMPL_H */ | ||
diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c new file mode 100644 index 000000000000..b28fe1edf98b --- /dev/null +++ b/net/ipv4/udplite.c | |||
@@ -0,0 +1,119 @@ | |||
1 | /* | ||
2 | * UDPLITE An implementation of the UDP-Lite protocol (RFC 3828). | ||
3 | * | ||
4 | * Version: $Id: udplite.c,v 1.25 2006/10/19 07:22:36 gerrit Exp $ | ||
5 | * | ||
6 | * Authors: Gerrit Renker <gerrit@erg.abdn.ac.uk> | ||
7 | * | ||
8 | * Changes: | ||
9 | * Fixes: | ||
10 | * This program is free software; you can redistribute it and/or | ||
11 | * modify it under the terms of the GNU General Public License | ||
12 | * as published by the Free Software Foundation; either version | ||
13 | * 2 of the License, or (at your option) any later version. | ||
14 | */ | ||
15 | #include "udp_impl.h" | ||
16 | DEFINE_SNMP_STAT(struct udp_mib, udplite_statistics) __read_mostly; | ||
17 | |||
18 | struct hlist_head udplite_hash[UDP_HTABLE_SIZE]; | ||
19 | static int udplite_port_rover; | ||
20 | |||
21 | int udplite_get_port(struct sock *sk, unsigned short p, | ||
22 | int (*c)(const struct sock *, const struct sock *)) | ||
23 | { | ||
24 | return __udp_lib_get_port(sk, p, udplite_hash, &udplite_port_rover, c); | ||
25 | } | ||
26 | |||
27 | static int udplite_v4_get_port(struct sock *sk, unsigned short snum) | ||
28 | { | ||
29 | return udplite_get_port(sk, snum, ipv4_rcv_saddr_equal); | ||
30 | } | ||
31 | |||
32 | static int udplite_rcv(struct sk_buff *skb) | ||
33 | { | ||
34 | return __udp4_lib_rcv(skb, udplite_hash, 1); | ||
35 | } | ||
36 | |||
37 | static void udplite_err(struct sk_buff *skb, u32 info) | ||
38 | { | ||
39 | return __udp4_lib_err(skb, info, udplite_hash); | ||
40 | } | ||
41 | |||
42 | static struct net_protocol udplite_protocol = { | ||
43 | .handler = udplite_rcv, | ||
44 | .err_handler = udplite_err, | ||
45 | .no_policy = 1, | ||
46 | }; | ||
47 | |||
48 | struct proto udplite_prot = { | ||
49 | .name = "UDP-Lite", | ||
50 | .owner = THIS_MODULE, | ||
51 | .close = udp_lib_close, | ||
52 | .connect = ip4_datagram_connect, | ||
53 | .disconnect = udp_disconnect, | ||
54 | .ioctl = udp_ioctl, | ||
55 | .init = udplite_sk_init, | ||
56 | .destroy = udp_destroy_sock, | ||
57 | .setsockopt = udp_setsockopt, | ||
58 | .getsockopt = udp_getsockopt, | ||
59 | .sendmsg = udp_sendmsg, | ||
60 | .recvmsg = udp_recvmsg, | ||
61 | .sendpage = udp_sendpage, | ||
62 | .backlog_rcv = udp_queue_rcv_skb, | ||
63 | .hash = udp_lib_hash, | ||
64 | .unhash = udp_lib_unhash, | ||
65 | .get_port = udplite_v4_get_port, | ||
66 | .obj_size = sizeof(struct udp_sock), | ||
67 | #ifdef CONFIG_COMPAT | ||
68 | .compat_setsockopt = compat_udp_setsockopt, | ||
69 | .compat_getsockopt = compat_udp_getsockopt, | ||
70 | #endif | ||
71 | }; | ||
72 | |||
73 | static struct inet_protosw udplite4_protosw = { | ||
74 | .type = SOCK_DGRAM, | ||
75 | .protocol = IPPROTO_UDPLITE, | ||
76 | .prot = &udplite_prot, | ||
77 | .ops = &inet_dgram_ops, | ||
78 | .capability = -1, | ||
79 | .no_check = 0, /* must checksum (RFC 3828) */ | ||
80 | .flags = INET_PROTOSW_PERMANENT, | ||
81 | }; | ||
82 | |||
83 | #ifdef CONFIG_PROC_FS | ||
84 | static struct file_operations udplite4_seq_fops; | ||
85 | static struct udp_seq_afinfo udplite4_seq_afinfo = { | ||
86 | .owner = THIS_MODULE, | ||
87 | .name = "udplite", | ||
88 | .family = AF_INET, | ||
89 | .hashtable = udplite_hash, | ||
90 | .seq_show = udp4_seq_show, | ||
91 | .seq_fops = &udplite4_seq_fops, | ||
92 | }; | ||
93 | #endif | ||
94 | |||
95 | void __init udplite4_register(void) | ||
96 | { | ||
97 | if (proto_register(&udplite_prot, 1)) | ||
98 | goto out_register_err; | ||
99 | |||
100 | if (inet_add_protocol(&udplite_protocol, IPPROTO_UDPLITE) < 0) | ||
101 | goto out_unregister_proto; | ||
102 | |||
103 | inet_register_protosw(&udplite4_protosw); | ||
104 | |||
105 | #ifdef CONFIG_PROC_FS | ||
106 | if (udp_proc_register(&udplite4_seq_afinfo)) /* udplite4_proc_init() */ | ||
107 | printk(KERN_ERR "%s: Cannot register /proc!\n", __FUNCTION__); | ||
108 | #endif | ||
109 | return; | ||
110 | |||
111 | out_unregister_proto: | ||
112 | proto_unregister(&udplite_prot); | ||
113 | out_register_err: | ||
114 | printk(KERN_CRIT "%s: Cannot add UDP-Lite protocol.\n", __FUNCTION__); | ||
115 | } | ||
116 | |||
117 | EXPORT_SYMBOL(udplite_hash); | ||
118 | EXPORT_SYMBOL(udplite_prot); | ||
119 | EXPORT_SYMBOL(udplite_get_port); | ||
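[Editor's note, not part of the patch] The three EXPORT_SYMBOLs above exist so the IPv6 side (net/ipv6/udplite.c, added elsewhere in this merge and wired into the ipv6 Makefile below) can share the same hash table and port allocator. A hedged sketch of that reuse; the function and comparison names here mirror the IPv4 file and are assumptions, not quotes from the IPv6 source:

    #include <net/udplite.h>

    /* assumed to exist on the IPv6 side, mirroring ipv4_rcv_saddr_equal() */
    extern int ipv6_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2);

    static int udplite_v6_get_port(struct sock *sk, unsigned short snum)
    {
        /* Same allocator and same udplite_hash table as IPv4, but bind
         * conflicts are decided with an IPv6-aware address comparison. */
        return udplite_get_port(sk, snum, ipv6_rcv_saddr_equal);
    }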
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c index 1bed0cdf53e3..d4107bb701b5 100644 --- a/net/ipv4/xfrm4_policy.c +++ b/net/ipv4/xfrm4_policy.c | |||
@@ -72,8 +72,8 @@ __xfrm4_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int | |||
72 | struct dst_entry *dst, *dst_prev; | 72 | struct dst_entry *dst, *dst_prev; |
73 | struct rtable *rt0 = (struct rtable*)(*dst_p); | 73 | struct rtable *rt0 = (struct rtable*)(*dst_p); |
74 | struct rtable *rt = rt0; | 74 | struct rtable *rt = rt0; |
75 | u32 remote = fl->fl4_dst; | 75 | __be32 remote = fl->fl4_dst; |
76 | u32 local = fl->fl4_src; | 76 | __be32 local = fl->fl4_src; |
77 | struct flowi fl_tunnel = { | 77 | struct flowi fl_tunnel = { |
78 | .nl_u = { | 78 | .nl_u = { |
79 | .ip4_u = { | 79 | .ip4_u = { |
@@ -199,11 +199,12 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl) | |||
199 | if (!(iph->frag_off & htons(IP_MF | IP_OFFSET))) { | 199 | if (!(iph->frag_off & htons(IP_MF | IP_OFFSET))) { |
200 | switch (iph->protocol) { | 200 | switch (iph->protocol) { |
201 | case IPPROTO_UDP: | 201 | case IPPROTO_UDP: |
202 | case IPPROTO_UDPLITE: | ||
202 | case IPPROTO_TCP: | 203 | case IPPROTO_TCP: |
203 | case IPPROTO_SCTP: | 204 | case IPPROTO_SCTP: |
204 | case IPPROTO_DCCP: | 205 | case IPPROTO_DCCP: |
205 | if (pskb_may_pull(skb, xprth + 4 - skb->data)) { | 206 | if (pskb_may_pull(skb, xprth + 4 - skb->data)) { |
206 | u16 *ports = (u16 *)xprth; | 207 | __be16 *ports = (__be16 *)xprth; |
207 | 208 | ||
208 | fl->fl_ip_sport = ports[0]; | 209 | fl->fl_ip_sport = ports[0]; |
209 | fl->fl_ip_dport = ports[1]; | 210 | fl->fl_ip_dport = ports[1]; |
diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig index 6e48f52e197c..deb4101a2a81 100644 --- a/net/ipv6/Kconfig +++ b/net/ipv6/Kconfig | |||
@@ -196,10 +196,3 @@ config IPV6_SUBTREES | |||
196 | 196 | ||
197 | If unsure, say N. | 197 | If unsure, say N. |
198 | 198 | ||
199 | config IPV6_ROUTE_FWMARK | ||
200 | bool "IPv6: use netfilter MARK value as routing key" | ||
201 | depends on IPV6_MULTIPLE_TABLES && NETFILTER | ||
202 | ---help--- | ||
203 | If you say Y here, you will be able to specify different routes for | ||
204 | packets with different mark values (see iptables(8), MARK target). | ||
205 | |||
diff --git a/net/ipv6/Makefile b/net/ipv6/Makefile index addcc011bc01..8bacda109b7f 100644 --- a/net/ipv6/Makefile +++ b/net/ipv6/Makefile | |||
@@ -5,8 +5,8 @@ | |||
5 | obj-$(CONFIG_IPV6) += ipv6.o | 5 | obj-$(CONFIG_IPV6) += ipv6.o |
6 | 6 | ||
7 | ipv6-objs := af_inet6.o anycast.o ip6_output.o ip6_input.o addrconf.o \ | 7 | ipv6-objs := af_inet6.o anycast.o ip6_output.o ip6_input.o addrconf.o \ |
8 | route.o ip6_fib.o ipv6_sockglue.o ndisc.o udp.o raw.o \ | 8 | route.o ip6_fib.o ipv6_sockglue.o ndisc.o udp.o udplite.o \ |
9 | protocol.o icmp.o mcast.o reassembly.o tcp_ipv6.o \ | 9 | raw.o protocol.o icmp.o mcast.o reassembly.o tcp_ipv6.o \ |
10 | exthdrs.o sysctl_net_ipv6.o datagram.o proc.o \ | 10 | exthdrs.o sysctl_net_ipv6.o datagram.o proc.o \ |
11 | ip6_flowlabel.o ipv6_syms.o inet6_connection_sock.o | 11 | ip6_flowlabel.o ipv6_syms.o inet6_connection_sock.o |
12 | 12 | ||
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index b312a5f7a759..a5e8d207a51b 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
@@ -232,7 +232,7 @@ static inline unsigned ipv6_addr_scope2type(unsigned scope) | |||
232 | 232 | ||
233 | int __ipv6_addr_type(const struct in6_addr *addr) | 233 | int __ipv6_addr_type(const struct in6_addr *addr) |
234 | { | 234 | { |
235 | u32 st; | 235 | __be32 st; |
236 | 236 | ||
237 | st = addr->s6_addr32[0]; | 237 | st = addr->s6_addr32[0]; |
238 | 238 | ||
@@ -1164,7 +1164,7 @@ record_it: | |||
1164 | int ipv6_get_saddr(struct dst_entry *dst, | 1164 | int ipv6_get_saddr(struct dst_entry *dst, |
1165 | struct in6_addr *daddr, struct in6_addr *saddr) | 1165 | struct in6_addr *daddr, struct in6_addr *saddr) |
1166 | { | 1166 | { |
1167 | return ipv6_dev_get_saddr(dst ? ((struct rt6_info *)dst)->rt6i_idev->dev : NULL, daddr, saddr); | 1167 | return ipv6_dev_get_saddr(dst ? ip6_dst_idev(dst)->dev : NULL, daddr, saddr); |
1168 | } | 1168 | } |
1169 | 1169 | ||
1170 | 1170 | ||
@@ -3098,10 +3098,9 @@ static inline int rt_scope(int ifa_scope) | |||
3098 | 3098 | ||
3099 | static inline int inet6_ifaddr_msgsize(void) | 3099 | static inline int inet6_ifaddr_msgsize(void) |
3100 | { | 3100 | { |
3101 | return nlmsg_total_size(sizeof(struct ifaddrmsg) + | 3101 | return NLMSG_ALIGN(sizeof(struct ifaddrmsg)) |
3102 | nla_total_size(16) + | 3102 | + nla_total_size(16) /* IFA_ADDRESS */ |
3103 | nla_total_size(sizeof(struct ifa_cacheinfo)) + | 3103 | + nla_total_size(sizeof(struct ifa_cacheinfo)); |
3104 | 128); | ||
3105 | } | 3104 | } |
3106 | 3105 | ||
3107 | static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa, | 3106 | static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa, |
@@ -3329,10 +3328,8 @@ static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr* nlh, | |||
3329 | 3328 | ||
3330 | err = inet6_fill_ifaddr(skb, ifa, NETLINK_CB(in_skb).pid, | 3329 | err = inet6_fill_ifaddr(skb, ifa, NETLINK_CB(in_skb).pid, |
3331 | nlh->nlmsg_seq, RTM_NEWADDR, 0); | 3330 | nlh->nlmsg_seq, RTM_NEWADDR, 0); |
3332 | if (err < 0) { | 3331 | /* failure implies BUG in inet6_ifaddr_msgsize() */ |
3333 | kfree_skb(skb); | 3332 | BUG_ON(err < 0); |
3334 | goto errout_ifa; | ||
3335 | } | ||
3336 | 3333 | ||
3337 | err = rtnl_unicast(skb, NETLINK_CB(in_skb).pid); | 3334 | err = rtnl_unicast(skb, NETLINK_CB(in_skb).pid); |
3338 | errout_ifa: | 3335 | errout_ifa: |
@@ -3351,10 +3348,8 @@ static void inet6_ifa_notify(int event, struct inet6_ifaddr *ifa) | |||
3351 | goto errout; | 3348 | goto errout; |
3352 | 3349 | ||
3353 | err = inet6_fill_ifaddr(skb, ifa, 0, 0, event, 0); | 3350 | err = inet6_fill_ifaddr(skb, ifa, 0, 0, event, 0); |
3354 | if (err < 0) { | 3351 | /* failure implies BUG in inet6_ifaddr_msgsize() */ |
3355 | kfree_skb(skb); | 3352 | BUG_ON(err < 0); |
3356 | goto errout; | ||
3357 | } | ||
3358 | 3353 | ||
3359 | err = rtnl_notify(skb, 0, RTNLGRP_IPV6_IFADDR, NULL, GFP_ATOMIC); | 3354 | err = rtnl_notify(skb, 0, RTNLGRP_IPV6_IFADDR, NULL, GFP_ATOMIC); |
3360 | errout: | 3355 | errout: |
@@ -3365,6 +3360,8 @@ errout: | |||
3365 | static void inline ipv6_store_devconf(struct ipv6_devconf *cnf, | 3360 | static void inline ipv6_store_devconf(struct ipv6_devconf *cnf, |
3366 | __s32 *array, int bytes) | 3361 | __s32 *array, int bytes) |
3367 | { | 3362 | { |
3363 | BUG_ON(bytes < (DEVCONF_MAX * 4)); | ||
3364 | |||
3368 | memset(array, 0, bytes); | 3365 | memset(array, 0, bytes); |
3369 | array[DEVCONF_FORWARDING] = cnf->forwarding; | 3366 | array[DEVCONF_FORWARDING] = cnf->forwarding; |
3370 | array[DEVCONF_HOPLIMIT] = cnf->hop_limit; | 3367 | array[DEVCONF_HOPLIMIT] = cnf->hop_limit; |
@@ -3397,80 +3394,76 @@ static void inline ipv6_store_devconf(struct ipv6_devconf *cnf, | |||
3397 | array[DEVCONF_PROXY_NDP] = cnf->proxy_ndp; | 3394 | array[DEVCONF_PROXY_NDP] = cnf->proxy_ndp; |
3398 | } | 3395 | } |
3399 | 3396 | ||
3400 | /* Maximum length of ifinfomsg attributes */ | 3397 | static inline size_t inet6_if_nlmsg_size(void) |
3401 | #define INET6_IFINFO_RTA_SPACE \ | 3398 | { |
3402 | RTA_SPACE(IFNAMSIZ) /* IFNAME */ + \ | 3399 | return NLMSG_ALIGN(sizeof(struct ifinfomsg)) |
3403 | RTA_SPACE(MAX_ADDR_LEN) /* ADDRESS */ + \ | 3400 | + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */ |
3404 | RTA_SPACE(sizeof(u32)) /* MTU */ + \ | 3401 | + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */ |
3405 | RTA_SPACE(sizeof(int)) /* LINK */ + \ | 3402 | + nla_total_size(4) /* IFLA_MTU */ |
3406 | RTA_SPACE(0) /* PROTINFO */ + \ | 3403 | + nla_total_size(4) /* IFLA_LINK */ |
3407 | RTA_SPACE(sizeof(u32)) /* FLAGS */ + \ | 3404 | + nla_total_size( /* IFLA_PROTINFO */ |
3408 | RTA_SPACE(sizeof(struct ifla_cacheinfo)) /* CACHEINFO */ + \ | 3405 | nla_total_size(4) /* IFLA_INET6_FLAGS */ |
3409 | RTA_SPACE(sizeof(__s32[DEVCONF_MAX])) /* CONF */ | 3406 | + nla_total_size(sizeof(struct ifla_cacheinfo)) |
3407 | + nla_total_size(DEVCONF_MAX * 4) /* IFLA_INET6_CONF */ | ||
3408 | ); | ||
3409 | } | ||
3410 | 3410 | ||
3411 | static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev, | 3411 | static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev, |
3412 | u32 pid, u32 seq, int event, unsigned int flags) | 3412 | u32 pid, u32 seq, int event, unsigned int flags) |
3413 | { | 3413 | { |
3414 | struct net_device *dev = idev->dev; | 3414 | struct net_device *dev = idev->dev; |
3415 | __s32 *array = NULL; | 3415 | struct nlattr *conf; |
3416 | struct ifinfomsg *r; | 3416 | struct ifinfomsg *hdr; |
3417 | struct nlmsghdr *nlh; | 3417 | struct nlmsghdr *nlh; |
3418 | unsigned char *b = skb->tail; | 3418 | void *protoinfo; |
3419 | struct rtattr *subattr; | 3419 | struct ifla_cacheinfo ci; |
3420 | __u32 mtu = dev->mtu; | 3420 | |
3421 | struct ifla_cacheinfo ci; | 3421 | nlh = nlmsg_put(skb, pid, seq, event, sizeof(*hdr), flags); |
3422 | 3422 | if (nlh == NULL) | |
3423 | nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*r), flags); | 3423 | return -ENOBUFS; |
3424 | r = NLMSG_DATA(nlh); | 3424 | |
3425 | r->ifi_family = AF_INET6; | 3425 | hdr = nlmsg_data(nlh); |
3426 | r->__ifi_pad = 0; | 3426 | hdr->ifi_family = AF_INET6; |
3427 | r->ifi_type = dev->type; | 3427 | hdr->__ifi_pad = 0; |
3428 | r->ifi_index = dev->ifindex; | 3428 | hdr->ifi_type = dev->type; |
3429 | r->ifi_flags = dev_get_flags(dev); | 3429 | hdr->ifi_index = dev->ifindex; |
3430 | r->ifi_change = 0; | 3430 | hdr->ifi_flags = dev_get_flags(dev); |
3431 | 3431 | hdr->ifi_change = 0; | |
3432 | RTA_PUT(skb, IFLA_IFNAME, strlen(dev->name)+1, dev->name); | 3432 | |
3433 | NLA_PUT_STRING(skb, IFLA_IFNAME, dev->name); | ||
3433 | 3434 | ||
3434 | if (dev->addr_len) | 3435 | if (dev->addr_len) |
3435 | RTA_PUT(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr); | 3436 | NLA_PUT(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr); |
3436 | 3437 | ||
3437 | RTA_PUT(skb, IFLA_MTU, sizeof(mtu), &mtu); | 3438 | NLA_PUT_U32(skb, IFLA_MTU, dev->mtu); |
3438 | if (dev->ifindex != dev->iflink) | 3439 | if (dev->ifindex != dev->iflink) |
3439 | RTA_PUT(skb, IFLA_LINK, sizeof(int), &dev->iflink); | 3440 | NLA_PUT_U32(skb, IFLA_LINK, dev->iflink); |
3440 | |||
3441 | subattr = (struct rtattr*)skb->tail; | ||
3442 | 3441 | ||
3443 | RTA_PUT(skb, IFLA_PROTINFO, 0, NULL); | 3442 | protoinfo = nla_nest_start(skb, IFLA_PROTINFO); |
3443 | if (protoinfo == NULL) | ||
3444 | goto nla_put_failure; | ||
3444 | 3445 | ||
3445 | /* return the device flags */ | 3446 | NLA_PUT_U32(skb, IFLA_INET6_FLAGS, idev->if_flags); |
3446 | RTA_PUT(skb, IFLA_INET6_FLAGS, sizeof(__u32), &idev->if_flags); | ||
3447 | 3447 | ||
3448 | /* return interface cacheinfo */ | ||
3449 | ci.max_reasm_len = IPV6_MAXPLEN; | 3448 | ci.max_reasm_len = IPV6_MAXPLEN; |
3450 | ci.tstamp = (__u32)(TIME_DELTA(idev->tstamp, INITIAL_JIFFIES) / HZ * 100 | 3449 | ci.tstamp = (__u32)(TIME_DELTA(idev->tstamp, INITIAL_JIFFIES) / HZ * 100 |
3451 | + TIME_DELTA(idev->tstamp, INITIAL_JIFFIES) % HZ * 100 / HZ); | 3450 | + TIME_DELTA(idev->tstamp, INITIAL_JIFFIES) % HZ * 100 / HZ); |
3452 | ci.reachable_time = idev->nd_parms->reachable_time; | 3451 | ci.reachable_time = idev->nd_parms->reachable_time; |
3453 | ci.retrans_time = idev->nd_parms->retrans_time; | 3452 | ci.retrans_time = idev->nd_parms->retrans_time; |
3454 | RTA_PUT(skb, IFLA_INET6_CACHEINFO, sizeof(ci), &ci); | 3453 | NLA_PUT(skb, IFLA_INET6_CACHEINFO, sizeof(ci), &ci); |
3455 | 3454 | ||
3456 | /* return the device sysctl params */ | 3455 | conf = nla_reserve(skb, IFLA_INET6_CONF, DEVCONF_MAX * sizeof(s32)); |
3457 | if ((array = kmalloc(DEVCONF_MAX * sizeof(*array), GFP_ATOMIC)) == NULL) | 3456 | if (conf == NULL) |
3458 | goto rtattr_failure; | 3457 | goto nla_put_failure; |
3459 | ipv6_store_devconf(&idev->cnf, array, DEVCONF_MAX * sizeof(*array)); | 3458 | ipv6_store_devconf(&idev->cnf, nla_data(conf), nla_len(conf)); |
3460 | RTA_PUT(skb, IFLA_INET6_CONF, DEVCONF_MAX * sizeof(*array), array); | ||
3461 | 3459 | ||
3462 | /* XXX - Statistics/MC not implemented */ | 3460 | /* XXX - Statistics/MC not implemented */ |
3463 | subattr->rta_len = skb->tail - (u8*)subattr; | ||
3464 | 3461 | ||
3465 | nlh->nlmsg_len = skb->tail - b; | 3462 | nla_nest_end(skb, protoinfo); |
3466 | kfree(array); | 3463 | return nlmsg_end(skb, nlh); |
3467 | return skb->len; | ||
3468 | 3464 | ||
3469 | nlmsg_failure: | 3465 | nla_put_failure: |
3470 | rtattr_failure: | 3466 | return nlmsg_cancel(skb, nlh); |
3471 | kfree(array); | ||
3472 | skb_trim(skb, b - skb->data); | ||
3473 | return -1; | ||
3474 | } | 3467 | } |
3475 | 3468 | ||
3476 | static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) | 3469 | static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) |
@@ -3501,18 +3494,15 @@ static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) | |||
3501 | void inet6_ifinfo_notify(int event, struct inet6_dev *idev) | 3494 | void inet6_ifinfo_notify(int event, struct inet6_dev *idev) |
3502 | { | 3495 | { |
3503 | struct sk_buff *skb; | 3496 | struct sk_buff *skb; |
3504 | int payload = sizeof(struct ifinfomsg) + INET6_IFINFO_RTA_SPACE; | ||
3505 | int err = -ENOBUFS; | 3497 | int err = -ENOBUFS; |
3506 | 3498 | ||
3507 | skb = nlmsg_new(nlmsg_total_size(payload), GFP_ATOMIC); | 3499 | skb = nlmsg_new(inet6_if_nlmsg_size(), GFP_ATOMIC); |
3508 | if (skb == NULL) | 3500 | if (skb == NULL) |
3509 | goto errout; | 3501 | goto errout; |
3510 | 3502 | ||
3511 | err = inet6_fill_ifinfo(skb, idev, 0, 0, event, 0); | 3503 | err = inet6_fill_ifinfo(skb, idev, 0, 0, event, 0); |
3512 | if (err < 0) { | 3504 | /* failure implies BUG in inet6_if_nlmsg_size() */ |
3513 | kfree_skb(skb); | 3505 | BUG_ON(err < 0); |
3514 | goto errout; | ||
3515 | } | ||
3516 | 3506 | ||
3517 | err = rtnl_notify(skb, 0, RTNLGRP_IPV6_IFADDR, NULL, GFP_ATOMIC); | 3507 | err = rtnl_notify(skb, 0, RTNLGRP_IPV6_IFADDR, NULL, GFP_ATOMIC); |
3518 | errout: | 3508 | errout: |
@@ -3520,22 +3510,26 @@ errout: | |||
3520 | rtnl_set_sk_err(RTNLGRP_IPV6_IFADDR, err); | 3510 | rtnl_set_sk_err(RTNLGRP_IPV6_IFADDR, err); |
3521 | } | 3511 | } |
3522 | 3512 | ||
3523 | /* Maximum length of prefix_cacheinfo attributes */ | 3513 | static inline size_t inet6_prefix_nlmsg_size(void) |
3524 | #define INET6_PREFIX_RTA_SPACE \ | 3514 | { |
3525 | RTA_SPACE(sizeof(((struct prefix_info *)NULL)->prefix)) /* ADDRESS */ + \ | 3515 | return NLMSG_ALIGN(sizeof(struct prefixmsg)) |
3526 | RTA_SPACE(sizeof(struct prefix_cacheinfo)) /* CACHEINFO */ | 3516 | + nla_total_size(sizeof(struct in6_addr)) |
3517 | + nla_total_size(sizeof(struct prefix_cacheinfo)); | ||
3518 | } | ||
3527 | 3519 | ||
3528 | static int inet6_fill_prefix(struct sk_buff *skb, struct inet6_dev *idev, | 3520 | static int inet6_fill_prefix(struct sk_buff *skb, struct inet6_dev *idev, |
3529 | struct prefix_info *pinfo, u32 pid, u32 seq, | 3521 | struct prefix_info *pinfo, u32 pid, u32 seq, |
3530 | int event, unsigned int flags) | 3522 | int event, unsigned int flags) |
3531 | { | 3523 | { |
3532 | struct prefixmsg *pmsg; | 3524 | struct prefixmsg *pmsg; |
3533 | struct nlmsghdr *nlh; | 3525 | struct nlmsghdr *nlh; |
3534 | unsigned char *b = skb->tail; | ||
3535 | struct prefix_cacheinfo ci; | 3526 | struct prefix_cacheinfo ci; |
3536 | 3527 | ||
3537 | nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*pmsg), flags); | 3528 | nlh = nlmsg_put(skb, pid, seq, event, sizeof(*pmsg), flags); |
3538 | pmsg = NLMSG_DATA(nlh); | 3529 | if (nlh == NULL) |
3530 | return -ENOBUFS; | ||
3531 | |||
3532 | pmsg = nlmsg_data(nlh); | ||
3539 | pmsg->prefix_family = AF_INET6; | 3533 | pmsg->prefix_family = AF_INET6; |
3540 | pmsg->prefix_pad1 = 0; | 3534 | pmsg->prefix_pad1 = 0; |
3541 | pmsg->prefix_pad2 = 0; | 3535 | pmsg->prefix_pad2 = 0; |
@@ -3543,44 +3537,37 @@ static int inet6_fill_prefix(struct sk_buff *skb, struct inet6_dev *idev, | |||
3543 | pmsg->prefix_len = pinfo->prefix_len; | 3537 | pmsg->prefix_len = pinfo->prefix_len; |
3544 | pmsg->prefix_type = pinfo->type; | 3538 | pmsg->prefix_type = pinfo->type; |
3545 | pmsg->prefix_pad3 = 0; | 3539 | pmsg->prefix_pad3 = 0; |
3546 | |||
3547 | pmsg->prefix_flags = 0; | 3540 | pmsg->prefix_flags = 0; |
3548 | if (pinfo->onlink) | 3541 | if (pinfo->onlink) |
3549 | pmsg->prefix_flags |= IF_PREFIX_ONLINK; | 3542 | pmsg->prefix_flags |= IF_PREFIX_ONLINK; |
3550 | if (pinfo->autoconf) | 3543 | if (pinfo->autoconf) |
3551 | pmsg->prefix_flags |= IF_PREFIX_AUTOCONF; | 3544 | pmsg->prefix_flags |= IF_PREFIX_AUTOCONF; |
3552 | 3545 | ||
3553 | RTA_PUT(skb, PREFIX_ADDRESS, sizeof(pinfo->prefix), &pinfo->prefix); | 3546 | NLA_PUT(skb, PREFIX_ADDRESS, sizeof(pinfo->prefix), &pinfo->prefix); |
3554 | 3547 | ||
3555 | ci.preferred_time = ntohl(pinfo->prefered); | 3548 | ci.preferred_time = ntohl(pinfo->prefered); |
3556 | ci.valid_time = ntohl(pinfo->valid); | 3549 | ci.valid_time = ntohl(pinfo->valid); |
3557 | RTA_PUT(skb, PREFIX_CACHEINFO, sizeof(ci), &ci); | 3550 | NLA_PUT(skb, PREFIX_CACHEINFO, sizeof(ci), &ci); |
3558 | 3551 | ||
3559 | nlh->nlmsg_len = skb->tail - b; | 3552 | return nlmsg_end(skb, nlh); |
3560 | return skb->len; | ||
3561 | 3553 | ||
3562 | nlmsg_failure: | 3554 | nla_put_failure: |
3563 | rtattr_failure: | 3555 | return nlmsg_cancel(skb, nlh); |
3564 | skb_trim(skb, b - skb->data); | ||
3565 | return -1; | ||
3566 | } | 3556 | } |
3567 | 3557 | ||
3568 | static void inet6_prefix_notify(int event, struct inet6_dev *idev, | 3558 | static void inet6_prefix_notify(int event, struct inet6_dev *idev, |
3569 | struct prefix_info *pinfo) | 3559 | struct prefix_info *pinfo) |
3570 | { | 3560 | { |
3571 | struct sk_buff *skb; | 3561 | struct sk_buff *skb; |
3572 | int payload = sizeof(struct prefixmsg) + INET6_PREFIX_RTA_SPACE; | ||
3573 | int err = -ENOBUFS; | 3562 | int err = -ENOBUFS; |
3574 | 3563 | ||
3575 | skb = nlmsg_new(nlmsg_total_size(payload), GFP_ATOMIC); | 3564 | skb = nlmsg_new(inet6_prefix_nlmsg_size(), GFP_ATOMIC); |
3576 | if (skb == NULL) | 3565 | if (skb == NULL) |
3577 | goto errout; | 3566 | goto errout; |
3578 | 3567 | ||
3579 | err = inet6_fill_prefix(skb, idev, pinfo, 0, 0, event, 0); | 3568 | err = inet6_fill_prefix(skb, idev, pinfo, 0, 0, event, 0); |
3580 | if (err < 0) { | 3569 | /* failure implies BUG in inet6_prefix_nlmsg_size() */ |
3581 | kfree_skb(skb); | 3570 | BUG_ON(err < 0); |
3582 | goto errout; | ||
3583 | } | ||
3584 | 3571 | ||
3585 | err = rtnl_notify(skb, 0, RTNLGRP_IPV6_PREFIX, NULL, GFP_ATOMIC); | 3572 | err = rtnl_notify(skb, 0, RTNLGRP_IPV6_PREFIX, NULL, GFP_ATOMIC); |
3586 | errout: | 3573 | errout: |
@@ -3982,10 +3969,9 @@ static void addrconf_sysctl_register(struct inet6_dev *idev, struct ipv6_devconf | |||
3982 | struct addrconf_sysctl_table *t; | 3969 | struct addrconf_sysctl_table *t; |
3983 | char *dev_name = NULL; | 3970 | char *dev_name = NULL; |
3984 | 3971 | ||
3985 | t = kmalloc(sizeof(*t), GFP_KERNEL); | 3972 | t = kmemdup(&addrconf_sysctl, sizeof(*t), GFP_KERNEL); |
3986 | if (t == NULL) | 3973 | if (t == NULL) |
3987 | return; | 3974 | return; |
3988 | memcpy(t, &addrconf_sysctl, sizeof(*t)); | ||
3989 | for (i=0; t->addrconf_vars[i].data; i++) { | 3975 | for (i=0; t->addrconf_vars[i].data; i++) { |
3990 | t->addrconf_vars[i].data += (char*)p - (char*)&ipv6_devconf; | 3976 | t->addrconf_vars[i].data += (char*)p - (char*)&ipv6_devconf; |
3991 | t->addrconf_vars[i].de = NULL; | 3977 | t->addrconf_vars[i].de = NULL; |
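[Editor's note, not part of the patch] The addrconf.c hunks above replace the old RTA_PUT()/NLMSG_NEW() open coding with nlmsg_put()/NLA_PUT_*() plus an exact size calculation, so a fill failure can only mean the *_nlmsg_size() helper is wrong (hence the BUG_ON at the call sites). A minimal sketch of that pattern under the same era's netlink helpers; the demo_* names are illustrative only:

    #include <net/netlink.h>
    #include <linux/netdevice.h>
    #include <linux/rtnetlink.h>

    static inline size_t demo_nlmsg_size(void)
    {
        return NLMSG_ALIGN(sizeof(struct ifinfomsg))
               + nla_total_size(4)          /* IFLA_MTU (u32)       */
               + nla_total_size(IFNAMSIZ);  /* IFLA_IFNAME (string) */
    }

    static int demo_fill(struct sk_buff *skb, struct net_device *dev,
                         u32 pid, u32 seq, int event)
    {
        struct ifinfomsg *hdr;
        struct nlmsghdr *nlh;

        nlh = nlmsg_put(skb, pid, seq, event, sizeof(*hdr), 0);
        if (nlh == NULL)
            return -ENOBUFS;

        hdr = nlmsg_data(nlh);
        memset(hdr, 0, sizeof(*hdr));
        hdr->ifi_index = dev->ifindex;

        NLA_PUT_U32(skb, IFLA_MTU, dev->mtu);
        NLA_PUT_STRING(skb, IFLA_IFNAME, dev->name);

        return nlmsg_end(skb, nlh);      /* message length is now final */

    nla_put_failure:
        return nlmsg_cancel(skb, nlh);   /* only reachable if demo_nlmsg_size() is wrong */
    }

The caller allocates with nlmsg_new(demo_nlmsg_size(), GFP_ATOMIC) and treats a negative return as a sizing bug, exactly as inet6_ifinfo_notify() and inet6_prefix_notify() now do.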
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 858cae29581c..87c8f54872b7 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c | |||
@@ -49,6 +49,7 @@ | |||
49 | #include <net/ip.h> | 49 | #include <net/ip.h> |
50 | #include <net/ipv6.h> | 50 | #include <net/ipv6.h> |
51 | #include <net/udp.h> | 51 | #include <net/udp.h> |
52 | #include <net/udplite.h> | ||
52 | #include <net/tcp.h> | 53 | #include <net/tcp.h> |
53 | #include <net/ipip.h> | 54 | #include <net/ipip.h> |
54 | #include <net/protocol.h> | 55 | #include <net/protocol.h> |
@@ -221,7 +222,7 @@ lookup_protocol: | |||
221 | * the user to assign a number at socket | 222 | * the user to assign a number at socket |
222 | * creation time automatically shares. | 223 | * creation time automatically shares. |
223 | */ | 224 | */ |
224 | inet->sport = ntohs(inet->num); | 225 | inet->sport = htons(inet->num); |
225 | sk->sk_prot->hash(sk); | 226 | sk->sk_prot->hash(sk); |
226 | } | 227 | } |
227 | if (sk->sk_prot->init) { | 228 | if (sk->sk_prot->init) { |
@@ -341,7 +342,7 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | |||
341 | sk->sk_userlocks |= SOCK_BINDADDR_LOCK; | 342 | sk->sk_userlocks |= SOCK_BINDADDR_LOCK; |
342 | if (snum) | 343 | if (snum) |
343 | sk->sk_userlocks |= SOCK_BINDPORT_LOCK; | 344 | sk->sk_userlocks |= SOCK_BINDPORT_LOCK; |
344 | inet->sport = ntohs(inet->num); | 345 | inet->sport = htons(inet->num); |
345 | inet->dport = 0; | 346 | inet->dport = 0; |
346 | inet->daddr = 0; | 347 | inet->daddr = 0; |
347 | out: | 348 | out: |
@@ -678,7 +679,7 @@ int ipv6_opt_accepted(struct sock *sk, struct sk_buff *skb) | |||
678 | if (np->rxopt.all) { | 679 | if (np->rxopt.all) { |
679 | if ((opt->hop && (np->rxopt.bits.hopopts || | 680 | if ((opt->hop && (np->rxopt.bits.hopopts || |
680 | np->rxopt.bits.ohopopts)) || | 681 | np->rxopt.bits.ohopopts)) || |
681 | ((IPV6_FLOWINFO_MASK & *(u32*)skb->nh.raw) && | 682 | ((IPV6_FLOWINFO_MASK & *(__be32*)skb->nh.raw) && |
682 | np->rxopt.bits.rxflow) || | 683 | np->rxopt.bits.rxflow) || |
683 | (opt->srcrt && (np->rxopt.bits.srcrt || | 684 | (opt->srcrt && (np->rxopt.bits.srcrt || |
684 | np->rxopt.bits.osrcrt)) || | 685 | np->rxopt.bits.osrcrt)) || |
@@ -737,8 +738,13 @@ static int __init init_ipv6_mibs(void) | |||
737 | if (snmp6_mib_init((void **)udp_stats_in6, sizeof (struct udp_mib), | 738 | if (snmp6_mib_init((void **)udp_stats_in6, sizeof (struct udp_mib), |
738 | __alignof__(struct udp_mib)) < 0) | 739 | __alignof__(struct udp_mib)) < 0) |
739 | goto err_udp_mib; | 740 | goto err_udp_mib; |
741 | if (snmp6_mib_init((void **)udplite_stats_in6, sizeof (struct udp_mib), | ||
742 | __alignof__(struct udp_mib)) < 0) | ||
743 | goto err_udplite_mib; | ||
740 | return 0; | 744 | return 0; |
741 | 745 | ||
746 | err_udplite_mib: | ||
747 | snmp6_mib_free((void **)udp_stats_in6); | ||
742 | err_udp_mib: | 748 | err_udp_mib: |
743 | snmp6_mib_free((void **)icmpv6_statistics); | 749 | snmp6_mib_free((void **)icmpv6_statistics); |
744 | err_icmp_mib: | 750 | err_icmp_mib: |
@@ -753,6 +759,7 @@ static void cleanup_ipv6_mibs(void) | |||
753 | snmp6_mib_free((void **)ipv6_statistics); | 759 | snmp6_mib_free((void **)ipv6_statistics); |
754 | snmp6_mib_free((void **)icmpv6_statistics); | 760 | snmp6_mib_free((void **)icmpv6_statistics); |
755 | snmp6_mib_free((void **)udp_stats_in6); | 761 | snmp6_mib_free((void **)udp_stats_in6); |
762 | snmp6_mib_free((void **)udplite_stats_in6); | ||
756 | } | 763 | } |
757 | 764 | ||
758 | static int __init inet6_init(void) | 765 | static int __init inet6_init(void) |
@@ -780,10 +787,14 @@ static int __init inet6_init(void) | |||
780 | if (err) | 787 | if (err) |
781 | goto out_unregister_tcp_proto; | 788 | goto out_unregister_tcp_proto; |
782 | 789 | ||
783 | err = proto_register(&rawv6_prot, 1); | 790 | err = proto_register(&udplitev6_prot, 1); |
784 | if (err) | 791 | if (err) |
785 | goto out_unregister_udp_proto; | 792 | goto out_unregister_udp_proto; |
786 | 793 | ||
794 | err = proto_register(&rawv6_prot, 1); | ||
795 | if (err) | ||
796 | goto out_unregister_udplite_proto; | ||
797 | |||
787 | 798 | ||
788 | /* Register the socket-side information for inet6_create. */ | 799 | /* Register the socket-side information for inet6_create. */ |
789 | for(r = &inetsw6[0]; r < &inetsw6[SOCK_MAX]; ++r) | 800 | for(r = &inetsw6[0]; r < &inetsw6[SOCK_MAX]; ++r) |
@@ -837,6 +848,8 @@ static int __init inet6_init(void) | |||
837 | goto proc_tcp6_fail; | 848 | goto proc_tcp6_fail; |
838 | if (udp6_proc_init()) | 849 | if (udp6_proc_init()) |
839 | goto proc_udp6_fail; | 850 | goto proc_udp6_fail; |
851 | if (udplite6_proc_init()) | ||
852 | goto proc_udplite6_fail; | ||
840 | if (ipv6_misc_proc_init()) | 853 | if (ipv6_misc_proc_init()) |
841 | goto proc_misc6_fail; | 854 | goto proc_misc6_fail; |
842 | 855 | ||
@@ -862,6 +875,7 @@ static int __init inet6_init(void) | |||
862 | 875 | ||
863 | /* Init v6 transport protocols. */ | 876 | /* Init v6 transport protocols. */ |
864 | udpv6_init(); | 877 | udpv6_init(); |
878 | udplitev6_init(); | ||
865 | tcpv6_init(); | 879 | tcpv6_init(); |
866 | 880 | ||
867 | ipv6_packet_init(); | 881 | ipv6_packet_init(); |
@@ -879,6 +893,8 @@ proc_if6_fail: | |||
879 | proc_anycast6_fail: | 893 | proc_anycast6_fail: |
880 | ipv6_misc_proc_exit(); | 894 | ipv6_misc_proc_exit(); |
881 | proc_misc6_fail: | 895 | proc_misc6_fail: |
896 | udplite6_proc_exit(); | ||
897 | proc_udplite6_fail: | ||
882 | udp6_proc_exit(); | 898 | udp6_proc_exit(); |
883 | proc_udp6_fail: | 899 | proc_udp6_fail: |
884 | tcp6_proc_exit(); | 900 | tcp6_proc_exit(); |
@@ -902,6 +918,8 @@ out_unregister_sock: | |||
902 | sock_unregister(PF_INET6); | 918 | sock_unregister(PF_INET6); |
903 | out_unregister_raw_proto: | 919 | out_unregister_raw_proto: |
904 | proto_unregister(&rawv6_prot); | 920 | proto_unregister(&rawv6_prot); |
921 | out_unregister_udplite_proto: | ||
922 | proto_unregister(&udplitev6_prot); | ||
905 | out_unregister_udp_proto: | 923 | out_unregister_udp_proto: |
906 | proto_unregister(&udpv6_prot); | 924 | proto_unregister(&udpv6_prot); |
907 | out_unregister_tcp_proto: | 925 | out_unregister_tcp_proto: |
@@ -919,6 +937,7 @@ static void __exit inet6_exit(void) | |||
919 | ac6_proc_exit(); | 937 | ac6_proc_exit(); |
920 | ipv6_misc_proc_exit(); | 938 | ipv6_misc_proc_exit(); |
921 | udp6_proc_exit(); | 939 | udp6_proc_exit(); |
940 | udplite6_proc_exit(); | ||
922 | tcp6_proc_exit(); | 941 | tcp6_proc_exit(); |
923 | raw6_proc_exit(); | 942 | raw6_proc_exit(); |
924 | #endif | 943 | #endif |
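[Editor's note, not part of the patch] The inet->sport assignments above switch from ntohs() to htons(): inet->num holds the port in host order and sport is __be16, so the conversion must be host-to-network. For 16-bit values the two macros perform the same byte swap, which is why the change affects annotations (sparse's __be16 checking), not behaviour. A small userspace check, offered only as illustration:

    #include <stdio.h>
    #include <arpa/inet.h>

    int main(void)
    {
        unsigned short port = 5060;   /* host-order port number */

        /* Both lines print the same value on any architecture:
         * either both macros byte-swap (little-endian) or both are no-ops. */
        printf("htons: %#06x\n", (unsigned)htons(port));
        printf("ntohs: %#06x\n", (unsigned)ntohs(port));
        return 0;
    }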
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c index b0d83e8e4252..12c5a4dec09e 100644 --- a/net/ipv6/ah6.c +++ b/net/ipv6/ah6.c | |||
@@ -354,10 +354,9 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb) | |||
354 | if (!pskb_may_pull(skb, ah_hlen)) | 354 | if (!pskb_may_pull(skb, ah_hlen)) |
355 | goto out; | 355 | goto out; |
356 | 356 | ||
357 | tmp_hdr = kmalloc(hdr_len, GFP_ATOMIC); | 357 | tmp_hdr = kmemdup(skb->nh.raw, hdr_len, GFP_ATOMIC); |
358 | if (!tmp_hdr) | 358 | if (!tmp_hdr) |
359 | goto out; | 359 | goto out; |
360 | memcpy(tmp_hdr, skb->nh.raw, hdr_len); | ||
361 | if (ipv6_clear_mutable_options(skb->nh.ipv6h, hdr_len, XFRM_POLICY_IN)) | 360 | if (ipv6_clear_mutable_options(skb->nh.ipv6h, hdr_len, XFRM_POLICY_IN)) |
362 | goto free_out; | 361 | goto free_out; |
363 | skb->nh.ipv6h->priority = 0; | 362 | skb->nh.ipv6h->priority = 0; |
@@ -397,7 +396,7 @@ out: | |||
397 | } | 396 | } |
398 | 397 | ||
399 | static void ah6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | 398 | static void ah6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, |
400 | int type, int code, int offset, __u32 info) | 399 | int type, int code, int offset, __be32 info) |
401 | { | 400 | { |
402 | struct ipv6hdr *iph = (struct ipv6hdr*)skb->data; | 401 | struct ipv6hdr *iph = (struct ipv6hdr*)skb->data; |
403 | struct ip_auth_hdr *ah = (struct ip_auth_hdr*)(skb->data+offset); | 402 | struct ip_auth_hdr *ah = (struct ip_auth_hdr*)(skb->data+offset); |
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index 7206747022fc..5c94fea90e97 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c | |||
@@ -207,7 +207,7 @@ out: | |||
207 | } | 207 | } |
208 | 208 | ||
209 | void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, | 209 | void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, |
210 | u16 port, u32 info, u8 *payload) | 210 | __be16 port, u32 info, u8 *payload) |
211 | { | 211 | { |
212 | struct ipv6_pinfo *np = inet6_sk(sk); | 212 | struct ipv6_pinfo *np = inet6_sk(sk); |
213 | struct icmp6hdr *icmph = (struct icmp6hdr *)skb->h.raw; | 213 | struct icmp6hdr *icmph = (struct icmp6hdr *)skb->h.raw; |
@@ -318,13 +318,13 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len) | |||
318 | ipv6_addr_copy(&sin->sin6_addr, | 318 | ipv6_addr_copy(&sin->sin6_addr, |
319 | (struct in6_addr *)(skb->nh.raw + serr->addr_offset)); | 319 | (struct in6_addr *)(skb->nh.raw + serr->addr_offset)); |
320 | if (np->sndflow) | 320 | if (np->sndflow) |
321 | sin->sin6_flowinfo = *(u32*)(skb->nh.raw + serr->addr_offset - 24) & IPV6_FLOWINFO_MASK; | 321 | sin->sin6_flowinfo = *(__be32*)(skb->nh.raw + serr->addr_offset - 24) & IPV6_FLOWINFO_MASK; |
322 | if (ipv6_addr_type(&sin->sin6_addr) & IPV6_ADDR_LINKLOCAL) | 322 | if (ipv6_addr_type(&sin->sin6_addr) & IPV6_ADDR_LINKLOCAL) |
323 | sin->sin6_scope_id = IP6CB(skb)->iif; | 323 | sin->sin6_scope_id = IP6CB(skb)->iif; |
324 | } else { | 324 | } else { |
325 | ipv6_addr_set(&sin->sin6_addr, 0, 0, | 325 | ipv6_addr_set(&sin->sin6_addr, 0, 0, |
326 | htonl(0xffff), | 326 | htonl(0xffff), |
327 | *(u32*)(skb->nh.raw + serr->addr_offset)); | 327 | *(__be32*)(skb->nh.raw + serr->addr_offset)); |
328 | } | 328 | } |
329 | } | 329 | } |
330 | 330 | ||
@@ -397,12 +397,12 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb) | |||
397 | } | 397 | } |
398 | 398 | ||
399 | if (np->rxopt.bits.rxtclass) { | 399 | if (np->rxopt.bits.rxtclass) { |
400 | int tclass = (ntohl(*(u32 *)skb->nh.ipv6h) >> 20) & 0xff; | 400 | int tclass = (ntohl(*(__be32 *)skb->nh.ipv6h) >> 20) & 0xff; |
401 | put_cmsg(msg, SOL_IPV6, IPV6_TCLASS, sizeof(tclass), &tclass); | 401 | put_cmsg(msg, SOL_IPV6, IPV6_TCLASS, sizeof(tclass), &tclass); |
402 | } | 402 | } |
403 | 403 | ||
404 | if (np->rxopt.bits.rxflow && (*(u32*)skb->nh.raw & IPV6_FLOWINFO_MASK)) { | 404 | if (np->rxopt.bits.rxflow && (*(__be32*)skb->nh.raw & IPV6_FLOWINFO_MASK)) { |
405 | u32 flowinfo = *(u32*)skb->nh.raw & IPV6_FLOWINFO_MASK; | 405 | __be32 flowinfo = *(__be32*)skb->nh.raw & IPV6_FLOWINFO_MASK; |
406 | put_cmsg(msg, SOL_IPV6, IPV6_FLOWINFO, sizeof(flowinfo), &flowinfo); | 406 | put_cmsg(msg, SOL_IPV6, IPV6_FLOWINFO, sizeof(flowinfo), &flowinfo); |
407 | } | 407 | } |
408 | 408 | ||
@@ -560,12 +560,12 @@ int datagram_send_ctl(struct msghdr *msg, struct flowi *fl, | |||
560 | } | 560 | } |
561 | 561 | ||
562 | if (fl->fl6_flowlabel&IPV6_FLOWINFO_MASK) { | 562 | if (fl->fl6_flowlabel&IPV6_FLOWINFO_MASK) { |
563 | if ((fl->fl6_flowlabel^*(u32 *)CMSG_DATA(cmsg))&~IPV6_FLOWINFO_MASK) { | 563 | if ((fl->fl6_flowlabel^*(__be32 *)CMSG_DATA(cmsg))&~IPV6_FLOWINFO_MASK) { |
564 | err = -EINVAL; | 564 | err = -EINVAL; |
565 | goto exit_f; | 565 | goto exit_f; |
566 | } | 566 | } |
567 | } | 567 | } |
568 | fl->fl6_flowlabel = IPV6_FLOWINFO_MASK & *(u32 *)CMSG_DATA(cmsg); | 568 | fl->fl6_flowlabel = IPV6_FLOWINFO_MASK & *(__be32 *)CMSG_DATA(cmsg); |
569 | break; | 569 | break; |
570 | 570 | ||
571 | case IPV6_2292HOPOPTS: | 571 | case IPV6_2292HOPOPTS: |
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index e78680a9985b..25dcf69cd807 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c | |||
@@ -256,7 +256,7 @@ static u32 esp6_get_max_size(struct xfrm_state *x, int mtu) | |||
256 | } | 256 | } |
257 | 257 | ||
258 | static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | 258 | static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, |
259 | int type, int code, int offset, __u32 info) | 259 | int type, int code, int offset, __be32 info) |
260 | { | 260 | { |
261 | struct ipv6hdr *iph = (struct ipv6hdr*)skb->data; | 261 | struct ipv6hdr *iph = (struct ipv6hdr*)skb->data; |
262 | struct ipv6_esp_hdr *esph = (struct ipv6_esp_hdr*)(skb->data+offset); | 262 | struct ipv6_esp_hdr *esph = (struct ipv6_esp_hdr*)(skb->data+offset); |
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c index 88c96b10684c..0711f92d6a12 100644 --- a/net/ipv6/exthdrs.c +++ b/net/ipv6/exthdrs.c | |||
@@ -284,10 +284,12 @@ static int ipv6_destopt_rcv(struct sk_buff **skbp) | |||
284 | #ifdef CONFIG_IPV6_MIP6 | 284 | #ifdef CONFIG_IPV6_MIP6 |
285 | __u16 dstbuf; | 285 | __u16 dstbuf; |
286 | #endif | 286 | #endif |
287 | struct dst_entry *dst; | ||
287 | 288 | ||
288 | if (!pskb_may_pull(skb, (skb->h.raw-skb->data)+8) || | 289 | if (!pskb_may_pull(skb, (skb->h.raw-skb->data)+8) || |
289 | !pskb_may_pull(skb, (skb->h.raw-skb->data)+((skb->h.raw[1]+1)<<3))) { | 290 | !pskb_may_pull(skb, (skb->h.raw-skb->data)+((skb->h.raw[1]+1)<<3))) { |
290 | IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS); | 291 | IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), |
292 | IPSTATS_MIB_INHDRERRORS); | ||
291 | kfree_skb(skb); | 293 | kfree_skb(skb); |
292 | return -1; | 294 | return -1; |
293 | } | 295 | } |
@@ -298,7 +300,9 @@ static int ipv6_destopt_rcv(struct sk_buff **skbp) | |||
298 | dstbuf = opt->dst1; | 300 | dstbuf = opt->dst1; |
299 | #endif | 301 | #endif |
300 | 302 | ||
303 | dst = dst_clone(skb->dst); | ||
301 | if (ip6_parse_tlv(tlvprocdestopt_lst, skbp)) { | 304 | if (ip6_parse_tlv(tlvprocdestopt_lst, skbp)) { |
305 | dst_release(dst); | ||
302 | skb = *skbp; | 306 | skb = *skbp; |
303 | skb->h.raw += ((skb->h.raw[1]+1)<<3); | 307 | skb->h.raw += ((skb->h.raw[1]+1)<<3); |
304 | opt = IP6CB(skb); | 308 | opt = IP6CB(skb); |
@@ -310,7 +314,8 @@ static int ipv6_destopt_rcv(struct sk_buff **skbp) | |||
310 | return 1; | 314 | return 1; |
311 | } | 315 | } |
312 | 316 | ||
313 | IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS); | 317 | IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS); |
318 | dst_release(dst); | ||
314 | return -1; | 319 | return -1; |
315 | } | 320 | } |
316 | 321 | ||
@@ -365,7 +370,8 @@ static int ipv6_rthdr_rcv(struct sk_buff **skbp) | |||
365 | 370 | ||
366 | if (!pskb_may_pull(skb, (skb->h.raw-skb->data)+8) || | 371 | if (!pskb_may_pull(skb, (skb->h.raw-skb->data)+8) || |
367 | !pskb_may_pull(skb, (skb->h.raw-skb->data)+((skb->h.raw[1]+1)<<3))) { | 372 | !pskb_may_pull(skb, (skb->h.raw-skb->data)+((skb->h.raw[1]+1)<<3))) { |
368 | IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS); | 373 | IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), |
374 | IPSTATS_MIB_INHDRERRORS); | ||
369 | kfree_skb(skb); | 375 | kfree_skb(skb); |
370 | return -1; | 376 | return -1; |
371 | } | 377 | } |
@@ -374,7 +380,8 @@ static int ipv6_rthdr_rcv(struct sk_buff **skbp) | |||
374 | 380 | ||
375 | if (ipv6_addr_is_multicast(&skb->nh.ipv6h->daddr) || | 381 | if (ipv6_addr_is_multicast(&skb->nh.ipv6h->daddr) || |
376 | skb->pkt_type != PACKET_HOST) { | 382 | skb->pkt_type != PACKET_HOST) { |
377 | IP6_INC_STATS_BH(IPSTATS_MIB_INADDRERRORS); | 383 | IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), |
384 | IPSTATS_MIB_INADDRERRORS); | ||
378 | kfree_skb(skb); | 385 | kfree_skb(skb); |
379 | return -1; | 386 | return -1; |
380 | } | 387 | } |
@@ -388,7 +395,8 @@ looped_back: | |||
388 | * processed by own | 395 | * processed by own |
389 | */ | 396 | */ |
390 | if (!addr) { | 397 | if (!addr) { |
391 | IP6_INC_STATS_BH(IPSTATS_MIB_INADDRERRORS); | 398 | IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), |
399 | IPSTATS_MIB_INADDRERRORS); | ||
392 | kfree_skb(skb); | 400 | kfree_skb(skb); |
393 | return -1; | 401 | return -1; |
394 | } | 402 | } |
@@ -410,7 +418,8 @@ looped_back: | |||
410 | switch (hdr->type) { | 418 | switch (hdr->type) { |
411 | case IPV6_SRCRT_TYPE_0: | 419 | case IPV6_SRCRT_TYPE_0: |
412 | if (hdr->hdrlen & 0x01) { | 420 | if (hdr->hdrlen & 0x01) { |
413 | IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS); | 421 | IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), |
422 | IPSTATS_MIB_INHDRERRORS); | ||
414 | icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, (&hdr->hdrlen) - skb->nh.raw); | 423 | icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, (&hdr->hdrlen) - skb->nh.raw); |
415 | return -1; | 424 | return -1; |
416 | } | 425 | } |
@@ -419,14 +428,16 @@ looped_back: | |||
419 | case IPV6_SRCRT_TYPE_2: | 428 | case IPV6_SRCRT_TYPE_2: |
420 | /* Silently discard invalid RTH type 2 */ | 429 | /* Silently discard invalid RTH type 2 */ |
421 | if (hdr->hdrlen != 2 || hdr->segments_left != 1) { | 430 | if (hdr->hdrlen != 2 || hdr->segments_left != 1) { |
422 | IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS); | 431 | IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), |
432 | IPSTATS_MIB_INHDRERRORS); | ||
423 | kfree_skb(skb); | 433 | kfree_skb(skb); |
424 | return -1; | 434 | return -1; |
425 | } | 435 | } |
426 | break; | 436 | break; |
427 | #endif | 437 | #endif |
428 | default: | 438 | default: |
429 | IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS); | 439 | IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), |
440 | IPSTATS_MIB_INHDRERRORS); | ||
430 | icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, (&hdr->type) - skb->nh.raw); | 441 | icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, (&hdr->type) - skb->nh.raw); |
431 | return -1; | 442 | return -1; |
432 | } | 443 | } |
@@ -439,7 +450,8 @@ looped_back: | |||
439 | n = hdr->hdrlen >> 1; | 450 | n = hdr->hdrlen >> 1; |
440 | 451 | ||
441 | if (hdr->segments_left > n) { | 452 | if (hdr->segments_left > n) { |
442 | IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS); | 453 | IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), |
454 | IPSTATS_MIB_INHDRERRORS); | ||
443 | icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, (&hdr->segments_left) - skb->nh.raw); | 455 | icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, (&hdr->segments_left) - skb->nh.raw); |
444 | return -1; | 456 | return -1; |
445 | } | 457 | } |
@@ -449,12 +461,14 @@ looped_back: | |||
449 | */ | 461 | */ |
450 | if (skb_cloned(skb)) { | 462 | if (skb_cloned(skb)) { |
451 | struct sk_buff *skb2 = skb_copy(skb, GFP_ATOMIC); | 463 | struct sk_buff *skb2 = skb_copy(skb, GFP_ATOMIC); |
452 | kfree_skb(skb); | ||
453 | /* the copy is a forwarded packet */ | 464 | /* the copy is a forwarded packet */ |
454 | if (skb2 == NULL) { | 465 | if (skb2 == NULL) { |
455 | IP6_INC_STATS_BH(IPSTATS_MIB_OUTDISCARDS); | 466 | IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), |
467 | IPSTATS_MIB_OUTDISCARDS); | ||
468 | kfree_skb(skb); | ||
456 | return -1; | 469 | return -1; |
457 | } | 470 | } |
471 | kfree_skb(skb); | ||
458 | *skbp = skb = skb2; | 472 | *skbp = skb = skb2; |
459 | opt = IP6CB(skb2); | 473 | opt = IP6CB(skb2); |
460 | hdr = (struct ipv6_rt_hdr *) skb2->h.raw; | 474 | hdr = (struct ipv6_rt_hdr *) skb2->h.raw; |
@@ -475,12 +489,14 @@ looped_back: | |||
475 | if (xfrm6_input_addr(skb, (xfrm_address_t *)addr, | 489 | if (xfrm6_input_addr(skb, (xfrm_address_t *)addr, |
476 | (xfrm_address_t *)&skb->nh.ipv6h->saddr, | 490 | (xfrm_address_t *)&skb->nh.ipv6h->saddr, |
477 | IPPROTO_ROUTING) < 0) { | 491 | IPPROTO_ROUTING) < 0) { |
478 | IP6_INC_STATS_BH(IPSTATS_MIB_INADDRERRORS); | 492 | IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), |
493 | IPSTATS_MIB_INADDRERRORS); | ||
479 | kfree_skb(skb); | 494 | kfree_skb(skb); |
480 | return -1; | 495 | return -1; |
481 | } | 496 | } |
482 | if (!ipv6_chk_home_addr(addr)) { | 497 | if (!ipv6_chk_home_addr(addr)) { |
483 | IP6_INC_STATS_BH(IPSTATS_MIB_INADDRERRORS); | 498 | IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), |
499 | IPSTATS_MIB_INADDRERRORS); | ||
484 | kfree_skb(skb); | 500 | kfree_skb(skb); |
485 | return -1; | 501 | return -1; |
486 | } | 502 | } |
@@ -491,7 +507,8 @@ looped_back: | |||
491 | } | 507 | } |
492 | 508 | ||
493 | if (ipv6_addr_is_multicast(addr)) { | 509 | if (ipv6_addr_is_multicast(addr)) { |
494 | IP6_INC_STATS_BH(IPSTATS_MIB_INADDRERRORS); | 510 | IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), |
511 | IPSTATS_MIB_INADDRERRORS); | ||
495 | kfree_skb(skb); | 512 | kfree_skb(skb); |
496 | return -1; | 513 | return -1; |
497 | } | 514 | } |
@@ -510,7 +527,8 @@ looped_back: | |||
510 | 527 | ||
511 | if (skb->dst->dev->flags&IFF_LOOPBACK) { | 528 | if (skb->dst->dev->flags&IFF_LOOPBACK) { |
512 | if (skb->nh.ipv6h->hop_limit <= 1) { | 529 | if (skb->nh.ipv6h->hop_limit <= 1) { |
513 | IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS); | 530 | IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), |
531 | IPSTATS_MIB_INHDRERRORS); | ||
514 | icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, | 532 | icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, |
515 | 0, skb->dev); | 533 | 0, skb->dev); |
516 | kfree_skb(skb); | 534 | kfree_skb(skb); |
@@ -632,24 +650,25 @@ static int ipv6_hop_jumbo(struct sk_buff **skbp, int optoff) | |||
632 | if (skb->nh.raw[optoff+1] != 4 || (optoff&3) != 2) { | 650 | if (skb->nh.raw[optoff+1] != 4 || (optoff&3) != 2) { |
633 | LIMIT_NETDEBUG(KERN_DEBUG "ipv6_hop_jumbo: wrong jumbo opt length/alignment %d\n", | 651 | LIMIT_NETDEBUG(KERN_DEBUG "ipv6_hop_jumbo: wrong jumbo opt length/alignment %d\n", |
634 | skb->nh.raw[optoff+1]); | 652 | skb->nh.raw[optoff+1]); |
635 | IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS); | 653 | IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), |
654 | IPSTATS_MIB_INHDRERRORS); | ||
636 | goto drop; | 655 | goto drop; |
637 | } | 656 | } |
638 | 657 | ||
639 | pkt_len = ntohl(*(u32*)(skb->nh.raw+optoff+2)); | 658 | pkt_len = ntohl(*(__be32*)(skb->nh.raw+optoff+2)); |
640 | if (pkt_len <= IPV6_MAXPLEN) { | 659 | if (pkt_len <= IPV6_MAXPLEN) { |
641 | IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS); | 660 | IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_INHDRERRORS); |
642 | icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff+2); | 661 | icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff+2); |
643 | return 0; | 662 | return 0; |
644 | } | 663 | } |
645 | if (skb->nh.ipv6h->payload_len) { | 664 | if (skb->nh.ipv6h->payload_len) { |
646 | IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS); | 665 | IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_INHDRERRORS); |
647 | icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff); | 666 | icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff); |
648 | return 0; | 667 | return 0; |
649 | } | 668 | } |
650 | 669 | ||
651 | if (pkt_len > skb->len - sizeof(struct ipv6hdr)) { | 670 | if (pkt_len > skb->len - sizeof(struct ipv6hdr)) { |
652 | IP6_INC_STATS_BH(IPSTATS_MIB_INTRUNCATEDPKTS); | 671 | IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_INTRUNCATEDPKTS); |
653 | goto drop; | 672 | goto drop; |
654 | } | 673 | } |
655 | 674 | ||
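The recurring change in the hunks above (and throughout this series) turns the one-argument IP6_INC_STATS_BH() calls into a two-argument form that also passes the inet6_dev obtained from ip6_dst_idev(skb->dst), so the MIB counters can be kept per interface as well as globally. Below is a minimal stand-alone sketch of that bump-both-counters pattern; the names (fake_idev, ip6_inc_stats) are hypothetical and not the kernel's actual macro.

	#include <stdio.h>

	enum { MIB_INHDRERRORS, MIB_INADDRERRORS, MIB_MAX };

	struct fake_idev {
		const char *name;
		unsigned long mibs[MIB_MAX];		/* per-interface counters */
	};

	static unsigned long global_mibs[MIB_MAX];	/* subsystem-wide counters */

	/* Bump the global counter and, when a device context is known, the
	 * per-interface one -- the pattern the two-argument macro enables. */
	static void ip6_inc_stats(struct fake_idev *idev, int field)
	{
		global_mibs[field]++;
		if (idev)
			idev->mibs[field]++;
	}

	int main(void)
	{
		struct fake_idev eth0 = { .name = "eth0" };

		ip6_inc_stats(&eth0, MIB_INHDRERRORS);	/* error seen on eth0 */
		ip6_inc_stats(NULL, MIB_INHDRERRORS);	/* no device context */

		printf("global=%lu %s=%lu\n", global_mibs[MIB_INHDRERRORS],
		       eth0.name, eth0.mibs[MIB_INHDRERRORS]);
		return 0;
	}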
diff --git a/net/ipv6/exthdrs_core.c b/net/ipv6/exthdrs_core.c index 315bc1fbec3f..21cbbbddaf4d 100644 --- a/net/ipv6/exthdrs_core.c +++ b/net/ipv6/exthdrs_core.c | |||
@@ -77,7 +77,7 @@ int ipv6_skip_exthdr(const struct sk_buff *skb, int start, u8 *nexthdrp) | |||
77 | if (hp == NULL) | 77 | if (hp == NULL) |
78 | return -1; | 78 | return -1; |
79 | if (nexthdr == NEXTHDR_FRAGMENT) { | 79 | if (nexthdr == NEXTHDR_FRAGMENT) { |
80 | unsigned short _frag_off, *fp; | 80 | __be16 _frag_off, *fp; |
81 | fp = skb_header_pointer(skb, | 81 | fp = skb_header_pointer(skb, |
82 | start+offsetof(struct frag_hdr, | 82 | start+offsetof(struct frag_hdr, |
83 | frag_off), | 83 | frag_off), |
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c index 1896ecb52899..0862809ffcf7 100644 --- a/net/ipv6/fib6_rules.c +++ b/net/ipv6/fib6_rules.c | |||
@@ -25,10 +25,6 @@ struct fib6_rule | |||
25 | struct fib_rule common; | 25 | struct fib_rule common; |
26 | struct rt6key src; | 26 | struct rt6key src; |
27 | struct rt6key dst; | 27 | struct rt6key dst; |
28 | #ifdef CONFIG_IPV6_ROUTE_FWMARK | ||
29 | u32 fwmark; | ||
30 | u32 fwmask; | ||
31 | #endif | ||
32 | u8 tclass; | 28 | u8 tclass; |
33 | }; | 29 | }; |
34 | 30 | ||
@@ -67,7 +63,7 @@ struct dst_entry *fib6_rule_lookup(struct flowi *fl, int flags, | |||
67 | fib_rule_put(arg.rule); | 63 | fib_rule_put(arg.rule); |
68 | 64 | ||
69 | if (arg.result) | 65 | if (arg.result) |
70 | return (struct dst_entry *) arg.result; | 66 | return arg.result; |
71 | 67 | ||
72 | dst_hold(&ip6_null_entry.u.dst); | 68 | dst_hold(&ip6_null_entry.u.dst); |
73 | return &ip6_null_entry.u.dst; | 69 | return &ip6_null_entry.u.dst; |
@@ -130,22 +126,13 @@ static int fib6_rule_match(struct fib_rule *rule, struct flowi *fl, int flags) | |||
130 | if (r->tclass && r->tclass != ((ntohl(fl->fl6_flowlabel) >> 20) & 0xff)) | 126 | if (r->tclass && r->tclass != ((ntohl(fl->fl6_flowlabel) >> 20) & 0xff)) |
131 | return 0; | 127 | return 0; |
132 | 128 | ||
133 | #ifdef CONFIG_IPV6_ROUTE_FWMARK | ||
134 | if ((r->fwmark ^ fl->fl6_fwmark) & r->fwmask) | ||
135 | return 0; | ||
136 | #endif | ||
137 | |||
138 | return 1; | 129 | return 1; |
139 | } | 130 | } |
140 | 131 | ||
141 | static struct nla_policy fib6_rule_policy[FRA_MAX+1] __read_mostly = { | 132 | static struct nla_policy fib6_rule_policy[FRA_MAX+1] __read_mostly = { |
142 | [FRA_IFNAME] = { .type = NLA_STRING, .len = IFNAMSIZ - 1 }, | 133 | FRA_GENERIC_POLICY, |
143 | [FRA_PRIORITY] = { .type = NLA_U32 }, | ||
144 | [FRA_SRC] = { .len = sizeof(struct in6_addr) }, | 134 | [FRA_SRC] = { .len = sizeof(struct in6_addr) }, |
145 | [FRA_DST] = { .len = sizeof(struct in6_addr) }, | 135 | [FRA_DST] = { .len = sizeof(struct in6_addr) }, |
146 | [FRA_FWMARK] = { .type = NLA_U32 }, | ||
147 | [FRA_FWMASK] = { .type = NLA_U32 }, | ||
148 | [FRA_TABLE] = { .type = NLA_U32 }, | ||
149 | }; | 136 | }; |
150 | 137 | ||
151 | static int fib6_rule_configure(struct fib_rule *rule, struct sk_buff *skb, | 138 | static int fib6_rule_configure(struct fib_rule *rule, struct sk_buff *skb, |
@@ -155,8 +142,7 @@ static int fib6_rule_configure(struct fib_rule *rule, struct sk_buff *skb, | |||
155 | int err = -EINVAL; | 142 | int err = -EINVAL; |
156 | struct fib6_rule *rule6 = (struct fib6_rule *) rule; | 143 | struct fib6_rule *rule6 = (struct fib6_rule *) rule; |
157 | 144 | ||
158 | if (frh->src_len > 128 || frh->dst_len > 128 || | 145 | if (frh->src_len > 128 || frh->dst_len > 128) |
159 | (frh->tos & ~IPV6_FLOWINFO_MASK)) | ||
160 | goto errout; | 146 | goto errout; |
161 | 147 | ||
162 | if (rule->action == FR_ACT_TO_TBL) { | 148 | if (rule->action == FR_ACT_TO_TBL) { |
@@ -177,23 +163,6 @@ static int fib6_rule_configure(struct fib_rule *rule, struct sk_buff *skb, | |||
177 | nla_memcpy(&rule6->dst.addr, tb[FRA_DST], | 163 | nla_memcpy(&rule6->dst.addr, tb[FRA_DST], |
178 | sizeof(struct in6_addr)); | 164 | sizeof(struct in6_addr)); |
179 | 165 | ||
180 | #ifdef CONFIG_IPV6_ROUTE_FWMARK | ||
181 | if (tb[FRA_FWMARK]) { | ||
182 | rule6->fwmark = nla_get_u32(tb[FRA_FWMARK]); | ||
183 | if (rule6->fwmark) { | ||
184 | /* | ||
185 | * if the mark value is non-zero, | ||
186 | * all bits are compared by default | ||
187 | * unless a mask is explicitly specified. | ||
188 | */ | ||
189 | rule6->fwmask = 0xFFFFFFFF; | ||
190 | } | ||
191 | } | ||
192 | |||
193 | if (tb[FRA_FWMASK]) | ||
194 | rule6->fwmask = nla_get_u32(tb[FRA_FWMASK]); | ||
195 | #endif | ||
196 | |||
197 | rule6->src.plen = frh->src_len; | 166 | rule6->src.plen = frh->src_len; |
198 | rule6->dst.plen = frh->dst_len; | 167 | rule6->dst.plen = frh->dst_len; |
199 | rule6->tclass = frh->tos; | 168 | rule6->tclass = frh->tos; |
@@ -225,14 +194,6 @@ static int fib6_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh, | |||
225 | nla_memcmp(tb[FRA_DST], &rule6->dst.addr, sizeof(struct in6_addr))) | 194 | nla_memcmp(tb[FRA_DST], &rule6->dst.addr, sizeof(struct in6_addr))) |
226 | return 0; | 195 | return 0; |
227 | 196 | ||
228 | #ifdef CONFIG_IPV6_ROUTE_FWMARK | ||
229 | if (tb[FRA_FWMARK] && (rule6->fwmark != nla_get_u32(tb[FRA_FWMARK]))) | ||
230 | return 0; | ||
231 | |||
232 | if (tb[FRA_FWMASK] && (rule6->fwmask != nla_get_u32(tb[FRA_FWMASK]))) | ||
233 | return 0; | ||
234 | #endif | ||
235 | |||
236 | return 1; | 197 | return 1; |
237 | } | 198 | } |
238 | 199 | ||
@@ -254,14 +215,6 @@ static int fib6_rule_fill(struct fib_rule *rule, struct sk_buff *skb, | |||
254 | NLA_PUT(skb, FRA_SRC, sizeof(struct in6_addr), | 215 | NLA_PUT(skb, FRA_SRC, sizeof(struct in6_addr), |
255 | &rule6->src.addr); | 216 | &rule6->src.addr); |
256 | 217 | ||
257 | #ifdef CONFIG_IPV6_ROUTE_FWMARK | ||
258 | if (rule6->fwmark) | ||
259 | NLA_PUT_U32(skb, FRA_FWMARK, rule6->fwmark); | ||
260 | |||
261 | if (rule6->fwmask || rule6->fwmark) | ||
262 | NLA_PUT_U32(skb, FRA_FWMASK, rule6->fwmask); | ||
263 | #endif | ||
264 | |||
265 | return 0; | 218 | return 0; |
266 | 219 | ||
267 | nla_put_failure: | 220 | nla_put_failure: |
@@ -278,6 +231,12 @@ static u32 fib6_rule_default_pref(void) | |||
278 | return 0x3FFF; | 231 | return 0x3FFF; |
279 | } | 232 | } |
280 | 233 | ||
234 | static size_t fib6_rule_nlmsg_payload(struct fib_rule *rule) | ||
235 | { | ||
236 | return nla_total_size(16) /* dst */ | ||
237 | + nla_total_size(16); /* src */ | ||
238 | } | ||
239 | |||
281 | static struct fib_rules_ops fib6_rules_ops = { | 240 | static struct fib_rules_ops fib6_rules_ops = { |
282 | .family = AF_INET6, | 241 | .family = AF_INET6, |
283 | .rule_size = sizeof(struct fib6_rule), | 242 | .rule_size = sizeof(struct fib6_rule), |
@@ -287,6 +246,7 @@ static struct fib_rules_ops fib6_rules_ops = { | |||
287 | .compare = fib6_rule_compare, | 246 | .compare = fib6_rule_compare, |
288 | .fill = fib6_rule_fill, | 247 | .fill = fib6_rule_fill, |
289 | .default_pref = fib6_rule_default_pref, | 248 | .default_pref = fib6_rule_default_pref, |
249 | .nlmsg_payload = fib6_rule_nlmsg_payload, | ||
290 | .nlgroup = RTNLGRP_IPV6_RULE, | 250 | .nlgroup = RTNLGRP_IPV6_RULE, |
291 | .policy = fib6_rule_policy, | 251 | .policy = fib6_rule_policy, |
292 | .rules_list = &fib6_rules, | 252 | .rules_list = &fib6_rules, |
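The new fib6_rule_nlmsg_payload() hook above lets the generic fib_rules code size its rule notifications; for IPv6 it only has to reserve room for the two 16-byte address attributes, hence the pair of nla_total_size(16) calls. Below is a stand-alone sketch of that arithmetic, assuming the usual netlink attribute layout (4-byte header, payload padded to 4-byte alignment); nla_total_size_sketch is an illustrative name, not the kernel helper.

	#include <stdio.h>

	/* Mirror of the usual netlink attribute layout: a 4-byte header
	 * (length + type) followed by the payload, padded to 4 bytes. */
	struct nlattr_like { unsigned short nla_len; unsigned short nla_type; };

	#define NLA_ALIGNTO	4
	#define NLA_ALIGN(len)	(((len) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1))
	#define NLA_HDRLEN	((int) NLA_ALIGN(sizeof(struct nlattr_like)))

	static int nla_total_size_sketch(int payload)
	{
		return NLA_ALIGN(NLA_HDRLEN + payload);
	}

	int main(void)
	{
		/* FRA_SRC and FRA_DST each carry a 16-byte IPv6 address. */
		int per_rule = nla_total_size_sketch(16) + nla_total_size_sketch(16);

		printf("extra notification space per IPv6 rule: %d bytes\n", per_rule);
		return 0;
	}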
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index 4ec876066b3f..3dcc4b7f41b4 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c | |||
@@ -177,7 +177,8 @@ static inline int icmpv6_xrlim_allow(struct sock *sk, int type, | |||
177 | */ | 177 | */ |
178 | dst = ip6_route_output(sk, fl); | 178 | dst = ip6_route_output(sk, fl); |
179 | if (dst->error) { | 179 | if (dst->error) { |
180 | IP6_INC_STATS(IPSTATS_MIB_OUTNOROUTES); | 180 | IP6_INC_STATS(ip6_dst_idev(dst), |
181 | IPSTATS_MIB_OUTNOROUTES); | ||
181 | } else if (dst->dev && (dst->dev->flags&IFF_LOOPBACK)) { | 182 | } else if (dst->dev && (dst->dev->flags&IFF_LOOPBACK)) { |
182 | res = 1; | 183 | res = 1; |
183 | } else { | 184 | } else { |
@@ -233,7 +234,7 @@ static int icmpv6_push_pending_frames(struct sock *sk, struct flowi *fl, struct | |||
233 | len, fl->proto, | 234 | len, fl->proto, |
234 | skb->csum); | 235 | skb->csum); |
235 | } else { | 236 | } else { |
236 | u32 tmp_csum = 0; | 237 | __wsum tmp_csum = 0; |
237 | 238 | ||
238 | skb_queue_walk(&sk->sk_write_queue, skb) { | 239 | skb_queue_walk(&sk->sk_write_queue, skb) { |
239 | tmp_csum = csum_add(tmp_csum, skb->csum); | 240 | tmp_csum = csum_add(tmp_csum, skb->csum); |
@@ -241,13 +242,11 @@ static int icmpv6_push_pending_frames(struct sock *sk, struct flowi *fl, struct | |||
241 | 242 | ||
242 | tmp_csum = csum_partial((char *)icmp6h, | 243 | tmp_csum = csum_partial((char *)icmp6h, |
243 | sizeof(struct icmp6hdr), tmp_csum); | 244 | sizeof(struct icmp6hdr), tmp_csum); |
244 | tmp_csum = csum_ipv6_magic(&fl->fl6_src, | 245 | icmp6h->icmp6_cksum = csum_ipv6_magic(&fl->fl6_src, |
245 | &fl->fl6_dst, | 246 | &fl->fl6_dst, |
246 | len, fl->proto, tmp_csum); | 247 | len, fl->proto, |
247 | icmp6h->icmp6_cksum = tmp_csum; | 248 | tmp_csum); |
248 | } | 249 | } |
249 | if (icmp6h->icmp6_cksum == 0) | ||
250 | icmp6h->icmp6_cksum = -1; | ||
251 | ip6_push_pending_frames(sk); | 250 | ip6_push_pending_frames(sk); |
252 | out: | 251 | out: |
253 | return err; | 252 | return err; |
@@ -263,7 +262,7 @@ static int icmpv6_getfrag(void *from, char *to, int offset, int len, int odd, st | |||
263 | { | 262 | { |
264 | struct icmpv6_msg *msg = (struct icmpv6_msg *) from; | 263 | struct icmpv6_msg *msg = (struct icmpv6_msg *) from; |
265 | struct sk_buff *org_skb = msg->skb; | 264 | struct sk_buff *org_skb = msg->skb; |
266 | __u32 csum = 0; | 265 | __wsum csum = 0; |
267 | 266 | ||
268 | csum = skb_copy_and_csum_bits(org_skb, msg->offset + offset, | 267 | csum = skb_copy_and_csum_bits(org_skb, msg->offset + offset, |
269 | to, len, csum); | 268 | to, len, csum); |
@@ -555,7 +554,7 @@ out: | |||
555 | icmpv6_xmit_unlock(); | 554 | icmpv6_xmit_unlock(); |
556 | } | 555 | } |
557 | 556 | ||
558 | static void icmpv6_notify(struct sk_buff *skb, int type, int code, u32 info) | 557 | static void icmpv6_notify(struct sk_buff *skb, int type, int code, __be32 info) |
559 | { | 558 | { |
560 | struct in6_addr *saddr, *daddr; | 559 | struct in6_addr *saddr, *daddr; |
561 | struct inet6_protocol *ipprot; | 560 | struct inet6_protocol *ipprot; |
@@ -637,8 +636,8 @@ static int icmpv6_rcv(struct sk_buff **pskb) | |||
637 | break; | 636 | break; |
638 | /* fall through */ | 637 | /* fall through */ |
639 | case CHECKSUM_NONE: | 638 | case CHECKSUM_NONE: |
640 | skb->csum = ~csum_ipv6_magic(saddr, daddr, skb->len, | 639 | skb->csum = ~csum_unfold(csum_ipv6_magic(saddr, daddr, skb->len, |
641 | IPPROTO_ICMPV6, 0); | 640 | IPPROTO_ICMPV6, 0)); |
642 | if (__skb_checksum_complete(skb)) { | 641 | if (__skb_checksum_complete(skb)) { |
643 | LIMIT_NETDEBUG(KERN_DEBUG "ICMPv6 checksum failed [" NIP6_FMT " > " NIP6_FMT "]\n", | 642 | LIMIT_NETDEBUG(KERN_DEBUG "ICMPv6 checksum failed [" NIP6_FMT " > " NIP6_FMT "]\n", |
644 | NIP6(*saddr), NIP6(*daddr)); | 643 | NIP6(*saddr), NIP6(*daddr)); |
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c index 827f41d1478b..c700302ad51a 100644 --- a/net/ipv6/inet6_connection_sock.c +++ b/net/ipv6/inet6_connection_sock.c | |||
@@ -52,20 +52,20 @@ EXPORT_SYMBOL_GPL(inet6_csk_bind_conflict); | |||
52 | /* | 52 | /* |
53 | * request_sock (formerly open request) hash tables. | 53 | * request_sock (formerly open request) hash tables. |
54 | */ | 54 | */ |
55 | static u32 inet6_synq_hash(const struct in6_addr *raddr, const u16 rport, | 55 | static u32 inet6_synq_hash(const struct in6_addr *raddr, const __be16 rport, |
56 | const u32 rnd, const u16 synq_hsize) | 56 | const u32 rnd, const u16 synq_hsize) |
57 | { | 57 | { |
58 | u32 a = raddr->s6_addr32[0]; | 58 | u32 a = (__force u32)raddr->s6_addr32[0]; |
59 | u32 b = raddr->s6_addr32[1]; | 59 | u32 b = (__force u32)raddr->s6_addr32[1]; |
60 | u32 c = raddr->s6_addr32[2]; | 60 | u32 c = (__force u32)raddr->s6_addr32[2]; |
61 | 61 | ||
62 | a += JHASH_GOLDEN_RATIO; | 62 | a += JHASH_GOLDEN_RATIO; |
63 | b += JHASH_GOLDEN_RATIO; | 63 | b += JHASH_GOLDEN_RATIO; |
64 | c += rnd; | 64 | c += rnd; |
65 | __jhash_mix(a, b, c); | 65 | __jhash_mix(a, b, c); |
66 | 66 | ||
67 | a += raddr->s6_addr32[3]; | 67 | a += (__force u32)raddr->s6_addr32[3]; |
68 | b += (u32)rport; | 68 | b += (__force u32)rport; |
69 | __jhash_mix(a, b, c); | 69 | __jhash_mix(a, b, c); |
70 | 70 | ||
71 | return c & (synq_hsize - 1); | 71 | return c & (synq_hsize - 1); |
@@ -73,7 +73,7 @@ static u32 inet6_synq_hash(const struct in6_addr *raddr, const u16 rport, | |||
73 | 73 | ||
74 | struct request_sock *inet6_csk_search_req(const struct sock *sk, | 74 | struct request_sock *inet6_csk_search_req(const struct sock *sk, |
75 | struct request_sock ***prevp, | 75 | struct request_sock ***prevp, |
76 | const __u16 rport, | 76 | const __be16 rport, |
77 | const struct in6_addr *raddr, | 77 | const struct in6_addr *raddr, |
78 | const struct in6_addr *laddr, | 78 | const struct in6_addr *laddr, |
79 | const int iif) | 79 | const int iif) |
@@ -139,9 +139,8 @@ void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr * uaddr) | |||
139 | 139 | ||
140 | EXPORT_SYMBOL_GPL(inet6_csk_addr2sockaddr); | 140 | EXPORT_SYMBOL_GPL(inet6_csk_addr2sockaddr); |
141 | 141 | ||
142 | int inet6_csk_xmit(struct sk_buff *skb, int ipfragok) | 142 | int inet6_csk_xmit(struct sk_buff *skb, struct sock *sk, int ipfragok) |
143 | { | 143 | { |
144 | struct sock *sk = skb->sk; | ||
145 | struct inet_sock *inet = inet_sk(sk); | 144 | struct inet_sock *inet = inet_sk(sk); |
146 | struct ipv6_pinfo *np = inet6_sk(sk); | 145 | struct ipv6_pinfo *np = inet6_sk(sk); |
147 | struct flowi fl; | 146 | struct flowi fl; |
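The u16/u32 to __be16/__be32 conversions in this file are sparse endianness annotations: the values keep their on-wire byte order, and the __force casts mark the places where they are deliberately fed into byte-order-agnostic code such as the jhash mix. A small sketch of how such annotations behave follows, using hypothetical type and macro names rather than the kernel's <linux/types.h> definitions; under a plain compiler the attributes vanish, under sparse they flag unmarked mixing of byte orders.

	#ifdef __CHECKER__
	# define bitwise_attr __attribute__((bitwise))
	# define force_attr   __attribute__((force))
	#else
	# define bitwise_attr
	# define force_attr
	#endif

	typedef unsigned int   u32_t;
	typedef unsigned short u16_t;
	typedef u32_t bitwise_attr be32_t;	/* stored in network byte order */
	typedef u16_t bitwise_attr be16_t;

	/* A hash only needs stable bytes, not host order, so the stored
	 * big-endian words are folded in via force casts, not byte swaps. */
	static u32_t hash_fold(be32_t addr_word, be16_t port, u32_t seed)
	{
		return seed ^ (force_attr u32_t)addr_word ^ (force_attr u32_t)port;
	}

	int main(void)
	{
		be32_t w = (force_attr be32_t)0x0a000001u;	/* pretend on-wire word */
		be16_t p = (force_attr be16_t)0x1f90;		/* pretend on-wire port */

		return (int)(hash_fold(w, p, 0x9e3779b9u) & 0x7f);
	}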
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c index 8accd1fbeeda..b7e5bae0e347 100644 --- a/net/ipv6/inet6_hashtables.c +++ b/net/ipv6/inet6_hashtables.c | |||
@@ -57,7 +57,7 @@ EXPORT_SYMBOL(__inet6_hash); | |||
57 | */ | 57 | */ |
58 | struct sock *__inet6_lookup_established(struct inet_hashinfo *hashinfo, | 58 | struct sock *__inet6_lookup_established(struct inet_hashinfo *hashinfo, |
59 | const struct in6_addr *saddr, | 59 | const struct in6_addr *saddr, |
60 | const u16 sport, | 60 | const __be16 sport, |
61 | const struct in6_addr *daddr, | 61 | const struct in6_addr *daddr, |
62 | const u16 hnum, | 62 | const u16 hnum, |
63 | const int dif) | 63 | const int dif) |
@@ -146,8 +146,8 @@ struct sock *inet6_lookup_listener(struct inet_hashinfo *hashinfo, | |||
146 | EXPORT_SYMBOL_GPL(inet6_lookup_listener); | 146 | EXPORT_SYMBOL_GPL(inet6_lookup_listener); |
147 | 147 | ||
148 | struct sock *inet6_lookup(struct inet_hashinfo *hashinfo, | 148 | struct sock *inet6_lookup(struct inet_hashinfo *hashinfo, |
149 | const struct in6_addr *saddr, const u16 sport, | 149 | const struct in6_addr *saddr, const __be16 sport, |
150 | const struct in6_addr *daddr, const u16 dport, | 150 | const struct in6_addr *daddr, const __be16 dport, |
151 | const int dif) | 151 | const int dif) |
152 | { | 152 | { |
153 | struct sock *sk; | 153 | struct sock *sk; |
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index f98ca30d7c1f..bf526115e518 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c | |||
@@ -139,9 +139,9 @@ static __inline__ u32 fib6_new_sernum(void) | |||
139 | * test bit | 139 | * test bit |
140 | */ | 140 | */ |
141 | 141 | ||
142 | static __inline__ int addr_bit_set(void *token, int fn_bit) | 142 | static __inline__ __be32 addr_bit_set(void *token, int fn_bit) |
143 | { | 143 | { |
144 | __u32 *addr = token; | 144 | __be32 *addr = token; |
145 | 145 | ||
146 | return htonl(1 << ((~fn_bit)&0x1F)) & addr[fn_bit>>5]; | 146 | return htonl(1 << ((~fn_bit)&0x1F)) & addr[fn_bit>>5]; |
147 | } | 147 | } |
@@ -434,7 +434,7 @@ static struct fib6_node * fib6_add_1(struct fib6_node *root, void *addr, | |||
434 | struct fib6_node *pn = NULL; | 434 | struct fib6_node *pn = NULL; |
435 | struct rt6key *key; | 435 | struct rt6key *key; |
436 | int bit; | 436 | int bit; |
437 | int dir = 0; | 437 | __be32 dir = 0; |
438 | __u32 sernum = fib6_new_sernum(); | 438 | __u32 sernum = fib6_new_sernum(); |
439 | 439 | ||
440 | RT6_TRACE("fib6_add_1\n"); | 440 | RT6_TRACE("fib6_add_1\n"); |
@@ -829,7 +829,7 @@ static struct fib6_node * fib6_lookup_1(struct fib6_node *root, | |||
829 | struct lookup_args *args) | 829 | struct lookup_args *args) |
830 | { | 830 | { |
831 | struct fib6_node *fn; | 831 | struct fib6_node *fn; |
832 | int dir; | 832 | __be32 dir; |
833 | 833 | ||
834 | if (unlikely(args->offset == 0)) | 834 | if (unlikely(args->offset == 0)) |
835 | return NULL; | 835 | return NULL; |
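addr_bit_set() above now returns the raw __be32 test result rather than an int; the helper tests bit fn_bit of the address, counting from the most significant bit of the first word, while the stored words stay in network byte order. Below is a stand-alone rewrite of that bit test with stdint types, only to make the indexing easier to follow.

	#include <stdio.h>
	#include <stdint.h>
	#include <arpa/inet.h>		/* htonl() */

	/* Test bit fn_bit of an IPv6 address kept in network byte order, where
	 * bit 0 is the most significant bit of the first word -- the ordering a
	 * longest-prefix-match trie walks. */
	static uint32_t addr_bit_set(const void *token, int fn_bit)
	{
		const uint32_t *addr = token;

		return htonl(1u << ((~fn_bit) & 0x1F)) & addr[fn_bit >> 5];
	}

	int main(void)
	{
		/* 2001:db8::1 stored as four big-endian words */
		uint32_t a[4] = { htonl(0x20010db8u), 0, 0, htonl(0x00000001u) };

		printf("bit 0:   %s\n", addr_bit_set(a, 0)   ? "set" : "clear"); /* clear */
		printf("bit 2:   %s\n", addr_bit_set(a, 2)   ? "set" : "clear"); /* set   */
		printf("bit 127: %s\n", addr_bit_set(a, 127) ? "set" : "clear"); /* set   */
		return 0;
	}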
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c index 6d4533b58dca..624fae251f4e 100644 --- a/net/ipv6/ip6_flowlabel.c +++ b/net/ipv6/ip6_flowlabel.c | |||
@@ -61,7 +61,7 @@ static DEFINE_RWLOCK(ip6_fl_lock); | |||
61 | static DEFINE_RWLOCK(ip6_sk_fl_lock); | 61 | static DEFINE_RWLOCK(ip6_sk_fl_lock); |
62 | 62 | ||
63 | 63 | ||
64 | static __inline__ struct ip6_flowlabel * __fl_lookup(u32 label) | 64 | static __inline__ struct ip6_flowlabel * __fl_lookup(__be32 label) |
65 | { | 65 | { |
66 | struct ip6_flowlabel *fl; | 66 | struct ip6_flowlabel *fl; |
67 | 67 | ||
@@ -72,7 +72,7 @@ static __inline__ struct ip6_flowlabel * __fl_lookup(u32 label) | |||
72 | return NULL; | 72 | return NULL; |
73 | } | 73 | } |
74 | 74 | ||
75 | static struct ip6_flowlabel * fl_lookup(u32 label) | 75 | static struct ip6_flowlabel * fl_lookup(__be32 label) |
76 | { | 76 | { |
77 | struct ip6_flowlabel *fl; | 77 | struct ip6_flowlabel *fl; |
78 | 78 | ||
@@ -153,7 +153,7 @@ static void ip6_fl_gc(unsigned long dummy) | |||
153 | write_unlock(&ip6_fl_lock); | 153 | write_unlock(&ip6_fl_lock); |
154 | } | 154 | } |
155 | 155 | ||
156 | static int fl_intern(struct ip6_flowlabel *fl, __u32 label) | 156 | static int fl_intern(struct ip6_flowlabel *fl, __be32 label) |
157 | { | 157 | { |
158 | fl->label = label & IPV6_FLOWLABEL_MASK; | 158 | fl->label = label & IPV6_FLOWLABEL_MASK; |
159 | 159 | ||
@@ -182,7 +182,7 @@ static int fl_intern(struct ip6_flowlabel *fl, __u32 label) | |||
182 | 182 | ||
183 | /* Socket flowlabel lists */ | 183 | /* Socket flowlabel lists */ |
184 | 184 | ||
185 | struct ip6_flowlabel * fl6_sock_lookup(struct sock *sk, u32 label) | 185 | struct ip6_flowlabel * fl6_sock_lookup(struct sock *sk, __be32 label) |
186 | { | 186 | { |
187 | struct ipv6_fl_socklist *sfl; | 187 | struct ipv6_fl_socklist *sfl; |
188 | struct ipv6_pinfo *np = inet6_sk(sk); | 188 | struct ipv6_pinfo *np = inet6_sk(sk); |
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c index 6b8e6d76a58b..ad0b8abcdf4b 100644 --- a/net/ipv6/ip6_input.c +++ b/net/ipv6/ip6_input.c | |||
@@ -60,14 +60,22 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt | |||
60 | { | 60 | { |
61 | struct ipv6hdr *hdr; | 61 | struct ipv6hdr *hdr; |
62 | u32 pkt_len; | 62 | u32 pkt_len; |
63 | struct inet6_dev *idev; | ||
63 | 64 | ||
64 | if (skb->pkt_type == PACKET_OTHERHOST) | 65 | if (skb->pkt_type == PACKET_OTHERHOST) { |
65 | goto drop; | 66 | kfree_skb(skb); |
67 | return 0; | ||
68 | } | ||
69 | |||
70 | rcu_read_lock(); | ||
66 | 71 | ||
67 | IP6_INC_STATS_BH(IPSTATS_MIB_INRECEIVES); | 72 | idev = __in6_dev_get(skb->dev); |
73 | |||
74 | IP6_INC_STATS_BH(idev, IPSTATS_MIB_INRECEIVES); | ||
68 | 75 | ||
69 | if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) { | 76 | if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) { |
70 | IP6_INC_STATS_BH(IPSTATS_MIB_INDISCARDS); | 77 | IP6_INC_STATS_BH(idev, IPSTATS_MIB_INDISCARDS); |
78 | rcu_read_unlock(); | ||
71 | goto out; | 79 | goto out; |
72 | } | 80 | } |
73 | 81 | ||
@@ -84,7 +92,7 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt | |||
84 | * arrived via the sending interface (ethX), because of the | 92 | * arrived via the sending interface (ethX), because of the |
85 | * nature of scoping architecture. --yoshfuji | 93 | * nature of scoping architecture. --yoshfuji |
86 | */ | 94 | */ |
87 | IP6CB(skb)->iif = skb->dst ? ((struct rt6_info *)skb->dst)->rt6i_idev->dev->ifindex : dev->ifindex; | 95 | IP6CB(skb)->iif = skb->dst ? ip6_dst_idev(skb->dst)->dev->ifindex : dev->ifindex; |
88 | 96 | ||
89 | if (unlikely(!pskb_may_pull(skb, sizeof(*hdr)))) | 97 | if (unlikely(!pskb_may_pull(skb, sizeof(*hdr)))) |
90 | goto err; | 98 | goto err; |
@@ -104,7 +112,7 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt | |||
104 | if (pkt_len + sizeof(struct ipv6hdr) > skb->len) | 112 | if (pkt_len + sizeof(struct ipv6hdr) > skb->len) |
105 | goto truncated; | 113 | goto truncated; |
106 | if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr))) { | 114 | if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr))) { |
107 | IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS); | 115 | IP6_INC_STATS_BH(idev, IPSTATS_MIB_INHDRERRORS); |
108 | goto drop; | 116 | goto drop; |
109 | } | 117 | } |
110 | hdr = skb->nh.ipv6h; | 118 | hdr = skb->nh.ipv6h; |
@@ -112,17 +120,21 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt | |||
112 | 120 | ||
113 | if (hdr->nexthdr == NEXTHDR_HOP) { | 121 | if (hdr->nexthdr == NEXTHDR_HOP) { |
114 | if (ipv6_parse_hopopts(&skb) < 0) { | 122 | if (ipv6_parse_hopopts(&skb) < 0) { |
115 | IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS); | 123 | IP6_INC_STATS_BH(idev, IPSTATS_MIB_INHDRERRORS); |
124 | rcu_read_unlock(); | ||
116 | return 0; | 125 | return 0; |
117 | } | 126 | } |
118 | } | 127 | } |
119 | 128 | ||
129 | rcu_read_unlock(); | ||
130 | |||
120 | return NF_HOOK(PF_INET6,NF_IP6_PRE_ROUTING, skb, dev, NULL, ip6_rcv_finish); | 131 | return NF_HOOK(PF_INET6,NF_IP6_PRE_ROUTING, skb, dev, NULL, ip6_rcv_finish); |
121 | truncated: | 132 | truncated: |
122 | IP6_INC_STATS_BH(IPSTATS_MIB_INTRUNCATEDPKTS); | 133 | IP6_INC_STATS_BH(idev, IPSTATS_MIB_INTRUNCATEDPKTS); |
123 | err: | 134 | err: |
124 | IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS); | 135 | IP6_INC_STATS_BH(idev, IPSTATS_MIB_INHDRERRORS); |
125 | drop: | 136 | drop: |
137 | rcu_read_unlock(); | ||
126 | kfree_skb(skb); | 138 | kfree_skb(skb); |
127 | out: | 139 | out: |
128 | return 0; | 140 | return 0; |
@@ -140,6 +152,7 @@ static inline int ip6_input_finish(struct sk_buff *skb) | |||
140 | unsigned int nhoff; | 152 | unsigned int nhoff; |
141 | int nexthdr; | 153 | int nexthdr; |
142 | u8 hash; | 154 | u8 hash; |
155 | struct inet6_dev *idev; | ||
143 | 156 | ||
144 | /* | 157 | /* |
145 | * Parse extension headers | 158 | * Parse extension headers |
@@ -147,6 +160,7 @@ static inline int ip6_input_finish(struct sk_buff *skb) | |||
147 | 160 | ||
148 | rcu_read_lock(); | 161 | rcu_read_lock(); |
149 | resubmit: | 162 | resubmit: |
163 | idev = ip6_dst_idev(skb->dst); | ||
150 | if (!pskb_pull(skb, skb->h.raw - skb->data)) | 164 | if (!pskb_pull(skb, skb->h.raw - skb->data)) |
151 | goto discard; | 165 | goto discard; |
152 | nhoff = IP6CB(skb)->nhoff; | 166 | nhoff = IP6CB(skb)->nhoff; |
@@ -185,24 +199,24 @@ resubmit: | |||
185 | if (ret > 0) | 199 | if (ret > 0) |
186 | goto resubmit; | 200 | goto resubmit; |
187 | else if (ret == 0) | 201 | else if (ret == 0) |
188 | IP6_INC_STATS_BH(IPSTATS_MIB_INDELIVERS); | 202 | IP6_INC_STATS_BH(idev, IPSTATS_MIB_INDELIVERS); |
189 | } else { | 203 | } else { |
190 | if (!raw_sk) { | 204 | if (!raw_sk) { |
191 | if (xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) { | 205 | if (xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) { |
192 | IP6_INC_STATS_BH(IPSTATS_MIB_INUNKNOWNPROTOS); | 206 | IP6_INC_STATS_BH(idev, IPSTATS_MIB_INUNKNOWNPROTOS); |
193 | icmpv6_send(skb, ICMPV6_PARAMPROB, | 207 | icmpv6_send(skb, ICMPV6_PARAMPROB, |
194 | ICMPV6_UNK_NEXTHDR, nhoff, | 208 | ICMPV6_UNK_NEXTHDR, nhoff, |
195 | skb->dev); | 209 | skb->dev); |
196 | } | 210 | } |
197 | } else | 211 | } else |
198 | IP6_INC_STATS_BH(IPSTATS_MIB_INDELIVERS); | 212 | IP6_INC_STATS_BH(idev, IPSTATS_MIB_INDELIVERS); |
199 | kfree_skb(skb); | 213 | kfree_skb(skb); |
200 | } | 214 | } |
201 | rcu_read_unlock(); | 215 | rcu_read_unlock(); |
202 | return 0; | 216 | return 0; |
203 | 217 | ||
204 | discard: | 218 | discard: |
205 | IP6_INC_STATS_BH(IPSTATS_MIB_INDISCARDS); | 219 | IP6_INC_STATS_BH(idev, IPSTATS_MIB_INDISCARDS); |
206 | rcu_read_unlock(); | 220 | rcu_read_unlock(); |
207 | kfree_skb(skb); | 221 | kfree_skb(skb); |
208 | return 0; | 222 | return 0; |
@@ -219,7 +233,7 @@ int ip6_mc_input(struct sk_buff *skb) | |||
219 | struct ipv6hdr *hdr; | 233 | struct ipv6hdr *hdr; |
220 | int deliver; | 234 | int deliver; |
221 | 235 | ||
222 | IP6_INC_STATS_BH(IPSTATS_MIB_INMCASTPKTS); | 236 | IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_INMCASTPKTS); |
223 | 237 | ||
224 | hdr = skb->nh.ipv6h; | 238 | hdr = skb->nh.ipv6h; |
225 | deliver = likely(!(skb->dev->flags & (IFF_PROMISC|IFF_ALLMULTI))) || | 239 | deliver = likely(!(skb->dev->flags & (IFF_PROMISC|IFF_ALLMULTI))) || |
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 66716911962e..e05ecbb1412d 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c | |||
@@ -88,7 +88,7 @@ static inline int ip6_output_finish(struct sk_buff *skb) | |||
88 | } else if (dst->neighbour) | 88 | } else if (dst->neighbour) |
89 | return dst->neighbour->output(skb); | 89 | return dst->neighbour->output(skb); |
90 | 90 | ||
91 | IP6_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES); | 91 | IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES); |
92 | kfree_skb(skb); | 92 | kfree_skb(skb); |
93 | return -EINVAL; | 93 | return -EINVAL; |
94 | 94 | ||
@@ -118,6 +118,7 @@ static int ip6_output2(struct sk_buff *skb) | |||
118 | 118 | ||
119 | if (ipv6_addr_is_multicast(&skb->nh.ipv6h->daddr)) { | 119 | if (ipv6_addr_is_multicast(&skb->nh.ipv6h->daddr)) { |
120 | struct ipv6_pinfo* np = skb->sk ? inet6_sk(skb->sk) : NULL; | 120 | struct ipv6_pinfo* np = skb->sk ? inet6_sk(skb->sk) : NULL; |
121 | struct inet6_dev *idev = ip6_dst_idev(skb->dst); | ||
121 | 122 | ||
122 | if (!(dev->flags & IFF_LOOPBACK) && (!np || np->mc_loop) && | 123 | if (!(dev->flags & IFF_LOOPBACK) && (!np || np->mc_loop) && |
123 | ipv6_chk_mcast_addr(dev, &skb->nh.ipv6h->daddr, | 124 | ipv6_chk_mcast_addr(dev, &skb->nh.ipv6h->daddr, |
@@ -133,13 +134,13 @@ static int ip6_output2(struct sk_buff *skb) | |||
133 | ip6_dev_loopback_xmit); | 134 | ip6_dev_loopback_xmit); |
134 | 135 | ||
135 | if (skb->nh.ipv6h->hop_limit == 0) { | 136 | if (skb->nh.ipv6h->hop_limit == 0) { |
136 | IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS); | 137 | IP6_INC_STATS(idev, IPSTATS_MIB_OUTDISCARDS); |
137 | kfree_skb(skb); | 138 | kfree_skb(skb); |
138 | return 0; | 139 | return 0; |
139 | } | 140 | } |
140 | } | 141 | } |
141 | 142 | ||
142 | IP6_INC_STATS(IPSTATS_MIB_OUTMCASTPKTS); | 143 | IP6_INC_STATS(idev, IPSTATS_MIB_OUTMCASTPKTS); |
143 | } | 144 | } |
144 | 145 | ||
145 | return NF_HOOK(PF_INET6, NF_IP6_POST_ROUTING, skb,NULL, skb->dev,ip6_output_finish); | 146 | return NF_HOOK(PF_INET6, NF_IP6_POST_ROUTING, skb,NULL, skb->dev,ip6_output_finish); |
@@ -182,12 +183,14 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl, | |||
182 | 183 | ||
183 | if (skb_headroom(skb) < head_room) { | 184 | if (skb_headroom(skb) < head_room) { |
184 | struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room); | 185 | struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room); |
185 | kfree_skb(skb); | 186 | if (skb2 == NULL) { |
186 | skb = skb2; | 187 | IP6_INC_STATS(ip6_dst_idev(skb->dst), |
187 | if (skb == NULL) { | 188 | IPSTATS_MIB_OUTDISCARDS); |
188 | IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS); | 189 | kfree_skb(skb); |
189 | return -ENOBUFS; | 190 | return -ENOBUFS; |
190 | } | 191 | } |
192 | kfree_skb(skb); | ||
193 | skb = skb2; | ||
191 | if (sk) | 194 | if (sk) |
192 | skb_set_owner_w(skb, sk); | 195 | skb_set_owner_w(skb, sk); |
193 | } | 196 | } |
@@ -217,7 +220,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl, | |||
217 | if (tclass < 0) | 220 | if (tclass < 0) |
218 | tclass = 0; | 221 | tclass = 0; |
219 | 222 | ||
220 | *(u32 *)hdr = htonl(0x60000000 | (tclass << 20)) | fl->fl6_flowlabel; | 223 | *(__be32 *)hdr = htonl(0x60000000 | (tclass << 20)) | fl->fl6_flowlabel; |
221 | 224 | ||
222 | hdr->payload_len = htons(seg_len); | 225 | hdr->payload_len = htons(seg_len); |
223 | hdr->nexthdr = proto; | 226 | hdr->nexthdr = proto; |
@@ -230,7 +233,8 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl, | |||
230 | 233 | ||
231 | mtu = dst_mtu(dst); | 234 | mtu = dst_mtu(dst); |
232 | if ((skb->len <= mtu) || ipfragok || skb_is_gso(skb)) { | 235 | if ((skb->len <= mtu) || ipfragok || skb_is_gso(skb)) { |
233 | IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS); | 236 | IP6_INC_STATS(ip6_dst_idev(skb->dst), |
237 | IPSTATS_MIB_OUTREQUESTS); | ||
234 | return NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev, | 238 | return NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev, |
235 | dst_output); | 239 | dst_output); |
236 | } | 240 | } |
@@ -239,7 +243,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl, | |||
239 | printk(KERN_DEBUG "IPv6: sending pkt_too_big to self\n"); | 243 | printk(KERN_DEBUG "IPv6: sending pkt_too_big to self\n"); |
240 | skb->dev = dst->dev; | 244 | skb->dev = dst->dev; |
241 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev); | 245 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev); |
242 | IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS); | 246 | IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_FRAGFAILS); |
243 | kfree_skb(skb); | 247 | kfree_skb(skb); |
244 | return -EMSGSIZE; | 248 | return -EMSGSIZE; |
245 | } | 249 | } |
@@ -267,7 +271,7 @@ int ip6_nd_hdr(struct sock *sk, struct sk_buff *skb, struct net_device *dev, | |||
267 | hdr = (struct ipv6hdr *) skb_put(skb, sizeof(struct ipv6hdr)); | 271 | hdr = (struct ipv6hdr *) skb_put(skb, sizeof(struct ipv6hdr)); |
268 | skb->nh.ipv6h = hdr; | 272 | skb->nh.ipv6h = hdr; |
269 | 273 | ||
270 | *(u32*)hdr = htonl(0x60000000); | 274 | *(__be32*)hdr = htonl(0x60000000); |
271 | 275 | ||
272 | hdr->payload_len = htons(len); | 276 | hdr->payload_len = htons(len); |
273 | hdr->nexthdr = proto; | 277 | hdr->nexthdr = proto; |
@@ -373,7 +377,7 @@ int ip6_forward(struct sk_buff *skb) | |||
373 | goto error; | 377 | goto error; |
374 | 378 | ||
375 | if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) { | 379 | if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) { |
376 | IP6_INC_STATS(IPSTATS_MIB_INDISCARDS); | 380 | IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS); |
377 | goto drop; | 381 | goto drop; |
378 | } | 382 | } |
379 | 383 | ||
@@ -406,7 +410,7 @@ int ip6_forward(struct sk_buff *skb) | |||
406 | skb->dev = dst->dev; | 410 | skb->dev = dst->dev; |
407 | icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, | 411 | icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, |
408 | 0, skb->dev); | 412 | 0, skb->dev); |
409 | IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS); | 413 | IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS); |
410 | 414 | ||
411 | kfree_skb(skb); | 415 | kfree_skb(skb); |
412 | return -ETIMEDOUT; | 416 | return -ETIMEDOUT; |
@@ -419,13 +423,13 @@ int ip6_forward(struct sk_buff *skb) | |||
419 | if (proxied > 0) | 423 | if (proxied > 0) |
420 | return ip6_input(skb); | 424 | return ip6_input(skb); |
421 | else if (proxied < 0) { | 425 | else if (proxied < 0) { |
422 | IP6_INC_STATS(IPSTATS_MIB_INDISCARDS); | 426 | IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS); |
423 | goto drop; | 427 | goto drop; |
424 | } | 428 | } |
425 | } | 429 | } |
426 | 430 | ||
427 | if (!xfrm6_route_forward(skb)) { | 431 | if (!xfrm6_route_forward(skb)) { |
428 | IP6_INC_STATS(IPSTATS_MIB_INDISCARDS); | 432 | IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS); |
429 | goto drop; | 433 | goto drop; |
430 | } | 434 | } |
431 | dst = skb->dst; | 435 | dst = skb->dst; |
@@ -464,14 +468,14 @@ int ip6_forward(struct sk_buff *skb) | |||
464 | /* Again, force OUTPUT device used as source address */ | 468 | /* Again, force OUTPUT device used as source address */ |
465 | skb->dev = dst->dev; | 469 | skb->dev = dst->dev; |
466 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, dst_mtu(dst), skb->dev); | 470 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, dst_mtu(dst), skb->dev); |
467 | IP6_INC_STATS_BH(IPSTATS_MIB_INTOOBIGERRORS); | 471 | IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_INTOOBIGERRORS); |
468 | IP6_INC_STATS_BH(IPSTATS_MIB_FRAGFAILS); | 472 | IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_FRAGFAILS); |
469 | kfree_skb(skb); | 473 | kfree_skb(skb); |
470 | return -EMSGSIZE; | 474 | return -EMSGSIZE; |
471 | } | 475 | } |
472 | 476 | ||
473 | if (skb_cow(skb, dst->dev->hard_header_len)) { | 477 | if (skb_cow(skb, dst->dev->hard_header_len)) { |
474 | IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS); | 478 | IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_OUTDISCARDS); |
475 | goto drop; | 479 | goto drop; |
476 | } | 480 | } |
477 | 481 | ||
@@ -481,11 +485,11 @@ int ip6_forward(struct sk_buff *skb) | |||
481 | 485 | ||
482 | hdr->hop_limit--; | 486 | hdr->hop_limit--; |
483 | 487 | ||
484 | IP6_INC_STATS_BH(IPSTATS_MIB_OUTFORWDATAGRAMS); | 488 | IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS); |
485 | return NF_HOOK(PF_INET6,NF_IP6_FORWARD, skb, skb->dev, dst->dev, ip6_forward_finish); | 489 | return NF_HOOK(PF_INET6,NF_IP6_FORWARD, skb, skb->dev, dst->dev, ip6_forward_finish); |
486 | 490 | ||
487 | error: | 491 | error: |
488 | IP6_INC_STATS_BH(IPSTATS_MIB_INADDRERRORS); | 492 | IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS); |
489 | drop: | 493 | drop: |
490 | kfree_skb(skb); | 494 | kfree_skb(skb); |
491 | return -EINVAL; | 495 | return -EINVAL; |
@@ -499,12 +503,12 @@ static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from) | |||
499 | dst_release(to->dst); | 503 | dst_release(to->dst); |
500 | to->dst = dst_clone(from->dst); | 504 | to->dst = dst_clone(from->dst); |
501 | to->dev = from->dev; | 505 | to->dev = from->dev; |
506 | to->mark = from->mark; | ||
502 | 507 | ||
503 | #ifdef CONFIG_NET_SCHED | 508 | #ifdef CONFIG_NET_SCHED |
504 | to->tc_index = from->tc_index; | 509 | to->tc_index = from->tc_index; |
505 | #endif | 510 | #endif |
506 | #ifdef CONFIG_NETFILTER | 511 | #ifdef CONFIG_NETFILTER |
507 | to->nfmark = from->nfmark; | ||
508 | /* Connection association is same as pre-frag packet */ | 512 | /* Connection association is same as pre-frag packet */ |
509 | nf_conntrack_put(to->nfct); | 513 | nf_conntrack_put(to->nfct); |
510 | to->nfct = from->nfct; | 514 | to->nfct = from->nfct; |
@@ -571,7 +575,7 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) | |||
571 | struct ipv6hdr *tmp_hdr; | 575 | struct ipv6hdr *tmp_hdr; |
572 | struct frag_hdr *fh; | 576 | struct frag_hdr *fh; |
573 | unsigned int mtu, hlen, left, len; | 577 | unsigned int mtu, hlen, left, len; |
574 | u32 frag_id = 0; | 578 | __be32 frag_id = 0; |
575 | int ptr, offset = 0, err=0; | 579 | int ptr, offset = 0, err=0; |
576 | u8 *prevhdr, nexthdr = 0; | 580 | u8 *prevhdr, nexthdr = 0; |
577 | 581 | ||
@@ -620,14 +624,13 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) | |||
620 | skb_shinfo(skb)->frag_list = NULL; | 624 | skb_shinfo(skb)->frag_list = NULL; |
621 | /* BUILD HEADER */ | 625 | /* BUILD HEADER */ |
622 | 626 | ||
623 | tmp_hdr = kmalloc(hlen, GFP_ATOMIC); | 627 | tmp_hdr = kmemdup(skb->nh.raw, hlen, GFP_ATOMIC); |
624 | if (!tmp_hdr) { | 628 | if (!tmp_hdr) { |
625 | IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS); | 629 | IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_FRAGFAILS); |
626 | return -ENOMEM; | 630 | return -ENOMEM; |
627 | } | 631 | } |
628 | 632 | ||
629 | *prevhdr = NEXTHDR_FRAGMENT; | 633 | *prevhdr = NEXTHDR_FRAGMENT; |
630 | memcpy(tmp_hdr, skb->nh.raw, hlen); | ||
631 | __skb_pull(skb, hlen); | 634 | __skb_pull(skb, hlen); |
632 | fh = (struct frag_hdr*)__skb_push(skb, sizeof(struct frag_hdr)); | 635 | fh = (struct frag_hdr*)__skb_push(skb, sizeof(struct frag_hdr)); |
633 | skb->nh.raw = __skb_push(skb, hlen); | 636 | skb->nh.raw = __skb_push(skb, hlen); |
@@ -643,7 +646,8 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) | |||
643 | skb->data_len = first_len - skb_headlen(skb); | 646 | skb->data_len = first_len - skb_headlen(skb); |
644 | skb->len = first_len; | 647 | skb->len = first_len; |
645 | skb->nh.ipv6h->payload_len = htons(first_len - sizeof(struct ipv6hdr)); | 648 | skb->nh.ipv6h->payload_len = htons(first_len - sizeof(struct ipv6hdr)); |
646 | 649 | ||
650 | dst_hold(&rt->u.dst); | ||
647 | 651 | ||
648 | for (;;) { | 652 | for (;;) { |
649 | /* Prepare header of the next frame, | 653 | /* Prepare header of the next frame, |
@@ -667,7 +671,7 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) | |||
667 | 671 | ||
668 | err = output(skb); | 672 | err = output(skb); |
669 | if(!err) | 673 | if(!err) |
670 | IP6_INC_STATS(IPSTATS_MIB_FRAGCREATES); | 674 | IP6_INC_STATS(ip6_dst_idev(&rt->u.dst), IPSTATS_MIB_FRAGCREATES); |
671 | 675 | ||
672 | if (err || !frag) | 676 | if (err || !frag) |
673 | break; | 677 | break; |
@@ -680,7 +684,8 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) | |||
680 | kfree(tmp_hdr); | 684 | kfree(tmp_hdr); |
681 | 685 | ||
682 | if (err == 0) { | 686 | if (err == 0) { |
683 | IP6_INC_STATS(IPSTATS_MIB_FRAGOKS); | 687 | IP6_INC_STATS(ip6_dst_idev(&rt->u.dst), IPSTATS_MIB_FRAGOKS); |
688 | dst_release(&rt->u.dst); | ||
684 | return 0; | 689 | return 0; |
685 | } | 690 | } |
686 | 691 | ||
@@ -690,7 +695,8 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) | |||
690 | frag = skb; | 695 | frag = skb; |
691 | } | 696 | } |
692 | 697 | ||
693 | IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS); | 698 | IP6_INC_STATS(ip6_dst_idev(&rt->u.dst), IPSTATS_MIB_FRAGFAILS); |
699 | dst_release(&rt->u.dst); | ||
694 | return err; | 700 | return err; |
695 | } | 701 | } |
696 | 702 | ||
@@ -723,7 +729,8 @@ slow_path: | |||
723 | 729 | ||
724 | if ((frag = alloc_skb(len+hlen+sizeof(struct frag_hdr)+LL_RESERVED_SPACE(rt->u.dst.dev), GFP_ATOMIC)) == NULL) { | 730 | if ((frag = alloc_skb(len+hlen+sizeof(struct frag_hdr)+LL_RESERVED_SPACE(rt->u.dst.dev), GFP_ATOMIC)) == NULL) { |
725 | NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n"); | 731 | NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n"); |
726 | IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS); | 732 | IP6_INC_STATS(ip6_dst_idev(skb->dst), |
733 | IPSTATS_MIB_FRAGFAILS); | ||
727 | err = -ENOMEM; | 734 | err = -ENOMEM; |
728 | goto fail; | 735 | goto fail; |
729 | } | 736 | } |
@@ -784,15 +791,17 @@ slow_path: | |||
784 | if (err) | 791 | if (err) |
785 | goto fail; | 792 | goto fail; |
786 | 793 | ||
787 | IP6_INC_STATS(IPSTATS_MIB_FRAGCREATES); | 794 | IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_FRAGCREATES); |
788 | } | 795 | } |
796 | IP6_INC_STATS(ip6_dst_idev(skb->dst), | ||
797 | IPSTATS_MIB_FRAGOKS); | ||
789 | kfree_skb(skb); | 798 | kfree_skb(skb); |
790 | IP6_INC_STATS(IPSTATS_MIB_FRAGOKS); | ||
791 | return err; | 799 | return err; |
792 | 800 | ||
793 | fail: | 801 | fail: |
802 | IP6_INC_STATS(ip6_dst_idev(skb->dst), | ||
803 | IPSTATS_MIB_FRAGFAILS); | ||
794 | kfree_skb(skb); | 804 | kfree_skb(skb); |
795 | IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS); | ||
796 | return err; | 805 | return err; |
797 | } | 806 | } |
798 | 807 | ||
@@ -1265,7 +1274,7 @@ alloc_new_skb: | |||
1265 | return 0; | 1274 | return 0; |
1266 | error: | 1275 | error: |
1267 | inet->cork.length -= length; | 1276 | inet->cork.length -= length; |
1268 | IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS); | 1277 | IP6_INC_STATS(rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS); |
1269 | return err; | 1278 | return err; |
1270 | } | 1279 | } |
1271 | 1280 | ||
@@ -1311,7 +1320,7 @@ int ip6_push_pending_frames(struct sock *sk) | |||
1311 | 1320 | ||
1312 | skb->nh.ipv6h = hdr = (struct ipv6hdr*) skb_push(skb, sizeof(struct ipv6hdr)); | 1321 | skb->nh.ipv6h = hdr = (struct ipv6hdr*) skb_push(skb, sizeof(struct ipv6hdr)); |
1313 | 1322 | ||
1314 | *(u32*)hdr = fl->fl6_flowlabel | | 1323 | *(__be32*)hdr = fl->fl6_flowlabel | |
1315 | htonl(0x60000000 | ((int)np->cork.tclass << 20)); | 1324 | htonl(0x60000000 | ((int)np->cork.tclass << 20)); |
1316 | 1325 | ||
1317 | if (skb->len <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) | 1326 | if (skb->len <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) |
@@ -1326,7 +1335,7 @@ int ip6_push_pending_frames(struct sock *sk) | |||
1326 | skb->priority = sk->sk_priority; | 1335 | skb->priority = sk->sk_priority; |
1327 | 1336 | ||
1328 | skb->dst = dst_clone(&rt->u.dst); | 1337 | skb->dst = dst_clone(&rt->u.dst); |
1329 | IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS); | 1338 | IP6_INC_STATS(rt->rt6i_idev, IPSTATS_MIB_OUTREQUESTS); |
1330 | err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, skb->dst->dev, dst_output); | 1339 | err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, skb->dst->dev, dst_output); |
1331 | if (err) { | 1340 | if (err) { |
1332 | if (err > 0) | 1341 | if (err > 0) |
@@ -1357,7 +1366,8 @@ void ip6_flush_pending_frames(struct sock *sk) | |||
1357 | struct sk_buff *skb; | 1366 | struct sk_buff *skb; |
1358 | 1367 | ||
1359 | while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) { | 1368 | while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) { |
1360 | IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS); | 1369 | IP6_INC_STATS(ip6_dst_idev(skb->dst), |
1370 | IPSTATS_MIB_OUTDISCARDS); | ||
1361 | kfree_skb(skb); | 1371 | kfree_skb(skb); |
1362 | } | 1372 | } |
1363 | 1373 | ||
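In the ip6_fragment() hunk above, the saved header is now built with kmemdup() instead of kmalloc() followed by a separate memcpy(). A minimal user-space equivalent of that helper is sketched below (no gfp flags here, and memdup_sketch is an illustrative name, not a kernel API).

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	/* Allocate len bytes and copy src into them, returning NULL on
	 * allocation failure -- the allocate-and-copy idiom kmemdup() wraps. */
	static void *memdup_sketch(const void *src, size_t len)
	{
		void *p = malloc(len);

		if (p)
			memcpy(p, src, len);
		return p;
	}

	int main(void)
	{
		const char hdr[] = "\x60\x00\x00\x00";	/* pretend saved header bytes */
		void *tmp_hdr = memdup_sketch(hdr, sizeof(hdr));

		if (!tmp_hdr)
			return 1;			/* mirrors the -ENOMEM path */

		printf("duplicated %zu header bytes\n", sizeof(hdr));
		free(tmp_hdr);
		return 0;
	}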
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index b9f40290d12a..8d918348f5bb 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c | |||
@@ -66,7 +66,7 @@ MODULE_LICENSE("GPL"); | |||
66 | 66 | ||
67 | #define HASH_SIZE 32 | 67 | #define HASH_SIZE 32 |
68 | 68 | ||
69 | #define HASH(addr) (((addr)->s6_addr32[0] ^ (addr)->s6_addr32[1] ^ \ | 69 | #define HASH(addr) ((__force u32)((addr)->s6_addr32[0] ^ (addr)->s6_addr32[1] ^ \ |
70 | (addr)->s6_addr32[2] ^ (addr)->s6_addr32[3]) & \ | 70 | (addr)->s6_addr32[2] ^ (addr)->s6_addr32[3]) & \ |
71 | (HASH_SIZE - 1)) | 71 | (HASH_SIZE - 1)) |
72 | 72 | ||
@@ -215,11 +215,10 @@ ip6ip6_tnl_unlink(struct ip6_tnl *t) | |||
215 | * Create tunnel matching given parameters. | 215 | * Create tunnel matching given parameters. |
216 | * | 216 | * |
217 | * Return: | 217 | * Return: |
218 | * 0 on success | 218 | * created tunnel or NULL |
219 | **/ | 219 | **/ |
220 | 220 | ||
221 | static int | 221 | static struct ip6_tnl *ip6_tnl_create(struct ip6_tnl_parm *p) |
222 | ip6_tnl_create(struct ip6_tnl_parm *p, struct ip6_tnl **pt) | ||
223 | { | 222 | { |
224 | struct net_device *dev; | 223 | struct net_device *dev; |
225 | struct ip6_tnl *t; | 224 | struct ip6_tnl *t; |
@@ -236,11 +235,11 @@ ip6_tnl_create(struct ip6_tnl_parm *p, struct ip6_tnl **pt) | |||
236 | break; | 235 | break; |
237 | } | 236 | } |
238 | if (i == IP6_TNL_MAX) | 237 | if (i == IP6_TNL_MAX) |
239 | return -ENOBUFS; | 238 | goto failed; |
240 | } | 239 | } |
241 | dev = alloc_netdev(sizeof (*t), name, ip6ip6_tnl_dev_setup); | 240 | dev = alloc_netdev(sizeof (*t), name, ip6ip6_tnl_dev_setup); |
242 | if (dev == NULL) | 241 | if (dev == NULL) |
243 | return -ENOMEM; | 242 | goto failed; |
244 | 243 | ||
245 | t = netdev_priv(dev); | 244 | t = netdev_priv(dev); |
246 | dev->init = ip6ip6_tnl_dev_init; | 245 | dev->init = ip6ip6_tnl_dev_init; |
@@ -248,13 +247,13 @@ ip6_tnl_create(struct ip6_tnl_parm *p, struct ip6_tnl **pt) | |||
248 | 247 | ||
249 | if ((err = register_netdevice(dev)) < 0) { | 248 | if ((err = register_netdevice(dev)) < 0) { |
250 | free_netdev(dev); | 249 | free_netdev(dev); |
251 | return err; | 250 | goto failed; |
252 | } | 251 | } |
253 | dev_hold(dev); | 252 | dev_hold(dev); |
254 | |||
255 | ip6ip6_tnl_link(t); | 253 | ip6ip6_tnl_link(t); |
256 | *pt = t; | 254 | return t; |
257 | return 0; | 255 | failed: |
256 | return NULL; | ||
258 | } | 257 | } |
259 | 258 | ||
260 | /** | 259 | /** |
@@ -268,32 +267,23 @@ ip6_tnl_create(struct ip6_tnl_parm *p, struct ip6_tnl **pt) | |||
268 | * tunnel device is created and registered for use. | 267 | * tunnel device is created and registered for use. |
269 | * | 268 | * |
270 | * Return: | 269 | * Return: |
271 | * 0 if tunnel located or created, | 270 | * matching tunnel or NULL |
272 | * -EINVAL if parameters incorrect, | ||
273 | * -ENODEV if no matching tunnel available | ||
274 | **/ | 271 | **/ |
275 | 272 | ||
276 | static int | 273 | static struct ip6_tnl *ip6ip6_tnl_locate(struct ip6_tnl_parm *p, int create) |
277 | ip6ip6_tnl_locate(struct ip6_tnl_parm *p, struct ip6_tnl **pt, int create) | ||
278 | { | 274 | { |
279 | struct in6_addr *remote = &p->raddr; | 275 | struct in6_addr *remote = &p->raddr; |
280 | struct in6_addr *local = &p->laddr; | 276 | struct in6_addr *local = &p->laddr; |
281 | struct ip6_tnl *t; | 277 | struct ip6_tnl *t; |
282 | 278 | ||
283 | if (p->proto != IPPROTO_IPV6) | ||
284 | return -EINVAL; | ||
285 | |||
286 | for (t = *ip6ip6_bucket(p); t; t = t->next) { | 279 | for (t = *ip6ip6_bucket(p); t; t = t->next) { |
287 | if (ipv6_addr_equal(local, &t->parms.laddr) && | 280 | if (ipv6_addr_equal(local, &t->parms.laddr) && |
288 | ipv6_addr_equal(remote, &t->parms.raddr)) { | 281 | ipv6_addr_equal(remote, &t->parms.raddr)) |
289 | *pt = t; | 282 | return t; |
290 | return (create ? -EEXIST : 0); | ||
291 | } | ||
292 | } | 283 | } |
293 | if (!create) | 284 | if (!create) |
294 | return -ENODEV; | 285 | return NULL; |
295 | 286 | return ip6_tnl_create(p); | |
296 | return ip6_tnl_create(p, pt); | ||
297 | } | 287 | } |
298 | 288 | ||
299 | /** | 289 | /** |
@@ -391,7 +381,7 @@ parse_tlv_tnl_enc_lim(struct sk_buff *skb, __u8 * raw) | |||
391 | 381 | ||
392 | static int | 382 | static int |
393 | ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | 383 | ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, |
394 | int type, int code, int offset, __u32 info) | 384 | int type, int code, int offset, __be32 info) |
395 | { | 385 | { |
396 | struct ipv6hdr *ipv6h = (struct ipv6hdr *) skb->data; | 386 | struct ipv6hdr *ipv6h = (struct ipv6hdr *) skb->data; |
397 | struct ip6_tnl *t; | 387 | struct ip6_tnl *t; |
@@ -434,12 +424,9 @@ ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
434 | } | 424 | } |
435 | break; | 425 | break; |
436 | case ICMPV6_PARAMPROB: | 426 | case ICMPV6_PARAMPROB: |
437 | /* ignore if parameter problem not caused by a tunnel | 427 | teli = 0; |
438 | encapsulation limit sub-option */ | 428 | if (code == ICMPV6_HDR_FIELD) |
439 | if (code != ICMPV6_HDR_FIELD) { | 429 | teli = parse_tlv_tnl_enc_lim(skb, skb->data); |
440 | break; | ||
441 | } | ||
442 | teli = parse_tlv_tnl_enc_lim(skb, skb->data); | ||
443 | 430 | ||
444 | if (teli && teli == ntohl(info) - 2) { | 431 | if (teli && teli == ntohl(info) - 2) { |
445 | tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli]; | 432 | tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli]; |
@@ -451,6 +438,10 @@ ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
451 | "tunnel!\n", t->parms.name); | 438 | "tunnel!\n", t->parms.name); |
452 | rel_msg = 1; | 439 | rel_msg = 1; |
453 | } | 440 | } |
441 | } else if (net_ratelimit()) { | ||
442 | printk(KERN_WARNING | ||
443 | "%s: Recipient unable to parse tunneled " | ||
444 | "packet!\n ", t->parms.name); | ||
454 | } | 445 | } |
455 | break; | 446 | break; |
456 | case ICMPV6_PKT_TOOBIG: | 447 | case ICMPV6_PKT_TOOBIG: |
@@ -470,6 +461,7 @@ ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
470 | if (rel_msg && pskb_may_pull(skb, offset + sizeof (*ipv6h))) { | 461 | if (rel_msg && pskb_may_pull(skb, offset + sizeof (*ipv6h))) { |
471 | struct rt6_info *rt; | 462 | struct rt6_info *rt; |
472 | struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); | 463 | struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); |
464 | |||
473 | if (!skb2) | 465 | if (!skb2) |
474 | goto out; | 466 | goto out; |
475 | 467 | ||
@@ -504,6 +496,27 @@ static inline void ip6ip6_ecn_decapsulate(struct ipv6hdr *outer_iph, | |||
504 | if (INET_ECN_is_ce(ipv6_get_dsfield(outer_iph))) | 496 | if (INET_ECN_is_ce(ipv6_get_dsfield(outer_iph))) |
505 | IP6_ECN_set_ce(inner_iph); | 497 | IP6_ECN_set_ce(inner_iph); |
506 | } | 498 | } |
499 | static inline int ip6_tnl_rcv_ctl(struct ip6_tnl *t) | ||
500 | { | ||
501 | struct ip6_tnl_parm *p = &t->parms; | ||
502 | int ret = 0; | ||
503 | |||
504 | if (p->flags & IP6_TNL_F_CAP_RCV) { | ||
505 | struct net_device *ldev = NULL; | ||
506 | |||
507 | if (p->link) | ||
508 | ldev = dev_get_by_index(p->link); | ||
509 | |||
510 | if ((ipv6_addr_is_multicast(&p->laddr) || | ||
511 | likely(ipv6_chk_addr(&p->laddr, ldev, 0))) && | ||
512 | likely(!ipv6_chk_addr(&p->raddr, NULL, 0))) | ||
513 | ret = 1; | ||
514 | |||
515 | if (ldev) | ||
516 | dev_put(ldev); | ||
517 | } | ||
518 | return ret; | ||
519 | } | ||
507 | 520 | ||
508 | /** | 521 | /** |
509 | * ip6ip6_rcv - decapsulate IPv6 packet and retransmit it locally | 522 | * ip6ip6_rcv - decapsulate IPv6 packet and retransmit it locally |
@@ -528,7 +541,7 @@ ip6ip6_rcv(struct sk_buff *skb) | |||
528 | goto discard; | 541 | goto discard; |
529 | } | 542 | } |
530 | 543 | ||
531 | if (!(t->parms.flags & IP6_TNL_F_CAP_RCV)) { | 544 | if (!ip6_tnl_rcv_ctl(t)) { |
532 | t->stat.rx_dropped++; | 545 | t->stat.rx_dropped++; |
533 | read_unlock(&ip6ip6_lock); | 546 | read_unlock(&ip6ip6_lock); |
534 | goto discard; | 547 | goto discard; |
@@ -560,31 +573,23 @@ discard: | |||
560 | return 0; | 573 | return 0; |
561 | } | 574 | } |
562 | 575 | ||
563 | static inline struct ipv6_txoptions *create_tel(__u8 encap_limit) | 576 | struct ipv6_tel_txoption { |
564 | { | 577 | struct ipv6_txoptions ops; |
565 | struct ipv6_tlv_tnl_enc_lim *tel; | 578 | __u8 dst_opt[8]; |
566 | struct ipv6_txoptions *opt; | 579 | }; |
567 | __u8 *raw; | ||
568 | |||
569 | int opt_len = sizeof(*opt) + 8; | ||
570 | |||
571 | if (!(opt = kzalloc(opt_len, GFP_ATOMIC))) { | ||
572 | return NULL; | ||
573 | } | ||
574 | opt->tot_len = opt_len; | ||
575 | opt->dst0opt = (struct ipv6_opt_hdr *) (opt + 1); | ||
576 | opt->opt_nflen = 8; | ||
577 | 580 | ||
578 | tel = (struct ipv6_tlv_tnl_enc_lim *) (opt->dst0opt + 1); | 581 | static void init_tel_txopt(struct ipv6_tel_txoption *opt, __u8 encap_limit) |
579 | tel->type = IPV6_TLV_TNL_ENCAP_LIMIT; | 582 | { |
580 | tel->length = 1; | 583 | memset(opt, 0, sizeof(struct ipv6_tel_txoption)); |
581 | tel->encap_limit = encap_limit; | ||
582 | 584 | ||
583 | raw = (__u8 *) opt->dst0opt; | 585 | opt->dst_opt[2] = IPV6_TLV_TNL_ENCAP_LIMIT; |
584 | raw[5] = IPV6_TLV_PADN; | 586 | opt->dst_opt[3] = 1; |
585 | raw[6] = 1; | 587 | opt->dst_opt[4] = encap_limit; |
588 | opt->dst_opt[5] = IPV6_TLV_PADN; | ||
589 | opt->dst_opt[6] = 1; | ||
586 | 590 | ||
587 | return opt; | 591 | opt->ops.dst0opt = (struct ipv6_opt_hdr *) opt->dst_opt; |
592 | opt->ops.opt_nflen = 8; | ||
588 | } | 593 | } |
589 | 594 | ||
590 | /** | 595 | /** |
@@ -607,6 +612,34 @@ ip6ip6_tnl_addr_conflict(struct ip6_tnl *t, struct ipv6hdr *hdr) | |||
607 | return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr); | 612 | return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr); |
608 | } | 613 | } |
609 | 614 | ||
615 | static inline int ip6_tnl_xmit_ctl(struct ip6_tnl *t) | ||
616 | { | ||
617 | struct ip6_tnl_parm *p = &t->parms; | ||
618 | int ret = 0; | ||
619 | |||
620 | if (p->flags & IP6_TNL_F_CAP_XMIT) { | ||
621 | struct net_device *ldev = NULL; | ||
622 | |||
623 | if (p->link) | ||
624 | ldev = dev_get_by_index(p->link); | ||
625 | |||
626 | if (unlikely(!ipv6_chk_addr(&p->laddr, ldev, 0))) | ||
627 | printk(KERN_WARNING | ||
628 | "%s xmit: Local address not yet configured!\n", | ||
629 | p->name); | ||
630 | else if (!ipv6_addr_is_multicast(&p->raddr) && | ||
631 | unlikely(ipv6_chk_addr(&p->raddr, NULL, 0))) | ||
632 | printk(KERN_WARNING | ||
633 | "%s xmit: Routing loop! " | ||
634 | "Remote address found on this node!\n", | ||
635 | p->name); | ||
636 | else | ||
637 | ret = 1; | ||
638 | if (ldev) | ||
639 | dev_put(ldev); | ||
640 | } | ||
641 | return ret; | ||
642 | } | ||
610 | /** | 643 | /** |
611 | * ip6ip6_tnl_xmit - encapsulate packet and send | 644 | * ip6ip6_tnl_xmit - encapsulate packet and send |
612 | * @skb: the outgoing socket buffer | 645 | * @skb: the outgoing socket buffer |
@@ -626,8 +659,8 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) | |||
626 | struct ip6_tnl *t = netdev_priv(dev); | 659 | struct ip6_tnl *t = netdev_priv(dev); |
627 | struct net_device_stats *stats = &t->stat; | 660 | struct net_device_stats *stats = &t->stat; |
628 | struct ipv6hdr *ipv6h = skb->nh.ipv6h; | 661 | struct ipv6hdr *ipv6h = skb->nh.ipv6h; |
629 | struct ipv6_txoptions *opt = NULL; | ||
630 | int encap_limit = -1; | 662 | int encap_limit = -1; |
663 | struct ipv6_tel_txoption opt; | ||
631 | __u16 offset; | 664 | __u16 offset; |
632 | struct flowi fl; | 665 | struct flowi fl; |
633 | struct dst_entry *dst; | 666 | struct dst_entry *dst; |
@@ -644,10 +677,9 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) | |||
644 | goto tx_err; | 677 | goto tx_err; |
645 | } | 678 | } |
646 | if (skb->protocol != htons(ETH_P_IPV6) || | 679 | if (skb->protocol != htons(ETH_P_IPV6) || |
647 | !(t->parms.flags & IP6_TNL_F_CAP_XMIT) || | 680 | !ip6_tnl_xmit_ctl(t) || ip6ip6_tnl_addr_conflict(t, ipv6h)) |
648 | ip6ip6_tnl_addr_conflict(t, ipv6h)) { | ||
649 | goto tx_err; | 681 | goto tx_err; |
650 | } | 682 | |
651 | if ((offset = parse_tlv_tnl_enc_lim(skb, skb->nh.raw)) > 0) { | 683 | if ((offset = parse_tlv_tnl_enc_lim(skb, skb->nh.raw)) > 0) { |
652 | struct ipv6_tlv_tnl_enc_lim *tel; | 684 | struct ipv6_tlv_tnl_enc_lim *tel; |
653 | tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->nh.raw[offset]; | 685 | tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->nh.raw[offset]; |
@@ -657,20 +689,17 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) | |||
657 | goto tx_err; | 689 | goto tx_err; |
658 | } | 690 | } |
659 | encap_limit = tel->encap_limit - 1; | 691 | encap_limit = tel->encap_limit - 1; |
660 | } else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) { | 692 | } else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) |
661 | encap_limit = t->parms.encap_limit; | 693 | encap_limit = t->parms.encap_limit; |
662 | } | 694 | |
663 | memcpy(&fl, &t->fl, sizeof (fl)); | 695 | memcpy(&fl, &t->fl, sizeof (fl)); |
664 | proto = fl.proto; | 696 | proto = fl.proto; |
665 | 697 | ||
666 | dsfield = ipv6_get_dsfield(ipv6h); | 698 | dsfield = ipv6_get_dsfield(ipv6h); |
667 | if ((t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)) | 699 | if ((t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)) |
668 | fl.fl6_flowlabel |= (*(__u32 *) ipv6h & IPV6_TCLASS_MASK); | 700 | fl.fl6_flowlabel |= (*(__be32 *) ipv6h & IPV6_TCLASS_MASK); |
669 | if ((t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)) | 701 | if ((t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)) |
670 | fl.fl6_flowlabel |= (*(__u32 *) ipv6h & IPV6_FLOWLABEL_MASK); | 702 | fl.fl6_flowlabel |= (*(__be32 *) ipv6h & IPV6_FLOWLABEL_MASK); |
671 | |||
672 | if (encap_limit >= 0 && (opt = create_tel(encap_limit)) == NULL) | ||
673 | goto tx_err; | ||
674 | 703 | ||
675 | if ((dst = ip6_tnl_dst_check(t)) != NULL) | 704 | if ((dst = ip6_tnl_dst_check(t)) != NULL) |
676 | dst_hold(dst); | 705 | dst_hold(dst); |
@@ -692,7 +721,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) | |||
692 | goto tx_err_dst_release; | 721 | goto tx_err_dst_release; |
693 | } | 722 | } |
694 | mtu = dst_mtu(dst) - sizeof (*ipv6h); | 723 | mtu = dst_mtu(dst) - sizeof (*ipv6h); |
695 | if (opt) { | 724 | if (encap_limit >= 0) { |
696 | max_headroom += 8; | 725 | max_headroom += 8; |
697 | mtu -= 8; | 726 | mtu -= 8; |
698 | } | 727 | } |
@@ -730,12 +759,13 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) | |||
730 | 759 | ||
731 | skb->h.raw = skb->nh.raw; | 760 | skb->h.raw = skb->nh.raw; |
732 | 761 | ||
733 | if (opt) | 762 | if (encap_limit >= 0) { |
734 | ipv6_push_nfrag_opts(skb, opt, &proto, NULL); | 763 | init_tel_txopt(&opt, encap_limit); |
735 | 764 | ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL); | |
765 | } | ||
736 | skb->nh.raw = skb_push(skb, sizeof(struct ipv6hdr)); | 766 | skb->nh.raw = skb_push(skb, sizeof(struct ipv6hdr)); |
737 | ipv6h = skb->nh.ipv6h; | 767 | ipv6h = skb->nh.ipv6h; |
738 | *(u32*)ipv6h = fl.fl6_flowlabel | htonl(0x60000000); | 768 | *(__be32*)ipv6h = fl.fl6_flowlabel | htonl(0x60000000); |
739 | dsfield = INET_ECN_encapsulate(0, dsfield); | 769 | dsfield = INET_ECN_encapsulate(0, dsfield); |
740 | ipv6_change_dsfield(ipv6h, ~INET_ECN_MASK, dsfield); | 770 | ipv6_change_dsfield(ipv6h, ~INET_ECN_MASK, dsfield); |
741 | ipv6h->payload_len = htons(skb->len - sizeof(struct ipv6hdr)); | 771 | ipv6h->payload_len = htons(skb->len - sizeof(struct ipv6hdr)); |
@@ -748,7 +778,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) | |||
748 | err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, | 778 | err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, |
749 | skb->dst->dev, dst_output); | 779 | skb->dst->dev, dst_output); |
750 | 780 | ||
751 | if (err == NET_XMIT_SUCCESS || err == NET_XMIT_CN) { | 781 | if (net_xmit_eval(err) == 0) { |
752 | stats->tx_bytes += pkt_len; | 782 | stats->tx_bytes += pkt_len; |
753 | stats->tx_packets++; | 783 | stats->tx_packets++; |
754 | } else { | 784 | } else { |
@@ -756,9 +786,6 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) | |||
756 | stats->tx_aborted_errors++; | 786 | stats->tx_aborted_errors++; |
757 | } | 787 | } |
758 | ip6_tnl_dst_store(t, dst); | 788 | ip6_tnl_dst_store(t, dst); |
759 | |||
760 | kfree(opt); | ||
761 | |||
762 | t->recursion--; | 789 | t->recursion--; |
763 | return 0; | 790 | return 0; |
764 | tx_err_link_failure: | 791 | tx_err_link_failure: |
@@ -766,7 +793,6 @@ tx_err_link_failure: | |||
766 | dst_link_failure(skb); | 793 | dst_link_failure(skb); |
767 | tx_err_dst_release: | 794 | tx_err_dst_release: |
768 | dst_release(dst); | 795 | dst_release(dst); |
769 | kfree(opt); | ||
770 | tx_err: | 796 | tx_err: |
771 | stats->tx_errors++; | 797 | stats->tx_errors++; |
772 | stats->tx_dropped++; | 798 | stats->tx_dropped++; |
@@ -778,39 +804,19 @@ tx_err: | |||
778 | static void ip6_tnl_set_cap(struct ip6_tnl *t) | 804 | static void ip6_tnl_set_cap(struct ip6_tnl *t) |
779 | { | 805 | { |
780 | struct ip6_tnl_parm *p = &t->parms; | 806 | struct ip6_tnl_parm *p = &t->parms; |
781 | struct in6_addr *laddr = &p->laddr; | 807 | int ltype = ipv6_addr_type(&p->laddr); |
782 | struct in6_addr *raddr = &p->raddr; | 808 | int rtype = ipv6_addr_type(&p->raddr); |
783 | int ltype = ipv6_addr_type(laddr); | ||
784 | int rtype = ipv6_addr_type(raddr); | ||
785 | 809 | ||
786 | p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV); | 810 | p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV); |
787 | 811 | ||
788 | if (ltype != IPV6_ADDR_ANY && rtype != IPV6_ADDR_ANY && | 812 | if (ltype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) && |
789 | ((ltype|rtype) & | 813 | rtype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) && |
790 | (IPV6_ADDR_UNICAST| | 814 | !((ltype|rtype) & IPV6_ADDR_LOOPBACK) && |
791 | IPV6_ADDR_LOOPBACK|IPV6_ADDR_LINKLOCAL| | 815 | (!((ltype|rtype) & IPV6_ADDR_LINKLOCAL) || p->link)) { |
792 | IPV6_ADDR_MAPPED|IPV6_ADDR_RESERVED)) == IPV6_ADDR_UNICAST) { | 816 | if (ltype&IPV6_ADDR_UNICAST) |
793 | struct net_device *ldev = NULL; | 817 | p->flags |= IP6_TNL_F_CAP_XMIT; |
794 | int l_ok = 1; | 818 | if (rtype&IPV6_ADDR_UNICAST) |
795 | int r_ok = 1; | 819 | p->flags |= IP6_TNL_F_CAP_RCV; |
796 | |||
797 | if (p->link) | ||
798 | ldev = dev_get_by_index(p->link); | ||
799 | |||
800 | if (ltype&IPV6_ADDR_UNICAST && !ipv6_chk_addr(laddr, ldev, 0)) | ||
801 | l_ok = 0; | ||
802 | |||
803 | if (rtype&IPV6_ADDR_UNICAST && ipv6_chk_addr(raddr, NULL, 0)) | ||
804 | r_ok = 0; | ||
805 | |||
806 | if (l_ok && r_ok) { | ||
807 | if (ltype&IPV6_ADDR_UNICAST) | ||
808 | p->flags |= IP6_TNL_F_CAP_XMIT; | ||
809 | if (rtype&IPV6_ADDR_UNICAST) | ||
810 | p->flags |= IP6_TNL_F_CAP_RCV; | ||
811 | } | ||
812 | if (ldev) | ||
813 | dev_put(ldev); | ||
814 | } | 820 | } |
815 | } | 821 | } |
816 | 822 | ||
@@ -844,8 +850,11 @@ static void ip6ip6_tnl_link_config(struct ip6_tnl *t) | |||
844 | dev->iflink = p->link; | 850 | dev->iflink = p->link; |
845 | 851 | ||
846 | if (p->flags & IP6_TNL_F_CAP_XMIT) { | 852 | if (p->flags & IP6_TNL_F_CAP_XMIT) { |
853 | int strict = (ipv6_addr_type(&p->raddr) & | ||
854 | (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL)); | ||
855 | |||
847 | struct rt6_info *rt = rt6_lookup(&p->raddr, &p->laddr, | 856 | struct rt6_info *rt = rt6_lookup(&p->raddr, &p->laddr, |
848 | p->link, 0); | 857 | p->link, strict); |
849 | 858 | ||
850 | if (rt == NULL) | 859 | if (rt == NULL) |
851 | return; | 860 | return; |
@@ -920,26 +929,20 @@ static int | |||
920 | ip6ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | 929 | ip6ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) |
921 | { | 930 | { |
922 | int err = 0; | 931 | int err = 0; |
923 | int create; | ||
924 | struct ip6_tnl_parm p; | 932 | struct ip6_tnl_parm p; |
925 | struct ip6_tnl *t = NULL; | 933 | struct ip6_tnl *t = NULL; |
926 | 934 | ||
927 | switch (cmd) { | 935 | switch (cmd) { |
928 | case SIOCGETTUNNEL: | 936 | case SIOCGETTUNNEL: |
929 | if (dev == ip6ip6_fb_tnl_dev) { | 937 | if (dev == ip6ip6_fb_tnl_dev) { |
930 | if (copy_from_user(&p, | 938 | if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof (p))) { |
931 | ifr->ifr_ifru.ifru_data, | ||
932 | sizeof (p))) { | ||
933 | err = -EFAULT; | 939 | err = -EFAULT; |
934 | break; | 940 | break; |
935 | } | 941 | } |
936 | if ((err = ip6ip6_tnl_locate(&p, &t, 0)) == -ENODEV) | 942 | t = ip6ip6_tnl_locate(&p, 0); |
937 | t = netdev_priv(dev); | 943 | } |
938 | else if (err) | 944 | if (t == NULL) |
939 | break; | ||
940 | } else | ||
941 | t = netdev_priv(dev); | 945 | t = netdev_priv(dev); |
942 | |||
943 | memcpy(&p, &t->parms, sizeof (p)); | 946 | memcpy(&p, &t->parms, sizeof (p)); |
944 | if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof (p))) { | 947 | if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof (p))) { |
945 | err = -EFAULT; | 948 | err = -EFAULT; |
@@ -948,35 +951,36 @@ ip6ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
948 | case SIOCADDTUNNEL: | 951 | case SIOCADDTUNNEL: |
949 | case SIOCCHGTUNNEL: | 952 | case SIOCCHGTUNNEL: |
950 | err = -EPERM; | 953 | err = -EPERM; |
951 | create = (cmd == SIOCADDTUNNEL); | ||
952 | if (!capable(CAP_NET_ADMIN)) | 954 | if (!capable(CAP_NET_ADMIN)) |
953 | break; | 955 | break; |
954 | if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof (p))) { | 956 | err = -EFAULT; |
955 | err = -EFAULT; | 957 | if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof (p))) |
956 | break; | 958 | break; |
957 | } | 959 | err = -EINVAL; |
958 | if (!create && dev != ip6ip6_fb_tnl_dev) { | 960 | if (p.proto != IPPROTO_IPV6) |
959 | t = netdev_priv(dev); | ||
960 | } | ||
961 | if (!t && (err = ip6ip6_tnl_locate(&p, &t, create))) { | ||
962 | break; | 961 | break; |
963 | } | 962 | t = ip6ip6_tnl_locate(&p, cmd == SIOCADDTUNNEL); |
964 | if (cmd == SIOCCHGTUNNEL) { | 963 | if (dev != ip6ip6_fb_tnl_dev && cmd == SIOCCHGTUNNEL) { |
965 | if (t->dev != dev) { | 964 | if (t != NULL) { |
966 | err = -EEXIST; | 965 | if (t->dev != dev) { |
967 | break; | 966 | err = -EEXIST; |
968 | } | 967 | break; |
968 | } | ||
969 | } else | ||
970 | t = netdev_priv(dev); | ||
971 | |||
969 | ip6ip6_tnl_unlink(t); | 972 | ip6ip6_tnl_unlink(t); |
970 | err = ip6ip6_tnl_change(t, &p); | 973 | err = ip6ip6_tnl_change(t, &p); |
971 | ip6ip6_tnl_link(t); | 974 | ip6ip6_tnl_link(t); |
972 | netdev_state_change(dev); | 975 | netdev_state_change(dev); |
973 | } | 976 | } |
974 | if (copy_to_user(ifr->ifr_ifru.ifru_data, | 977 | if (t) { |
975 | &t->parms, sizeof (p))) { | ||
976 | err = -EFAULT; | ||
977 | } else { | ||
978 | err = 0; | 978 | err = 0; |
979 | } | 979 | if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof (p))) |
980 | err = -EFAULT; | ||
981 | |||
982 | } else | ||
983 | err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT); | ||
980 | break; | 984 | break; |
981 | case SIOCDELTUNNEL: | 985 | case SIOCDELTUNNEL: |
982 | err = -EPERM; | 986 | err = -EPERM; |
@@ -984,22 +988,18 @@ ip6ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
984 | break; | 988 | break; |
985 | 989 | ||
986 | if (dev == ip6ip6_fb_tnl_dev) { | 990 | if (dev == ip6ip6_fb_tnl_dev) { |
987 | if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, | 991 | err = -EFAULT; |
988 | sizeof (p))) { | 992 | if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof (p))) |
989 | err = -EFAULT; | ||
990 | break; | 993 | break; |
991 | } | 994 | err = -ENOENT; |
992 | err = ip6ip6_tnl_locate(&p, &t, 0); | 995 | if ((t = ip6ip6_tnl_locate(&p, 0)) == NULL) |
993 | if (err) | ||
994 | break; | 996 | break; |
995 | if (t == netdev_priv(ip6ip6_fb_tnl_dev)) { | 997 | err = -EPERM; |
996 | err = -EPERM; | 998 | if (t->dev == ip6ip6_fb_tnl_dev) |
997 | break; | 999 | break; |
998 | } | 1000 | dev = t->dev; |
999 | } else { | ||
1000 | t = netdev_priv(dev); | ||
1001 | } | 1001 | } |
1002 | err = unregister_netdevice(t->dev); | 1002 | err = unregister_netdevice(dev); |
1003 | break; | 1003 | break; |
1004 | default: | 1004 | default: |
1005 | err = -EINVAL; | 1005 | err = -EINVAL; |
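
The ip6_tunnel.c hunks above replace the kmalloc'd ipv6_txoptions built by create_tel() with a stack struct ipv6_tel_txoption that init_tel_txopt() fills just before the option is pushed, which is why the kfree(opt) calls disappear from both the normal and the error exits; the transmit-status check also becomes net_xmit_eval(err) == 0 instead of comparing against NET_XMIT_SUCCESS and NET_XMIT_CN by hand. A minimal user-space sketch of the heap-to-stack pattern, with an invented option layout rather than the kernel's:

/* Sketch of replacing a per-packet heap allocation with a stack object.
 * The struct layout and init_tel_txopt() below are illustrative only;
 * they do not reproduce the kernel's ipv6_tel_txoption. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct tel_txoption {
    uint8_t opt[8];             /* destination-option buffer: encap-limit TLV + padding */
};

static void init_tel_txopt(struct tel_txoption *o, uint8_t encap_limit)
{
    memset(o, 0, sizeof(*o));
    o->opt[0] = 4;              /* tunnel encap-limit option type (assumed value) */
    o->opt[1] = 1;              /* option data length */
    o->opt[2] = encap_limit;
}

static void xmit(int encap_limit)
{
    struct tel_txoption opt;    /* lives on the stack: no kfree() on any exit path */

    if (encap_limit >= 0) {
        init_tel_txopt(&opt, (uint8_t)encap_limit);
        printf("pushing encap-limit option, limit=%u\n", (unsigned)opt.opt[2]);
    }
    /* error paths can simply return; there is nothing to free */
}

int main(void)
{
    xmit(4);
    xmit(-1);
    return 0;
}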
diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c index 71f59f18ede8..511730b67e97 100644 --- a/net/ipv6/ipcomp6.c +++ b/net/ipv6/ipcomp6.c | |||
@@ -176,7 +176,7 @@ out_ok: | |||
176 | } | 176 | } |
177 | 177 | ||
178 | static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | 178 | static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, |
179 | int type, int code, int offset, __u32 info) | 179 | int type, int code, int offset, __be32 info) |
180 | { | 180 | { |
181 | __be32 spi; | 181 | __be32 spi; |
182 | struct ipv6hdr *iph = (struct ipv6hdr*)skb->data; | 182 | struct ipv6hdr *iph = (struct ipv6hdr*)skb->data; |
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index de6b91981b30..1eafcfc95e81 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c | |||
@@ -51,6 +51,7 @@ | |||
51 | #include <net/inet_common.h> | 51 | #include <net/inet_common.h> |
52 | #include <net/tcp.h> | 52 | #include <net/tcp.h> |
53 | #include <net/udp.h> | 53 | #include <net/udp.h> |
54 | #include <net/udplite.h> | ||
54 | #include <net/xfrm.h> | 55 | #include <net/xfrm.h> |
55 | 56 | ||
56 | #include <asm/uaccess.h> | 57 | #include <asm/uaccess.h> |
@@ -239,6 +240,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, | |||
239 | struct sk_buff *pktopt; | 240 | struct sk_buff *pktopt; |
240 | 241 | ||
241 | if (sk->sk_protocol != IPPROTO_UDP && | 242 | if (sk->sk_protocol != IPPROTO_UDP && |
243 | sk->sk_protocol != IPPROTO_UDPLITE && | ||
242 | sk->sk_protocol != IPPROTO_TCP) | 244 | sk->sk_protocol != IPPROTO_TCP) |
243 | break; | 245 | break; |
244 | 246 | ||
@@ -276,11 +278,15 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, | |||
276 | sk->sk_family = PF_INET; | 278 | sk->sk_family = PF_INET; |
277 | tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); | 279 | tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); |
278 | } else { | 280 | } else { |
281 | struct proto *prot = &udp_prot; | ||
282 | |||
283 | if (sk->sk_protocol == IPPROTO_UDPLITE) | ||
284 | prot = &udplite_prot; | ||
279 | local_bh_disable(); | 285 | local_bh_disable(); |
280 | sock_prot_dec_use(sk->sk_prot); | 286 | sock_prot_dec_use(sk->sk_prot); |
281 | sock_prot_inc_use(&udp_prot); | 287 | sock_prot_inc_use(prot); |
282 | local_bh_enable(); | 288 | local_bh_enable(); |
283 | sk->sk_prot = &udp_prot; | 289 | sk->sk_prot = prot; |
284 | sk->sk_socket->ops = &inet_dgram_ops; | 290 | sk->sk_socket->ops = &inet_dgram_ops; |
285 | sk->sk_family = PF_INET; | 291 | sk->sk_family = PF_INET; |
286 | } | 292 | } |
@@ -813,6 +819,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, | |||
813 | switch (optname) { | 819 | switch (optname) { |
814 | case IPV6_ADDRFORM: | 820 | case IPV6_ADDRFORM: |
815 | if (sk->sk_protocol != IPPROTO_UDP && | 821 | if (sk->sk_protocol != IPPROTO_UDP && |
822 | sk->sk_protocol != IPPROTO_UDPLITE && | ||
816 | sk->sk_protocol != IPPROTO_TCP) | 823 | sk->sk_protocol != IPPROTO_TCP) |
817 | return -EINVAL; | 824 | return -EINVAL; |
818 | if (sk->sk_state != TCP_ESTABLISHED) | 825 | if (sk->sk_state != TCP_ESTABLISHED) |
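
The ipv6_sockglue.c change lets IPV6_ADDRFORM work on UDP-Lite sockets too: the protocol check admits IPPROTO_UDPLITE, and the datagram branch now picks udp_prot or udplite_prot based on sk->sk_protocol before swapping sk->sk_prot, rather than assuming plain UDP. The shape is a simple ops-table selection; a stand-alone sketch with invented tables:

/* Illustrative ops-table selection keyed on a protocol number.
 * The tables and function bodies are stand-ins, not kernel structures. */
#include <stdio.h>

#define IPPROTO_UDP      17
#define IPPROTO_UDPLITE 136

struct proto_ops {
    const char *name;
    void (*sendmsg)(void);
};

static void udp_send(void)     { puts("udp sendmsg"); }
static void udplite_send(void) { puts("udplite sendmsg"); }

static const struct proto_ops udp_ops     = { "UDP",      udp_send };
static const struct proto_ops udplite_ops = { "UDP-Lite", udplite_send };

int main(void)
{
    int sk_protocol = IPPROTO_UDPLITE;
    const struct proto_ops *prot = &udp_ops;

    if (sk_protocol == IPPROTO_UDPLITE)     /* choose the table before the swap */
        prot = &udplite_ops;

    printf("switching socket to %s ops\n", prot->name);
    prot->sendmsg();
    return 0;
}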
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 3b114e3fa2f8..a1c231a04ac2 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c | |||
@@ -83,7 +83,7 @@ | |||
83 | struct mld2_grec { | 83 | struct mld2_grec { |
84 | __u8 grec_type; | 84 | __u8 grec_type; |
85 | __u8 grec_auxwords; | 85 | __u8 grec_auxwords; |
86 | __u16 grec_nsrcs; | 86 | __be16 grec_nsrcs; |
87 | struct in6_addr grec_mca; | 87 | struct in6_addr grec_mca; |
88 | struct in6_addr grec_src[0]; | 88 | struct in6_addr grec_src[0]; |
89 | }; | 89 | }; |
@@ -91,18 +91,18 @@ struct mld2_grec { | |||
91 | struct mld2_report { | 91 | struct mld2_report { |
92 | __u8 type; | 92 | __u8 type; |
93 | __u8 resv1; | 93 | __u8 resv1; |
94 | __u16 csum; | 94 | __sum16 csum; |
95 | __u16 resv2; | 95 | __be16 resv2; |
96 | __u16 ngrec; | 96 | __be16 ngrec; |
97 | struct mld2_grec grec[0]; | 97 | struct mld2_grec grec[0]; |
98 | }; | 98 | }; |
99 | 99 | ||
100 | struct mld2_query { | 100 | struct mld2_query { |
101 | __u8 type; | 101 | __u8 type; |
102 | __u8 code; | 102 | __u8 code; |
103 | __u16 csum; | 103 | __sum16 csum; |
104 | __u16 mrc; | 104 | __be16 mrc; |
105 | __u16 resv1; | 105 | __be16 resv1; |
106 | struct in6_addr mca; | 106 | struct in6_addr mca; |
107 | #if defined(__LITTLE_ENDIAN_BITFIELD) | 107 | #if defined(__LITTLE_ENDIAN_BITFIELD) |
108 | __u8 qrv:3, | 108 | __u8 qrv:3, |
@@ -116,7 +116,7 @@ struct mld2_query { | |||
116 | #error "Please fix <asm/byteorder.h>" | 116 | #error "Please fix <asm/byteorder.h>" |
117 | #endif | 117 | #endif |
118 | __u8 qqic; | 118 | __u8 qqic; |
119 | __u16 nsrcs; | 119 | __be16 nsrcs; |
120 | struct in6_addr srcs[0]; | 120 | struct in6_addr srcs[0]; |
121 | }; | 121 | }; |
122 | 122 | ||
@@ -1465,7 +1465,7 @@ static void mld_sendpack(struct sk_buff *skb) | |||
1465 | struct inet6_dev *idev = in6_dev_get(skb->dev); | 1465 | struct inet6_dev *idev = in6_dev_get(skb->dev); |
1466 | int err; | 1466 | int err; |
1467 | 1467 | ||
1468 | IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS); | 1468 | IP6_INC_STATS(idev, IPSTATS_MIB_OUTREQUESTS); |
1469 | payload_len = skb->tail - (unsigned char *)skb->nh.ipv6h - | 1469 | payload_len = skb->tail - (unsigned char *)skb->nh.ipv6h - |
1470 | sizeof(struct ipv6hdr); | 1470 | sizeof(struct ipv6hdr); |
1471 | mldlen = skb->tail - skb->h.raw; | 1471 | mldlen = skb->tail - skb->h.raw; |
@@ -1477,9 +1477,9 @@ static void mld_sendpack(struct sk_buff *skb) | |||
1477 | mld_dev_queue_xmit); | 1477 | mld_dev_queue_xmit); |
1478 | if (!err) { | 1478 | if (!err) { |
1479 | ICMP6_INC_STATS(idev,ICMP6_MIB_OUTMSGS); | 1479 | ICMP6_INC_STATS(idev,ICMP6_MIB_OUTMSGS); |
1480 | IP6_INC_STATS(IPSTATS_MIB_OUTMCASTPKTS); | 1480 | IP6_INC_STATS(idev, IPSTATS_MIB_OUTMCASTPKTS); |
1481 | } else | 1481 | } else |
1482 | IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS); | 1482 | IP6_INC_STATS(idev, IPSTATS_MIB_OUTDISCARDS); |
1483 | 1483 | ||
1484 | if (likely(idev != NULL)) | 1484 | if (likely(idev != NULL)) |
1485 | in6_dev_put(idev); | 1485 | in6_dev_put(idev); |
@@ -1763,7 +1763,10 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type) | |||
1763 | IPV6_TLV_ROUTERALERT, 2, 0, 0, | 1763 | IPV6_TLV_ROUTERALERT, 2, 0, 0, |
1764 | IPV6_TLV_PADN, 0 }; | 1764 | IPV6_TLV_PADN, 0 }; |
1765 | 1765 | ||
1766 | IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS); | 1766 | rcu_read_lock(); |
1767 | IP6_INC_STATS(__in6_dev_get(dev), | ||
1768 | IPSTATS_MIB_OUTREQUESTS); | ||
1769 | rcu_read_unlock(); | ||
1767 | snd_addr = addr; | 1770 | snd_addr = addr; |
1768 | if (type == ICMPV6_MGM_REDUCTION) { | 1771 | if (type == ICMPV6_MGM_REDUCTION) { |
1769 | snd_addr = &all_routers; | 1772 | snd_addr = &all_routers; |
@@ -1777,7 +1780,10 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type) | |||
1777 | skb = sock_alloc_send_skb(sk, LL_RESERVED_SPACE(dev) + full_len, 1, &err); | 1780 | skb = sock_alloc_send_skb(sk, LL_RESERVED_SPACE(dev) + full_len, 1, &err); |
1778 | 1781 | ||
1779 | if (skb == NULL) { | 1782 | if (skb == NULL) { |
1780 | IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS); | 1783 | rcu_read_lock(); |
1784 | IP6_INC_STATS(__in6_dev_get(dev), | ||
1785 | IPSTATS_MIB_OUTDISCARDS); | ||
1786 | rcu_read_unlock(); | ||
1781 | return; | 1787 | return; |
1782 | } | 1788 | } |
1783 | 1789 | ||
@@ -1816,9 +1822,9 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type) | |||
1816 | else | 1822 | else |
1817 | ICMP6_INC_STATS(idev, ICMP6_MIB_OUTGROUPMEMBRESPONSES); | 1823 | ICMP6_INC_STATS(idev, ICMP6_MIB_OUTGROUPMEMBRESPONSES); |
1818 | ICMP6_INC_STATS(idev, ICMP6_MIB_OUTMSGS); | 1824 | ICMP6_INC_STATS(idev, ICMP6_MIB_OUTMSGS); |
1819 | IP6_INC_STATS(IPSTATS_MIB_OUTMCASTPKTS); | 1825 | IP6_INC_STATS(idev, IPSTATS_MIB_OUTMCASTPKTS); |
1820 | } else | 1826 | } else |
1821 | IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS); | 1827 | IP6_INC_STATS(idev, IPSTATS_MIB_OUTDISCARDS); |
1822 | 1828 | ||
1823 | if (likely(idev != NULL)) | 1829 | if (likely(idev != NULL)) |
1824 | in6_dev_put(idev); | 1830 | in6_dev_put(idev); |
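
In mcast.c the on-the-wire MLDv2 fields change from __u16/__u32 to the sparse-checked __be16 and __sum16 types, and the IP6_INC_STATS calls gain an inet6_dev argument for per-device counters. The byte-order annotations do not change any values; they record that fields such as grec_nsrcs and mrc arrive big-endian and must pass through ntohs() before use, so a sparse build can flag a missed conversion. A runnable illustration of reading such a field:

/* Reading a big-endian wire field: the result is the same on little- and
 * big-endian hosts because ntohs() does the conversion. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <arpa/inet.h>

struct mld2_grec_like {
    uint8_t  grec_type;
    uint8_t  grec_auxwords;
    uint16_t grec_nsrcs;    /* big-endian on the wire (the kernel now marks this __be16) */
};

int main(void)
{
    /* 0x00 0x02 in the nsrcs slot means "2 sources" regardless of host order */
    const uint8_t wire[] = { 0x01, 0x00, 0x00, 0x02 };
    struct mld2_grec_like g;

    memcpy(&g, wire, sizeof(g));
    printf("raw value: 0x%04x, after ntohs(): %u sources\n",
           (unsigned)g.grec_nsrcs, (unsigned)ntohs(g.grec_nsrcs));
    return 0;
}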
diff --git a/net/ipv6/mip6.c b/net/ipv6/mip6.c index 7ccdc8fc5a31..be7dd7db65d7 100644 --- a/net/ipv6/mip6.c +++ b/net/ipv6/mip6.c | |||
@@ -262,10 +262,10 @@ static int mip6_destopt_reject(struct xfrm_state *x, struct sk_buff *skb, struct | |||
262 | sel.proto = fl->proto; | 262 | sel.proto = fl->proto; |
263 | sel.dport = xfrm_flowi_dport(fl); | 263 | sel.dport = xfrm_flowi_dport(fl); |
264 | if (sel.dport) | 264 | if (sel.dport) |
265 | sel.dport_mask = ~((__u16)0); | 265 | sel.dport_mask = htons(~0); |
266 | sel.sport = xfrm_flowi_sport(fl); | 266 | sel.sport = xfrm_flowi_sport(fl); |
267 | if (sel.sport) | 267 | if (sel.sport) |
268 | sel.sport_mask = ~((__u16)0); | 268 | sel.sport_mask = htons(~0); |
269 | sel.ifindex = fl->oif; | 269 | sel.ifindex = fl->oif; |
270 | 270 | ||
271 | err = km_report(IPPROTO_DSTOPTS, &sel, | 271 | err = km_report(IPPROTO_DSTOPTS, &sel, |
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 73eb8c33e9f0..56ea92837307 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c | |||
@@ -472,7 +472,9 @@ static void ndisc_send_na(struct net_device *dev, struct neighbour *neigh, | |||
472 | inc_opt = 0; | 472 | inc_opt = 0; |
473 | } | 473 | } |
474 | 474 | ||
475 | skb = sock_alloc_send_skb(sk, MAX_HEADER + len + LL_RESERVED_SPACE(dev), | 475 | skb = sock_alloc_send_skb(sk, |
476 | (MAX_HEADER + sizeof(struct ipv6hdr) + | ||
477 | len + LL_RESERVED_SPACE(dev)), | ||
476 | 1, &err); | 478 | 1, &err); |
477 | 479 | ||
478 | if (skb == NULL) { | 480 | if (skb == NULL) { |
@@ -513,7 +515,7 @@ static void ndisc_send_na(struct net_device *dev, struct neighbour *neigh, | |||
513 | 515 | ||
514 | skb->dst = dst; | 516 | skb->dst = dst; |
515 | idev = in6_dev_get(dst->dev); | 517 | idev = in6_dev_get(dst->dev); |
516 | IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS); | 518 | IP6_INC_STATS(idev, IPSTATS_MIB_OUTREQUESTS); |
517 | err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev, dst_output); | 519 | err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev, dst_output); |
518 | if (!err) { | 520 | if (!err) { |
519 | ICMP6_INC_STATS(idev, ICMP6_MIB_OUTNEIGHBORADVERTISEMENTS); | 521 | ICMP6_INC_STATS(idev, ICMP6_MIB_OUTNEIGHBORADVERTISEMENTS); |
@@ -561,7 +563,9 @@ void ndisc_send_ns(struct net_device *dev, struct neighbour *neigh, | |||
561 | if (send_llinfo) | 563 | if (send_llinfo) |
562 | len += ndisc_opt_addr_space(dev); | 564 | len += ndisc_opt_addr_space(dev); |
563 | 565 | ||
564 | skb = sock_alloc_send_skb(sk, MAX_HEADER + len + LL_RESERVED_SPACE(dev), | 566 | skb = sock_alloc_send_skb(sk, |
567 | (MAX_HEADER + sizeof(struct ipv6hdr) + | ||
568 | len + LL_RESERVED_SPACE(dev)), | ||
565 | 1, &err); | 569 | 1, &err); |
566 | if (skb == NULL) { | 570 | if (skb == NULL) { |
567 | ND_PRINTK0(KERN_ERR | 571 | ND_PRINTK0(KERN_ERR |
@@ -597,7 +601,7 @@ void ndisc_send_ns(struct net_device *dev, struct neighbour *neigh, | |||
597 | /* send it! */ | 601 | /* send it! */ |
598 | skb->dst = dst; | 602 | skb->dst = dst; |
599 | idev = in6_dev_get(dst->dev); | 603 | idev = in6_dev_get(dst->dev); |
600 | IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS); | 604 | IP6_INC_STATS(idev, IPSTATS_MIB_OUTREQUESTS); |
601 | err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev, dst_output); | 605 | err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev, dst_output); |
602 | if (!err) { | 606 | if (!err) { |
603 | ICMP6_INC_STATS(idev, ICMP6_MIB_OUTNEIGHBORSOLICITS); | 607 | ICMP6_INC_STATS(idev, ICMP6_MIB_OUTNEIGHBORSOLICITS); |
@@ -636,7 +640,9 @@ void ndisc_send_rs(struct net_device *dev, struct in6_addr *saddr, | |||
636 | if (dev->addr_len) | 640 | if (dev->addr_len) |
637 | len += ndisc_opt_addr_space(dev); | 641 | len += ndisc_opt_addr_space(dev); |
638 | 642 | ||
639 | skb = sock_alloc_send_skb(sk, MAX_HEADER + len + LL_RESERVED_SPACE(dev), | 643 | skb = sock_alloc_send_skb(sk, |
644 | (MAX_HEADER + sizeof(struct ipv6hdr) + | ||
645 | len + LL_RESERVED_SPACE(dev)), | ||
640 | 1, &err); | 646 | 1, &err); |
641 | if (skb == NULL) { | 647 | if (skb == NULL) { |
642 | ND_PRINTK0(KERN_ERR | 648 | ND_PRINTK0(KERN_ERR |
@@ -670,7 +676,7 @@ void ndisc_send_rs(struct net_device *dev, struct in6_addr *saddr, | |||
670 | /* send it! */ | 676 | /* send it! */ |
671 | skb->dst = dst; | 677 | skb->dst = dst; |
672 | idev = in6_dev_get(dst->dev); | 678 | idev = in6_dev_get(dst->dev); |
673 | IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS); | 679 | IP6_INC_STATS(idev, IPSTATS_MIB_OUTREQUESTS); |
674 | err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev, dst_output); | 680 | err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev, dst_output); |
675 | if (!err) { | 681 | if (!err) { |
676 | ICMP6_INC_STATS(idev, ICMP6_MIB_OUTROUTERSOLICITS); | 682 | ICMP6_INC_STATS(idev, ICMP6_MIB_OUTROUTERSOLICITS); |
@@ -1261,10 +1267,11 @@ skip_defrtr: | |||
1261 | } | 1267 | } |
1262 | 1268 | ||
1263 | if (ndopts.nd_opts_mtu) { | 1269 | if (ndopts.nd_opts_mtu) { |
1270 | __be32 n; | ||
1264 | u32 mtu; | 1271 | u32 mtu; |
1265 | 1272 | ||
1266 | memcpy(&mtu, ((u8*)(ndopts.nd_opts_mtu+1))+2, sizeof(mtu)); | 1273 | memcpy(&n, ((u8*)(ndopts.nd_opts_mtu+1))+2, sizeof(mtu)); |
1267 | mtu = ntohl(mtu); | 1274 | mtu = ntohl(n); |
1268 | 1275 | ||
1269 | if (mtu < IPV6_MIN_MTU || mtu > skb->dev->mtu) { | 1276 | if (mtu < IPV6_MIN_MTU || mtu > skb->dev->mtu) { |
1270 | ND_PRINTK2(KERN_WARNING | 1277 | ND_PRINTK2(KERN_WARNING |
@@ -1446,7 +1453,9 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh, | |||
1446 | rd_len &= ~0x7; | 1453 | rd_len &= ~0x7; |
1447 | len += rd_len; | 1454 | len += rd_len; |
1448 | 1455 | ||
1449 | buff = sock_alloc_send_skb(sk, MAX_HEADER + len + LL_RESERVED_SPACE(dev), | 1456 | buff = sock_alloc_send_skb(sk, |
1457 | (MAX_HEADER + sizeof(struct ipv6hdr) + | ||
1458 | len + LL_RESERVED_SPACE(dev)), | ||
1450 | 1, &err); | 1459 | 1, &err); |
1451 | if (buff == NULL) { | 1460 | if (buff == NULL) { |
1452 | ND_PRINTK0(KERN_ERR | 1461 | ND_PRINTK0(KERN_ERR |
@@ -1504,7 +1513,7 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh, | |||
1504 | 1513 | ||
1505 | buff->dst = dst; | 1514 | buff->dst = dst; |
1506 | idev = in6_dev_get(dst->dev); | 1515 | idev = in6_dev_get(dst->dev); |
1507 | IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS); | 1516 | IP6_INC_STATS(idev, IPSTATS_MIB_OUTREQUESTS); |
1508 | err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, buff, NULL, dst->dev, dst_output); | 1517 | err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, buff, NULL, dst->dev, dst_output); |
1509 | if (!err) { | 1518 | if (!err) { |
1510 | ICMP6_INC_STATS(idev, ICMP6_MIB_OUTREDIRECTS); | 1519 | ICMP6_INC_STATS(idev, ICMP6_MIB_OUTREDIRECTS); |
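
Each ndisc sender above now adds sizeof(struct ipv6hdr) explicitly when sizing the sock_alloc_send_skb() call, so the buffer budget is link-layer reserve plus IPv6 header plus ICMPv6 message instead of leaning on MAX_HEADER to absorb the network header. The arithmetic is trivial but easy to get wrong; a sketch with illustrative constants:

/* Headroom budgeting for an outgoing packet buffer.
 * MAX_HEADER and LL_RESERVED_SPACE are stand-in values here. */
#include <stdio.h>

#define MAX_HEADER        48    /* illustrative: room for tunnel/extra headers */
#define LL_RESERVED_SPACE 16    /* illustrative: link-layer header + alignment */
#define IPV6_HDR_LEN      40    /* fixed IPv6 header size */

static size_t ndisc_alloc_size(size_t icmp_len)
{
    /* link-layer reserve + extra-header room + IPv6 header + message */
    return MAX_HEADER + IPV6_HDR_LEN + icmp_len + LL_RESERVED_SPACE;
}

int main(void)
{
    size_t len = 32;    /* e.g. a neighbour solicitation with one option */
    printf("allocate %zu bytes for a %zu-byte ICMPv6 message\n",
           ndisc_alloc_size(len), len);
    return 0;
}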
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c index 580b1aba6722..f6294e5bcb31 100644 --- a/net/ipv6/netfilter.c +++ b/net/ipv6/netfilter.c | |||
@@ -31,7 +31,7 @@ int ip6_route_me_harder(struct sk_buff *skb) | |||
31 | #endif | 31 | #endif |
32 | 32 | ||
33 | if (dst->error) { | 33 | if (dst->error) { |
34 | IP6_INC_STATS(IPSTATS_MIB_OUTNOROUTES); | 34 | IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES); |
35 | LIMIT_NETDEBUG(KERN_DEBUG "ip6_route_me_harder: No more route.\n"); | 35 | LIMIT_NETDEBUG(KERN_DEBUG "ip6_route_me_harder: No more route.\n"); |
36 | dst_release(dst); | 36 | dst_release(dst); |
37 | return -EINVAL; | 37 | return -EINVAL; |
@@ -80,11 +80,11 @@ static int nf_ip6_reroute(struct sk_buff **pskb, const struct nf_info *info) | |||
80 | return 0; | 80 | return 0; |
81 | } | 81 | } |
82 | 82 | ||
83 | unsigned int nf_ip6_checksum(struct sk_buff *skb, unsigned int hook, | 83 | __sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook, |
84 | unsigned int dataoff, u_int8_t protocol) | 84 | unsigned int dataoff, u_int8_t protocol) |
85 | { | 85 | { |
86 | struct ipv6hdr *ip6h = skb->nh.ipv6h; | 86 | struct ipv6hdr *ip6h = skb->nh.ipv6h; |
87 | unsigned int csum = 0; | 87 | __sum16 csum = 0; |
88 | 88 | ||
89 | switch (skb->ip_summed) { | 89 | switch (skb->ip_summed) { |
90 | case CHECKSUM_COMPLETE: | 90 | case CHECKSUM_COMPLETE: |
@@ -100,12 +100,13 @@ unsigned int nf_ip6_checksum(struct sk_buff *skb, unsigned int hook, | |||
100 | } | 100 | } |
101 | /* fall through */ | 101 | /* fall through */ |
102 | case CHECKSUM_NONE: | 102 | case CHECKSUM_NONE: |
103 | skb->csum = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, | 103 | skb->csum = ~csum_unfold( |
104 | csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, | ||
104 | skb->len - dataoff, | 105 | skb->len - dataoff, |
105 | protocol, | 106 | protocol, |
106 | csum_sub(0, | 107 | csum_sub(0, |
107 | skb_checksum(skb, 0, | 108 | skb_checksum(skb, 0, |
108 | dataoff, 0))); | 109 | dataoff, 0)))); |
109 | csum = __skb_checksum_complete(skb); | 110 | csum = __skb_checksum_complete(skb); |
110 | } | 111 | } |
111 | return csum; | 112 | return csum; |
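
nf_ip6_checksum() now returns a __sum16 and runs the pseudo-header sum through csum_unfold() before complementing it, keeping the sparse distinction between a 32-bit partial sum (__wsum) and the folded 16-bit Internet checksum (__sum16). The fold itself is ordinary one's-complement carry wrap-around; a user-space rendition of the idea (not the kernel's csum_fold or csum_partial):

/* One's-complement fold of a 32-bit partial sum into a 16-bit checksum.
 * This mirrors what the kernel's csum_fold() does conceptually; it is a
 * user-space illustration, not the kernel implementation. */
#include <stdio.h>
#include <stdint.h>

static uint16_t csum_fold(uint32_t partial)
{
    partial = (partial & 0xffff) + (partial >> 16);   /* add carries back in */
    partial += partial >> 16;                         /* ...and any new carry */
    return (uint16_t)~partial;
}

static uint32_t csum_partial(const uint8_t *data, size_t len, uint32_t sum)
{
    for (size_t i = 0; i + 1 < len; i += 2)
        sum += (uint32_t)(data[i] << 8 | data[i + 1]);
    if (len & 1)
        sum += (uint32_t)(data[len - 1] << 8);
    return sum;
}

int main(void)
{
    const uint8_t payload[] = { 0x45, 0x00, 0x00, 0x1c, 0xab, 0xcd };
    uint32_t partial = csum_partial(payload, sizeof(payload), 0);
    printf("partial=0x%08x folded=0x%04x\n", partial, (unsigned)csum_fold(partial));
    return 0;
}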
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig index d7c45a9c15fe..fc3e5eb4bc3f 100644 --- a/net/ipv6/netfilter/Kconfig +++ b/net/ipv6/netfilter/Kconfig | |||
@@ -6,7 +6,7 @@ menu "IPv6: Netfilter Configuration (EXPERIMENTAL)" | |||
6 | depends on INET && IPV6 && NETFILTER && EXPERIMENTAL | 6 | depends on INET && IPV6 && NETFILTER && EXPERIMENTAL |
7 | 7 | ||
8 | config NF_CONNTRACK_IPV6 | 8 | config NF_CONNTRACK_IPV6 |
9 | tristate "IPv6 support for new connection tracking (EXPERIMENTAL)" | 9 | tristate "IPv6 connection tracking support (EXPERIMENTAL)" |
10 | depends on EXPERIMENTAL && NF_CONNTRACK | 10 | depends on EXPERIMENTAL && NF_CONNTRACK |
11 | ---help--- | 11 | ---help--- |
12 | Connection tracking keeps a record of what packets have passed | 12 | Connection tracking keeps a record of what packets have passed |
diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c index 9fec832ee08b..d4d9f182441a 100644 --- a/net/ipv6/netfilter/ip6_queue.c +++ b/net/ipv6/netfilter/ip6_queue.c | |||
@@ -241,7 +241,7 @@ ipq_build_packet_message(struct ipq_queue_entry *entry, int *errp) | |||
241 | pmsg->data_len = data_len; | 241 | pmsg->data_len = data_len; |
242 | pmsg->timestamp_sec = entry->skb->tstamp.off_sec; | 242 | pmsg->timestamp_sec = entry->skb->tstamp.off_sec; |
243 | pmsg->timestamp_usec = entry->skb->tstamp.off_usec; | 243 | pmsg->timestamp_usec = entry->skb->tstamp.off_usec; |
244 | pmsg->mark = entry->skb->nfmark; | 244 | pmsg->mark = entry->skb->mark; |
245 | pmsg->hook = entry->info->hook; | 245 | pmsg->hook = entry->info->hook; |
246 | pmsg->hw_protocol = entry->skb->protocol; | 246 | pmsg->hw_protocol = entry->skb->protocol; |
247 | 247 | ||
@@ -620,6 +620,7 @@ static ctl_table ipq_root_table[] = { | |||
620 | { .ctl_name = 0 } | 620 | { .ctl_name = 0 } |
621 | }; | 621 | }; |
622 | 622 | ||
623 | #ifdef CONFIG_PROC_FS | ||
623 | static int | 624 | static int |
624 | ipq_get_info(char *buffer, char **start, off_t offset, int length) | 625 | ipq_get_info(char *buffer, char **start, off_t offset, int length) |
625 | { | 626 | { |
@@ -653,6 +654,7 @@ ipq_get_info(char *buffer, char **start, off_t offset, int length) | |||
653 | len = 0; | 654 | len = 0; |
654 | return len; | 655 | return len; |
655 | } | 656 | } |
657 | #endif /* CONFIG_PROC_FS */ | ||
656 | 658 | ||
657 | static struct nf_queue_handler nfqh = { | 659 | static struct nf_queue_handler nfqh = { |
658 | .name = "ip6_queue", | 660 | .name = "ip6_queue", |
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 204e02162d49..f63fb86d7c7b 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c | |||
@@ -1481,7 +1481,8 @@ int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, | |||
1481 | if (hp == NULL) | 1481 | if (hp == NULL) |
1482 | return -EBADMSG; | 1482 | return -EBADMSG; |
1483 | if (nexthdr == NEXTHDR_FRAGMENT) { | 1483 | if (nexthdr == NEXTHDR_FRAGMENT) { |
1484 | unsigned short _frag_off, *fp; | 1484 | unsigned short _frag_off; |
1485 | __be16 *fp; | ||
1485 | fp = skb_header_pointer(skb, | 1486 | fp = skb_header_pointer(skb, |
1486 | start+offsetof(struct frag_hdr, | 1487 | start+offsetof(struct frag_hdr, |
1487 | frag_off), | 1488 | frag_off), |
diff --git a/net/ipv6/netfilter/ip6t_LOG.c b/net/ipv6/netfilter/ip6t_LOG.c index 0cf537d30185..33b1faa90d74 100644 --- a/net/ipv6/netfilter/ip6t_LOG.c +++ b/net/ipv6/netfilter/ip6t_LOG.c | |||
@@ -69,9 +69,9 @@ static void dump_packet(const struct nf_loginfo *info, | |||
69 | /* Max length: 44 "LEN=65535 TC=255 HOPLIMIT=255 FLOWLBL=FFFFF " */ | 69 | /* Max length: 44 "LEN=65535 TC=255 HOPLIMIT=255 FLOWLBL=FFFFF " */ |
70 | printk("LEN=%Zu TC=%u HOPLIMIT=%u FLOWLBL=%u ", | 70 | printk("LEN=%Zu TC=%u HOPLIMIT=%u FLOWLBL=%u ", |
71 | ntohs(ih->payload_len) + sizeof(struct ipv6hdr), | 71 | ntohs(ih->payload_len) + sizeof(struct ipv6hdr), |
72 | (ntohl(*(u_int32_t *)ih) & 0x0ff00000) >> 20, | 72 | (ntohl(*(__be32 *)ih) & 0x0ff00000) >> 20, |
73 | ih->hop_limit, | 73 | ih->hop_limit, |
74 | (ntohl(*(u_int32_t *)ih) & 0x000fffff)); | 74 | (ntohl(*(__be32 *)ih) & 0x000fffff)); |
75 | 75 | ||
76 | fragment = 0; | 76 | fragment = 0; |
77 | ptr = ip6hoff + sizeof(struct ipv6hdr); | 77 | ptr = ip6hoff + sizeof(struct ipv6hdr); |
@@ -270,11 +270,15 @@ static void dump_packet(const struct nf_loginfo *info, | |||
270 | } | 270 | } |
271 | break; | 271 | break; |
272 | } | 272 | } |
273 | case IPPROTO_UDP: { | 273 | case IPPROTO_UDP: |
274 | case IPPROTO_UDPLITE: { | ||
274 | struct udphdr _udph, *uh; | 275 | struct udphdr _udph, *uh; |
275 | 276 | ||
276 | /* Max length: 10 "PROTO=UDP " */ | 277 | if (currenthdr == IPPROTO_UDP) |
277 | printk("PROTO=UDP "); | 278 | /* Max length: 10 "PROTO=UDP " */ |
279 | printk("PROTO=UDP " ); | ||
280 | else /* Max length: 14 "PROTO=UDPLITE " */ | ||
281 | printk("PROTO=UDPLITE "); | ||
278 | 282 | ||
279 | if (fragment) | 283 | if (fragment) |
280 | break; | 284 | break; |
@@ -436,13 +440,8 @@ ip6t_log_target(struct sk_buff **pskb, | |||
436 | li.u.log.level = loginfo->level; | 440 | li.u.log.level = loginfo->level; |
437 | li.u.log.logflags = loginfo->logflags; | 441 | li.u.log.logflags = loginfo->logflags; |
438 | 442 | ||
439 | if (loginfo->logflags & IP6T_LOG_NFLOG) | 443 | ip6t_log_packet(PF_INET6, hooknum, *pskb, in, out, &li, |
440 | nf_log_packet(PF_INET6, hooknum, *pskb, in, out, &li, | 444 | loginfo->prefix); |
441 | "%s", loginfo->prefix); | ||
442 | else | ||
443 | ip6t_log_packet(PF_INET6, hooknum, *pskb, in, out, &li, | ||
444 | loginfo->prefix); | ||
445 | |||
446 | return IP6T_CONTINUE; | 445 | return IP6T_CONTINUE; |
447 | } | 446 | } |
448 | 447 | ||
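
The ip6t_LOG.c hunks add UDP-Lite to the protocol dump, drop the IP6T_LOG_NFLOG branch so ip6t_log_target() always calls ip6t_log_packet(), and cast the first word of the IPv6 header through __be32 before ntohl() when printing the traffic class and flow label. Those two fields are plain mask-and-shift extractions from that word; a runnable example with a made-up header word:

/* Pulling version, traffic class and flow label out of the first
 * 32-bit word of an IPv6 header.  The sample word is made up. */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

int main(void)
{
    /* 0x6 (version) | 0x2e (traffic class) | 0xabcde (flow label), in network order */
    uint32_t first_word_be = htonl(0x62eabcde);
    uint32_t w = ntohl(first_word_be);

    printf("version=%u tc=%u flowlabel=0x%05x\n",
           w >> 28,                     /* top 4 bits */
           (w & 0x0ff00000) >> 20,      /* next 8 bits, as in ip6t_LOG.c */
           w & 0x000fffff);             /* low 20 bits */
    return 0;
}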
diff --git a/net/ipv6/netfilter/ip6table_mangle.c b/net/ipv6/netfilter/ip6table_mangle.c index 386ea260e767..6250e86a6ddc 100644 --- a/net/ipv6/netfilter/ip6table_mangle.c +++ b/net/ipv6/netfilter/ip6table_mangle.c | |||
@@ -149,11 +149,10 @@ ip6t_local_hook(unsigned int hook, | |||
149 | int (*okfn)(struct sk_buff *)) | 149 | int (*okfn)(struct sk_buff *)) |
150 | { | 150 | { |
151 | 151 | ||
152 | unsigned long nfmark; | ||
153 | unsigned int ret; | 152 | unsigned int ret; |
154 | struct in6_addr saddr, daddr; | 153 | struct in6_addr saddr, daddr; |
155 | u_int8_t hop_limit; | 154 | u_int8_t hop_limit; |
156 | u_int32_t flowlabel; | 155 | u_int32_t flowlabel, mark; |
157 | 156 | ||
158 | #if 0 | 157 | #if 0 |
159 | /* root is playing with raw sockets. */ | 158 | /* root is playing with raw sockets. */ |
@@ -165,10 +164,10 @@ ip6t_local_hook(unsigned int hook, | |||
165 | } | 164 | } |
166 | #endif | 165 | #endif |
167 | 166 | ||
168 | /* save source/dest address, nfmark, hoplimit, flowlabel, priority, */ | 167 | /* save source/dest address, mark, hoplimit, flowlabel, priority, */ |
169 | memcpy(&saddr, &(*pskb)->nh.ipv6h->saddr, sizeof(saddr)); | 168 | memcpy(&saddr, &(*pskb)->nh.ipv6h->saddr, sizeof(saddr)); |
170 | memcpy(&daddr, &(*pskb)->nh.ipv6h->daddr, sizeof(daddr)); | 169 | memcpy(&daddr, &(*pskb)->nh.ipv6h->daddr, sizeof(daddr)); |
171 | nfmark = (*pskb)->nfmark; | 170 | mark = (*pskb)->mark; |
172 | hop_limit = (*pskb)->nh.ipv6h->hop_limit; | 171 | hop_limit = (*pskb)->nh.ipv6h->hop_limit; |
173 | 172 | ||
174 | /* flowlabel and prio (includes version, which shouldn't change either */ | 173 | /* flowlabel and prio (includes version, which shouldn't change either */ |
@@ -179,7 +178,7 @@ ip6t_local_hook(unsigned int hook, | |||
179 | if (ret != NF_DROP && ret != NF_STOLEN | 178 | if (ret != NF_DROP && ret != NF_STOLEN |
180 | && (memcmp(&(*pskb)->nh.ipv6h->saddr, &saddr, sizeof(saddr)) | 179 | && (memcmp(&(*pskb)->nh.ipv6h->saddr, &saddr, sizeof(saddr)) |
181 | || memcmp(&(*pskb)->nh.ipv6h->daddr, &daddr, sizeof(daddr)) | 180 | || memcmp(&(*pskb)->nh.ipv6h->daddr, &daddr, sizeof(daddr)) |
182 | || (*pskb)->nfmark != nfmark | 181 | || (*pskb)->mark != mark |
183 | || (*pskb)->nh.ipv6h->hop_limit != hop_limit)) | 182 | || (*pskb)->nh.ipv6h->hop_limit != hop_limit)) |
184 | return ip6_route_me_harder(*pskb) == 0 ? ret : NF_DROP; | 183 | return ip6_route_me_harder(*pskb) == 0 ? ret : NF_DROP; |
185 | 184 | ||
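
ip6t_local_hook() in ip6table_mangle.c keeps its snapshot-and-compare logic but now records skb->mark instead of the removed nfmark field: it saves the addresses, mark and hop limit before the table runs and reroutes the packet if any of them changed. A stand-alone sketch of that pattern, with illustrative types in place of sk_buff:

/* Snapshot-and-compare: reroute only if the mangle hook changed a
 * routing-relevant field.  Illustrative types, not sk_buff. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct pkt {
    uint8_t  saddr[16], daddr[16];
    uint32_t mark;          /* the unified skb->mark (was nfmark) */
    uint8_t  hop_limit;
};

static void mangle_hook(struct pkt *p) { p->mark = 7; }   /* pretend a rule set a mark */

int main(void)
{
    struct pkt p = { .mark = 0, .hop_limit = 64 };
    struct pkt before = p;              /* snapshot before the table runs */

    mangle_hook(&p);

    if (memcmp(before.saddr, p.saddr, 16) || memcmp(before.daddr, p.daddr, 16) ||
        before.mark != p.mark || before.hop_limit != p.hop_limit)
        puts("field changed by mangle table: re-route the packet");
    else
        puts("nothing routing-relevant changed");
    return 0;
}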
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c index e5e53fff9e38..a20615ffccff 100644 --- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c +++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c | |||
@@ -33,7 +33,7 @@ | |||
33 | #include <linux/netfilter_ipv6.h> | 33 | #include <linux/netfilter_ipv6.h> |
34 | #include <net/netfilter/nf_conntrack.h> | 34 | #include <net/netfilter/nf_conntrack.h> |
35 | #include <net/netfilter/nf_conntrack_helper.h> | 35 | #include <net/netfilter/nf_conntrack_helper.h> |
36 | #include <net/netfilter/nf_conntrack_protocol.h> | 36 | #include <net/netfilter/nf_conntrack_l4proto.h> |
37 | #include <net/netfilter/nf_conntrack_l3proto.h> | 37 | #include <net/netfilter/nf_conntrack_l3proto.h> |
38 | #include <net/netfilter/nf_conntrack_core.h> | 38 | #include <net/netfilter/nf_conntrack_core.h> |
39 | 39 | ||
@@ -43,8 +43,6 @@ | |||
43 | #define DEBUGP(format, args...) | 43 | #define DEBUGP(format, args...) |
44 | #endif | 44 | #endif |
45 | 45 | ||
46 | DECLARE_PER_CPU(struct ip_conntrack_stat, nf_conntrack_stat); | ||
47 | |||
48 | static int ipv6_pkt_to_tuple(const struct sk_buff *skb, unsigned int nhoff, | 46 | static int ipv6_pkt_to_tuple(const struct sk_buff *skb, unsigned int nhoff, |
49 | struct nf_conntrack_tuple *tuple) | 47 | struct nf_conntrack_tuple *tuple) |
50 | { | 48 | { |
@@ -211,11 +209,6 @@ out: | |||
211 | return nf_conntrack_confirm(pskb); | 209 | return nf_conntrack_confirm(pskb); |
212 | } | 210 | } |
213 | 211 | ||
214 | extern struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb); | ||
215 | extern void nf_ct_frag6_output(unsigned int hooknum, struct sk_buff *skb, | ||
216 | struct net_device *in, | ||
217 | struct net_device *out, | ||
218 | int (*okfn)(struct sk_buff *)); | ||
219 | static unsigned int ipv6_defrag(unsigned int hooknum, | 212 | static unsigned int ipv6_defrag(unsigned int hooknum, |
220 | struct sk_buff **pskb, | 213 | struct sk_buff **pskb, |
221 | const struct net_device *in, | 214 | const struct net_device *in, |
@@ -331,26 +324,7 @@ static struct nf_hook_ops ipv6_conntrack_ops[] = { | |||
331 | }; | 324 | }; |
332 | 325 | ||
333 | #ifdef CONFIG_SYSCTL | 326 | #ifdef CONFIG_SYSCTL |
334 | 327 | static ctl_table nf_ct_ipv6_sysctl_table[] = { | |
335 | /* From nf_conntrack_proto_icmpv6.c */ | ||
336 | extern unsigned int nf_ct_icmpv6_timeout; | ||
337 | |||
338 | /* From nf_conntrack_reasm.c */ | ||
339 | extern unsigned int nf_ct_frag6_timeout; | ||
340 | extern unsigned int nf_ct_frag6_low_thresh; | ||
341 | extern unsigned int nf_ct_frag6_high_thresh; | ||
342 | |||
343 | static struct ctl_table_header *nf_ct_ipv6_sysctl_header; | ||
344 | |||
345 | static ctl_table nf_ct_sysctl_table[] = { | ||
346 | { | ||
347 | .ctl_name = NET_NF_CONNTRACK_ICMPV6_TIMEOUT, | ||
348 | .procname = "nf_conntrack_icmpv6_timeout", | ||
349 | .data = &nf_ct_icmpv6_timeout, | ||
350 | .maxlen = sizeof(unsigned int), | ||
351 | .mode = 0644, | ||
352 | .proc_handler = &proc_dointvec_jiffies, | ||
353 | }, | ||
354 | { | 328 | { |
355 | .ctl_name = NET_NF_CONNTRACK_FRAG6_TIMEOUT, | 329 | .ctl_name = NET_NF_CONNTRACK_FRAG6_TIMEOUT, |
356 | .procname = "nf_conntrack_frag6_timeout", | 330 | .procname = "nf_conntrack_frag6_timeout", |
@@ -377,26 +351,6 @@ static ctl_table nf_ct_sysctl_table[] = { | |||
377 | }, | 351 | }, |
378 | { .ctl_name = 0 } | 352 | { .ctl_name = 0 } |
379 | }; | 353 | }; |
380 | |||
381 | static ctl_table nf_ct_netfilter_table[] = { | ||
382 | { | ||
383 | .ctl_name = NET_NETFILTER, | ||
384 | .procname = "netfilter", | ||
385 | .mode = 0555, | ||
386 | .child = nf_ct_sysctl_table, | ||
387 | }, | ||
388 | { .ctl_name = 0 } | ||
389 | }; | ||
390 | |||
391 | static ctl_table nf_ct_net_table[] = { | ||
392 | { | ||
393 | .ctl_name = CTL_NET, | ||
394 | .procname = "net", | ||
395 | .mode = 0555, | ||
396 | .child = nf_ct_netfilter_table, | ||
397 | }, | ||
398 | { .ctl_name = 0 } | ||
399 | }; | ||
400 | #endif | 354 | #endif |
401 | 355 | ||
402 | #if defined(CONFIG_NF_CT_NETLINK) || \ | 356 | #if defined(CONFIG_NF_CT_NETLINK) || \ |
@@ -454,16 +408,14 @@ struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv6 = { | |||
454 | .tuple_to_nfattr = ipv6_tuple_to_nfattr, | 408 | .tuple_to_nfattr = ipv6_tuple_to_nfattr, |
455 | .nfattr_to_tuple = ipv6_nfattr_to_tuple, | 409 | .nfattr_to_tuple = ipv6_nfattr_to_tuple, |
456 | #endif | 410 | #endif |
411 | #ifdef CONFIG_SYSCTL | ||
412 | .ctl_table_path = nf_net_netfilter_sysctl_path, | ||
413 | .ctl_table = nf_ct_ipv6_sysctl_table, | ||
414 | #endif | ||
457 | .get_features = ipv6_get_features, | 415 | .get_features = ipv6_get_features, |
458 | .me = THIS_MODULE, | 416 | .me = THIS_MODULE, |
459 | }; | 417 | }; |
460 | 418 | ||
461 | extern struct nf_conntrack_protocol nf_conntrack_protocol_tcp6; | ||
462 | extern struct nf_conntrack_protocol nf_conntrack_protocol_udp6; | ||
463 | extern struct nf_conntrack_protocol nf_conntrack_protocol_icmpv6; | ||
464 | extern int nf_ct_frag6_init(void); | ||
465 | extern void nf_ct_frag6_cleanup(void); | ||
466 | |||
467 | MODULE_ALIAS("nf_conntrack-" __stringify(AF_INET6)); | 419 | MODULE_ALIAS("nf_conntrack-" __stringify(AF_INET6)); |
468 | MODULE_LICENSE("GPL"); | 420 | MODULE_LICENSE("GPL"); |
469 | MODULE_AUTHOR("Yasuyuki KOZAKAI @USAGI <yasuyuki.kozakai@toshiba.co.jp>"); | 421 | MODULE_AUTHOR("Yasuyuki KOZAKAI @USAGI <yasuyuki.kozakai@toshiba.co.jp>"); |
@@ -479,19 +431,19 @@ static int __init nf_conntrack_l3proto_ipv6_init(void) | |||
479 | printk("nf_conntrack_ipv6: can't initialize frag6.\n"); | 431 | printk("nf_conntrack_ipv6: can't initialize frag6.\n"); |
480 | return ret; | 432 | return ret; |
481 | } | 433 | } |
482 | ret = nf_conntrack_protocol_register(&nf_conntrack_protocol_tcp6); | 434 | ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_tcp6); |
483 | if (ret < 0) { | 435 | if (ret < 0) { |
484 | printk("nf_conntrack_ipv6: can't register tcp.\n"); | 436 | printk("nf_conntrack_ipv6: can't register tcp.\n"); |
485 | goto cleanup_frag6; | 437 | goto cleanup_frag6; |
486 | } | 438 | } |
487 | 439 | ||
488 | ret = nf_conntrack_protocol_register(&nf_conntrack_protocol_udp6); | 440 | ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_udp6); |
489 | if (ret < 0) { | 441 | if (ret < 0) { |
490 | printk("nf_conntrack_ipv6: can't register udp.\n"); | 442 | printk("nf_conntrack_ipv6: can't register udp.\n"); |
491 | goto cleanup_tcp; | 443 | goto cleanup_tcp; |
492 | } | 444 | } |
493 | 445 | ||
494 | ret = nf_conntrack_protocol_register(&nf_conntrack_protocol_icmpv6); | 446 | ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_icmpv6); |
495 | if (ret < 0) { | 447 | if (ret < 0) { |
496 | printk("nf_conntrack_ipv6: can't register icmpv6.\n"); | 448 | printk("nf_conntrack_ipv6: can't register icmpv6.\n"); |
497 | goto cleanup_udp; | 449 | goto cleanup_udp; |
@@ -510,28 +462,16 @@ static int __init nf_conntrack_l3proto_ipv6_init(void) | |||
510 | "hook.\n"); | 462 | "hook.\n"); |
511 | goto cleanup_ipv6; | 463 | goto cleanup_ipv6; |
512 | } | 464 | } |
513 | #ifdef CONFIG_SYSCTL | ||
514 | nf_ct_ipv6_sysctl_header = register_sysctl_table(nf_ct_net_table, 0); | ||
515 | if (nf_ct_ipv6_sysctl_header == NULL) { | ||
516 | printk("nf_conntrack: can't register to sysctl.\n"); | ||
517 | ret = -ENOMEM; | ||
518 | goto cleanup_hooks; | ||
519 | } | ||
520 | #endif | ||
521 | return ret; | 465 | return ret; |
522 | 466 | ||
523 | #ifdef CONFIG_SYSCTL | ||
524 | cleanup_hooks: | ||
525 | nf_unregister_hooks(ipv6_conntrack_ops, ARRAY_SIZE(ipv6_conntrack_ops)); | ||
526 | #endif | ||
527 | cleanup_ipv6: | 467 | cleanup_ipv6: |
528 | nf_conntrack_l3proto_unregister(&nf_conntrack_l3proto_ipv6); | 468 | nf_conntrack_l3proto_unregister(&nf_conntrack_l3proto_ipv6); |
529 | cleanup_icmpv6: | 469 | cleanup_icmpv6: |
530 | nf_conntrack_protocol_unregister(&nf_conntrack_protocol_icmpv6); | 470 | nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_icmpv6); |
531 | cleanup_udp: | 471 | cleanup_udp: |
532 | nf_conntrack_protocol_unregister(&nf_conntrack_protocol_udp6); | 472 | nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_udp6); |
533 | cleanup_tcp: | 473 | cleanup_tcp: |
534 | nf_conntrack_protocol_unregister(&nf_conntrack_protocol_tcp6); | 474 | nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_tcp6); |
535 | cleanup_frag6: | 475 | cleanup_frag6: |
536 | nf_ct_frag6_cleanup(); | 476 | nf_ct_frag6_cleanup(); |
537 | return ret; | 477 | return ret; |
@@ -540,14 +480,11 @@ static int __init nf_conntrack_l3proto_ipv6_init(void) | |||
540 | static void __exit nf_conntrack_l3proto_ipv6_fini(void) | 480 | static void __exit nf_conntrack_l3proto_ipv6_fini(void) |
541 | { | 481 | { |
542 | synchronize_net(); | 482 | synchronize_net(); |
543 | #ifdef CONFIG_SYSCTL | ||
544 | unregister_sysctl_table(nf_ct_ipv6_sysctl_header); | ||
545 | #endif | ||
546 | nf_unregister_hooks(ipv6_conntrack_ops, ARRAY_SIZE(ipv6_conntrack_ops)); | 483 | nf_unregister_hooks(ipv6_conntrack_ops, ARRAY_SIZE(ipv6_conntrack_ops)); |
547 | nf_conntrack_l3proto_unregister(&nf_conntrack_l3proto_ipv6); | 484 | nf_conntrack_l3proto_unregister(&nf_conntrack_l3proto_ipv6); |
548 | nf_conntrack_protocol_unregister(&nf_conntrack_protocol_icmpv6); | 485 | nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_icmpv6); |
549 | nf_conntrack_protocol_unregister(&nf_conntrack_protocol_udp6); | 486 | nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_udp6); |
550 | nf_conntrack_protocol_unregister(&nf_conntrack_protocol_tcp6); | 487 | nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_tcp6); |
551 | nf_ct_frag6_cleanup(); | 488 | nf_ct_frag6_cleanup(); |
552 | } | 489 | } |
553 | 490 | ||
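
The hand-rolled sysctl plumbing in nf_conntrack_l3proto_ipv6.c (nf_ct_netfilter_table, nf_ct_net_table and the register_sysctl_table()/unregister_sysctl_table() calls) goes away: the l3proto now just exposes .ctl_table_path and .ctl_table and lets the conntrack core register them, and the ICMPv6 timeout entry moves into the l4proto in the next file. The underlying shape is a descriptor that optionally carries its own configuration table; a hedged sketch with invented names:

/* Descriptor-carried configuration table, registered by generic code.
 * All names here are illustrative; this is not the nf_conntrack API. */
#include <stdio.h>

struct cfg_entry {
    const char   *name;
    unsigned int *value;
};

struct proto_desc {
    const char             *name;
    const struct cfg_entry *cfg;    /* optional: NULL means nothing to register */
    unsigned int            ncfg;
};

static unsigned int frag_timeout = 60;

static const struct cfg_entry ipv6_cfg[] = {
    { "nf_conntrack_frag6_timeout", &frag_timeout },
};

static const struct proto_desc ipv6_desc = {
    .name = "ipv6",
    .cfg  = ipv6_cfg,
    .ncfg = 1,
};

static void register_proto(const struct proto_desc *d)
{
    printf("registering %s\n", d->name);
    for (unsigned int i = 0; i < d->ncfg; i++)      /* generic code owns registration */
        printf("  sysctl %s = %u\n", d->cfg[i].name, *d->cfg[i].value);
}

int main(void)
{
    register_proto(&ipv6_desc);
    return 0;
}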
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c index 34d447208ffd..3905cacc69af 100644 --- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c +++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c | |||
@@ -29,11 +29,11 @@ | |||
29 | #include <linux/seq_file.h> | 29 | #include <linux/seq_file.h> |
30 | #include <linux/netfilter_ipv6.h> | 30 | #include <linux/netfilter_ipv6.h> |
31 | #include <net/netfilter/nf_conntrack_tuple.h> | 31 | #include <net/netfilter/nf_conntrack_tuple.h> |
32 | #include <net/netfilter/nf_conntrack_protocol.h> | 32 | #include <net/netfilter/nf_conntrack_l4proto.h> |
33 | #include <net/netfilter/nf_conntrack_core.h> | 33 | #include <net/netfilter/nf_conntrack_core.h> |
34 | #include <net/netfilter/ipv6/nf_conntrack_icmpv6.h> | 34 | #include <net/netfilter/ipv6/nf_conntrack_icmpv6.h> |
35 | 35 | ||
36 | unsigned long nf_ct_icmpv6_timeout __read_mostly = 30*HZ; | 36 | static unsigned long nf_ct_icmpv6_timeout __read_mostly = 30*HZ; |
37 | 37 | ||
38 | #if 0 | 38 | #if 0 |
39 | #define DEBUGP printk | 39 | #define DEBUGP printk |
@@ -142,9 +142,6 @@ static int icmpv6_new(struct nf_conn *conntrack, | |||
142 | return 1; | 142 | return 1; |
143 | } | 143 | } |
144 | 144 | ||
145 | extern int | ||
146 | nf_ct_ipv6_skip_exthdr(struct sk_buff *skb, int start, u8 *nexthdrp, int len); | ||
147 | extern struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv6; | ||
148 | static int | 145 | static int |
149 | icmpv6_error_message(struct sk_buff *skb, | 146 | icmpv6_error_message(struct sk_buff *skb, |
150 | unsigned int icmp6off, | 147 | unsigned int icmp6off, |
@@ -155,7 +152,7 @@ icmpv6_error_message(struct sk_buff *skb, | |||
155 | struct nf_conntrack_tuple_hash *h; | 152 | struct nf_conntrack_tuple_hash *h; |
156 | struct icmp6hdr _hdr, *hp; | 153 | struct icmp6hdr _hdr, *hp; |
157 | unsigned int inip6off; | 154 | unsigned int inip6off; |
158 | struct nf_conntrack_protocol *inproto; | 155 | struct nf_conntrack_l4proto *inproto; |
159 | u_int8_t inprotonum; | 156 | u_int8_t inprotonum; |
160 | unsigned int inprotoff; | 157 | unsigned int inprotoff; |
161 | 158 | ||
@@ -185,7 +182,7 @@ icmpv6_error_message(struct sk_buff *skb, | |||
185 | return -NF_ACCEPT; | 182 | return -NF_ACCEPT; |
186 | } | 183 | } |
187 | 184 | ||
188 | inproto = __nf_ct_proto_find(PF_INET6, inprotonum); | 185 | inproto = __nf_ct_l4proto_find(PF_INET6, inprotonum); |
189 | 186 | ||
190 | /* Are they talking about one of our connections? */ | 187 | /* Are they talking about one of our connections? */ |
191 | if (!nf_ct_get_tuple(skb, inip6off, inprotoff, PF_INET6, inprotonum, | 188 | if (!nf_ct_get_tuple(skb, inip6off, inprotoff, PF_INET6, inprotonum, |
@@ -290,7 +287,7 @@ static int icmpv6_nfattr_to_tuple(struct nfattr *tb[], | |||
290 | tuple->dst.u.icmp.code = | 287 | tuple->dst.u.icmp.code = |
291 | *(u_int8_t *)NFA_DATA(tb[CTA_PROTO_ICMPV6_CODE-1]); | 288 | *(u_int8_t *)NFA_DATA(tb[CTA_PROTO_ICMPV6_CODE-1]); |
292 | tuple->src.u.icmp.id = | 289 | tuple->src.u.icmp.id = |
293 | *(u_int16_t *)NFA_DATA(tb[CTA_PROTO_ICMPV6_ID-1]); | 290 | *(__be16 *)NFA_DATA(tb[CTA_PROTO_ICMPV6_ID-1]); |
294 | 291 | ||
295 | if (tuple->dst.u.icmp.type < 128 | 292 | if (tuple->dst.u.icmp.type < 128 |
296 | || tuple->dst.u.icmp.type - 128 >= sizeof(invmap) | 293 | || tuple->dst.u.icmp.type - 128 >= sizeof(invmap) |
@@ -301,10 +298,27 @@ static int icmpv6_nfattr_to_tuple(struct nfattr *tb[], | |||
301 | } | 298 | } |
302 | #endif | 299 | #endif |
303 | 300 | ||
304 | struct nf_conntrack_protocol nf_conntrack_protocol_icmpv6 = | 301 | #ifdef CONFIG_SYSCTL |
302 | static struct ctl_table_header *icmpv6_sysctl_header; | ||
303 | static struct ctl_table icmpv6_sysctl_table[] = { | ||
304 | { | ||
305 | .ctl_name = NET_NF_CONNTRACK_ICMPV6_TIMEOUT, | ||
306 | .procname = "nf_conntrack_icmpv6_timeout", | ||
307 | .data = &nf_ct_icmpv6_timeout, | ||
308 | .maxlen = sizeof(unsigned int), | ||
309 | .mode = 0644, | ||
310 | .proc_handler = &proc_dointvec_jiffies, | ||
311 | }, | ||
312 | { | ||
313 | .ctl_name = 0 | ||
314 | } | ||
315 | }; | ||
316 | #endif /* CONFIG_SYSCTL */ | ||
317 | |||
318 | struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6 = | ||
305 | { | 319 | { |
306 | .l3proto = PF_INET6, | 320 | .l3proto = PF_INET6, |
307 | .proto = IPPROTO_ICMPV6, | 321 | .l4proto = IPPROTO_ICMPV6, |
308 | .name = "icmpv6", | 322 | .name = "icmpv6", |
309 | .pkt_to_tuple = icmpv6_pkt_to_tuple, | 323 | .pkt_to_tuple = icmpv6_pkt_to_tuple, |
310 | .invert_tuple = icmpv6_invert_tuple, | 324 | .invert_tuple = icmpv6_invert_tuple, |
@@ -318,6 +332,10 @@ struct nf_conntrack_protocol nf_conntrack_protocol_icmpv6 = | |||
318 | .tuple_to_nfattr = icmpv6_tuple_to_nfattr, | 332 | .tuple_to_nfattr = icmpv6_tuple_to_nfattr, |
319 | .nfattr_to_tuple = icmpv6_nfattr_to_tuple, | 333 | .nfattr_to_tuple = icmpv6_nfattr_to_tuple, |
320 | #endif | 334 | #endif |
335 | #ifdef CONFIG_SYSCTL | ||
336 | .ctl_table_header = &icmpv6_sysctl_header, | ||
337 | .ctl_table = icmpv6_sysctl_table, | ||
338 | #endif | ||
321 | }; | 339 | }; |
322 | 340 | ||
323 | EXPORT_SYMBOL(nf_conntrack_protocol_icmpv6); | 341 | EXPORT_SYMBOL(nf_conntrack_l4proto_icmpv6); |
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index bf93c1ea6be9..37e5fca923aa 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c | |||
@@ -72,7 +72,7 @@ struct nf_ct_frag6_queue | |||
72 | struct hlist_node list; | 72 | struct hlist_node list; |
73 | struct list_head lru_list; /* lru list member */ | 73 | struct list_head lru_list; /* lru list member */ |
74 | 74 | ||
75 | __u32 id; /* fragment id */ | 75 | __be32 id; /* fragment id */ |
76 | struct in6_addr saddr; | 76 | struct in6_addr saddr; |
77 | struct in6_addr daddr; | 77 | struct in6_addr daddr; |
78 | 78 | ||
@@ -115,28 +115,28 @@ static __inline__ void fq_unlink(struct nf_ct_frag6_queue *fq) | |||
115 | write_unlock(&nf_ct_frag6_lock); | 115 | write_unlock(&nf_ct_frag6_lock); |
116 | } | 116 | } |
117 | 117 | ||
118 | static unsigned int ip6qhashfn(u32 id, struct in6_addr *saddr, | 118 | static unsigned int ip6qhashfn(__be32 id, struct in6_addr *saddr, |
119 | struct in6_addr *daddr) | 119 | struct in6_addr *daddr) |
120 | { | 120 | { |
121 | u32 a, b, c; | 121 | u32 a, b, c; |
122 | 122 | ||
123 | a = saddr->s6_addr32[0]; | 123 | a = (__force u32)saddr->s6_addr32[0]; |
124 | b = saddr->s6_addr32[1]; | 124 | b = (__force u32)saddr->s6_addr32[1]; |
125 | c = saddr->s6_addr32[2]; | 125 | c = (__force u32)saddr->s6_addr32[2]; |
126 | 126 | ||
127 | a += JHASH_GOLDEN_RATIO; | 127 | a += JHASH_GOLDEN_RATIO; |
128 | b += JHASH_GOLDEN_RATIO; | 128 | b += JHASH_GOLDEN_RATIO; |
129 | c += nf_ct_frag6_hash_rnd; | 129 | c += nf_ct_frag6_hash_rnd; |
130 | __jhash_mix(a, b, c); | 130 | __jhash_mix(a, b, c); |
131 | 131 | ||
132 | a += saddr->s6_addr32[3]; | 132 | a += (__force u32)saddr->s6_addr32[3]; |
133 | b += daddr->s6_addr32[0]; | 133 | b += (__force u32)daddr->s6_addr32[0]; |
134 | c += daddr->s6_addr32[1]; | 134 | c += (__force u32)daddr->s6_addr32[1]; |
135 | __jhash_mix(a, b, c); | 135 | __jhash_mix(a, b, c); |
136 | 136 | ||
137 | a += daddr->s6_addr32[2]; | 137 | a += (__force u32)daddr->s6_addr32[2]; |
138 | b += daddr->s6_addr32[3]; | 138 | b += (__force u32)daddr->s6_addr32[3]; |
139 | c += id; | 139 | c += (__force u32)id; |
140 | __jhash_mix(a, b, c); | 140 | __jhash_mix(a, b, c); |
141 | 141 | ||
142 | return c & (FRAG6Q_HASHSZ - 1); | 142 | return c & (FRAG6Q_HASHSZ - 1); |
@@ -338,7 +338,7 @@ static struct nf_ct_frag6_queue *nf_ct_frag6_intern(unsigned int hash, | |||
338 | 338 | ||
339 | 339 | ||
340 | static struct nf_ct_frag6_queue * | 340 | static struct nf_ct_frag6_queue * |
341 | nf_ct_frag6_create(unsigned int hash, u32 id, struct in6_addr *src, struct in6_addr *dst) | 341 | nf_ct_frag6_create(unsigned int hash, __be32 id, struct in6_addr *src, struct in6_addr *dst) |
342 | { | 342 | { |
343 | struct nf_ct_frag6_queue *fq; | 343 | struct nf_ct_frag6_queue *fq; |
344 | 344 | ||
@@ -366,7 +366,7 @@ oom: | |||
366 | } | 366 | } |
367 | 367 | ||
368 | static __inline__ struct nf_ct_frag6_queue * | 368 | static __inline__ struct nf_ct_frag6_queue * |
369 | fq_find(u32 id, struct in6_addr *src, struct in6_addr *dst) | 369 | fq_find(__be32 id, struct in6_addr *src, struct in6_addr *dst) |
370 | { | 370 | { |
371 | struct nf_ct_frag6_queue *fq; | 371 | struct nf_ct_frag6_queue *fq; |
372 | struct hlist_node *n; | 372 | struct hlist_node *n; |
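
ip6qhashfn() in nf_conntrack_reasm.c is arithmetically unchanged; the fragment id becomes __be32 and the address words gain __force u32 casts so sparse accepts raw big-endian words feeding the hash. The function is the classic three-word Jenkins mix over saddr, daddr and the fragment id, masked down to a bucket index. A user-space rendition (the mix macro and constants follow the usual jhash construction and are illustrative, not copied from linux/jhash.h):

/* Jenkins-style 3-word mixing over an IPv6 fragment key (saddr, daddr, id). */
#include <stdio.h>
#include <stdint.h>

#define JHASH_GOLDEN_RATIO 0x9e3779b9u
#define FRAG6Q_HASHSZ 64

#define jhash_mix(a, b, c) do {                    \
    a -= b; a -= c; a ^= (c >> 13);                \
    b -= c; b -= a; b ^= (a << 8);                 \
    c -= a; c -= b; c ^= (b >> 13);                \
    a -= b; a -= c; a ^= (c >> 12);                \
    b -= c; b -= a; b ^= (a << 16);                \
    c -= a; c -= b; c ^= (b >> 5);                 \
    a -= b; a -= c; a ^= (c >> 3);                 \
    b -= c; b -= a; b ^= (a << 10);                \
    c -= a; c -= b; c ^= (b >> 15);                \
} while (0)

static unsigned int frag_hash(const uint32_t saddr[4], const uint32_t daddr[4],
                              uint32_t id, uint32_t rnd)
{
    uint32_t a = saddr[0] + JHASH_GOLDEN_RATIO;
    uint32_t b = saddr[1] + JHASH_GOLDEN_RATIO;
    uint32_t c = saddr[2] + rnd;

    jhash_mix(a, b, c);

    a += saddr[3]; b += daddr[0]; c += daddr[1];
    jhash_mix(a, b, c);

    a += daddr[2]; b += daddr[3]; c += id;
    jhash_mix(a, b, c);

    return c & (FRAG6Q_HASHSZ - 1);     /* bucket index */
}

int main(void)
{
    uint32_t s[4] = { 0x20010db8, 0, 0, 1 };
    uint32_t d[4] = { 0x20010db8, 0, 0, 2 };
    printf("bucket %u\n", frag_hash(s, d, 0x1234, 0xdeadbeef));
    return 0;
}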
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c index efee7a6301a8..35249d8487bb 100644 --- a/net/ipv6/proc.c +++ b/net/ipv6/proc.c | |||
@@ -49,6 +49,8 @@ static int sockstat6_seq_show(struct seq_file *seq, void *v) | |||
49 | fold_prot_inuse(&tcpv6_prot)); | 49 | fold_prot_inuse(&tcpv6_prot)); |
50 | seq_printf(seq, "UDP6: inuse %d\n", | 50 | seq_printf(seq, "UDP6: inuse %d\n", |
51 | fold_prot_inuse(&udpv6_prot)); | 51 | fold_prot_inuse(&udpv6_prot)); |
52 | seq_printf(seq, "UDPLITE6: inuse %d\n", | ||
53 | fold_prot_inuse(&udplitev6_prot)); | ||
52 | seq_printf(seq, "RAW6: inuse %d\n", | 54 | seq_printf(seq, "RAW6: inuse %d\n", |
53 | fold_prot_inuse(&rawv6_prot)); | 55 | fold_prot_inuse(&rawv6_prot)); |
54 | seq_printf(seq, "FRAG6: inuse %d memory %d\n", | 56 | seq_printf(seq, "FRAG6: inuse %d memory %d\n", |
@@ -133,6 +135,14 @@ static struct snmp_mib snmp6_udp6_list[] = { | |||
133 | SNMP_MIB_SENTINEL | 135 | SNMP_MIB_SENTINEL |
134 | }; | 136 | }; |
135 | 137 | ||
138 | static struct snmp_mib snmp6_udplite6_list[] = { | ||
139 | SNMP_MIB_ITEM("UdpLite6InDatagrams", UDP_MIB_INDATAGRAMS), | ||
140 | SNMP_MIB_ITEM("UdpLite6NoPorts", UDP_MIB_NOPORTS), | ||
141 | SNMP_MIB_ITEM("UdpLite6InErrors", UDP_MIB_INERRORS), | ||
142 | SNMP_MIB_ITEM("UdpLite6OutDatagrams", UDP_MIB_OUTDATAGRAMS), | ||
143 | SNMP_MIB_SENTINEL | ||
144 | }; | ||
145 | |||
136 | static unsigned long | 146 | static unsigned long |
137 | fold_field(void *mib[], int offt) | 147 | fold_field(void *mib[], int offt) |
138 | { | 148 | { |
@@ -161,11 +171,13 @@ static int snmp6_seq_show(struct seq_file *seq, void *v) | |||
161 | 171 | ||
162 | if (idev) { | 172 | if (idev) { |
163 | seq_printf(seq, "%-32s\t%u\n", "ifIndex", idev->dev->ifindex); | 173 | seq_printf(seq, "%-32s\t%u\n", "ifIndex", idev->dev->ifindex); |
174 | snmp6_seq_show_item(seq, (void **)idev->stats.ipv6, snmp6_ipstats_list); | ||
164 | snmp6_seq_show_item(seq, (void **)idev->stats.icmpv6, snmp6_icmp6_list); | 175 | snmp6_seq_show_item(seq, (void **)idev->stats.icmpv6, snmp6_icmp6_list); |
165 | } else { | 176 | } else { |
166 | snmp6_seq_show_item(seq, (void **)ipv6_statistics, snmp6_ipstats_list); | 177 | snmp6_seq_show_item(seq, (void **)ipv6_statistics, snmp6_ipstats_list); |
167 | snmp6_seq_show_item(seq, (void **)icmpv6_statistics, snmp6_icmp6_list); | 178 | snmp6_seq_show_item(seq, (void **)icmpv6_statistics, snmp6_icmp6_list); |
168 | snmp6_seq_show_item(seq, (void **)udp_stats_in6, snmp6_udp6_list); | 179 | snmp6_seq_show_item(seq, (void **)udp_stats_in6, snmp6_udp6_list); |
180 | snmp6_seq_show_item(seq, (void **)udplite_stats_in6, snmp6_udplite6_list); | ||
169 | } | 181 | } |
170 | return 0; | 182 | return 0; |
171 | } | 183 | } |
@@ -281,6 +293,9 @@ int snmp6_alloc_dev(struct inet6_dev *idev) | |||
281 | if (!idev || !idev->dev) | 293 | if (!idev || !idev->dev) |
282 | return -EINVAL; | 294 | return -EINVAL; |
283 | 295 | ||
296 | if (snmp6_mib_init((void **)idev->stats.ipv6, sizeof(struct ipstats_mib), | ||
297 | __alignof__(struct ipstats_mib)) < 0) | ||
298 | goto err_ip; | ||
284 | if (snmp6_mib_init((void **)idev->stats.icmpv6, sizeof(struct icmpv6_mib), | 299 | if (snmp6_mib_init((void **)idev->stats.icmpv6, sizeof(struct icmpv6_mib), |
285 | __alignof__(struct icmpv6_mib)) < 0) | 300 | __alignof__(struct icmpv6_mib)) < 0) |
286 | goto err_icmp; | 301 | goto err_icmp; |
@@ -288,12 +303,15 @@ int snmp6_alloc_dev(struct inet6_dev *idev) | |||
288 | return 0; | 303 | return 0; |
289 | 304 | ||
290 | err_icmp: | 305 | err_icmp: |
306 | snmp6_mib_free((void **)idev->stats.ipv6); | ||
307 | err_ip: | ||
291 | return err; | 308 | return err; |
292 | } | 309 | } |
293 | 310 | ||
294 | int snmp6_free_dev(struct inet6_dev *idev) | 311 | int snmp6_free_dev(struct inet6_dev *idev) |
295 | { | 312 | { |
296 | snmp6_mib_free((void **)idev->stats.icmpv6); | 313 | snmp6_mib_free((void **)idev->stats.icmpv6); |
314 | snmp6_mib_free((void **)idev->stats.ipv6); | ||
297 | return 0; | 315 | return 0; |
298 | } | 316 | } |
299 | 317 | ||
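The snmp6_alloc_dev() hunk above adds a second per-device MIB allocation and unwinds it on failure through stacked goto labels (err_icmp frees the ipv6 MIB, then falls through to err_ip). A minimal standalone sketch of that allocate-in-order, free-in-reverse idiom, with plain malloc/free standing in for the kernel's per-CPU MIB helpers and illustrative field names:

#include <stdlib.h>

struct dev_stats {
    void *ipv6_mib;   /* stands in for idev->stats.ipv6   */
    void *icmpv6_mib; /* stands in for idev->stats.icmpv6 */
};

/* Allocate both MIB blocks or none: each failure path falls through
 * the labels in reverse order, freeing whatever was already set up. */
static int stats_alloc(struct dev_stats *st)
{
    st->ipv6_mib = malloc(256);
    if (!st->ipv6_mib)
        goto err_ip;
    st->icmpv6_mib = malloc(256);
    if (!st->icmpv6_mib)
        goto err_icmp;
    return 0;

err_icmp:
    free(st->ipv6_mib);
err_ip:
    return -1;
}

static void stats_free(struct dev_stats *st)
{
    free(st->icmpv6_mib);
    free(st->ipv6_mib);
}

int main(void)
{
    struct dev_stats st;
    if (stats_alloc(&st) == 0)
        stats_free(&st);
    return 0;
}

The matching change to snmp6_free_dev() keeps teardown symmetric: every block allocated in snmp6_alloc_dev() is released there in the same reverse order.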
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index d6dedc4aec77..c2e629d6aea4 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c | |||
@@ -220,7 +220,7 @@ static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) | |||
220 | struct inet_sock *inet = inet_sk(sk); | 220 | struct inet_sock *inet = inet_sk(sk); |
221 | struct ipv6_pinfo *np = inet6_sk(sk); | 221 | struct ipv6_pinfo *np = inet6_sk(sk); |
222 | struct sockaddr_in6 *addr = (struct sockaddr_in6 *) uaddr; | 222 | struct sockaddr_in6 *addr = (struct sockaddr_in6 *) uaddr; |
223 | __u32 v4addr = 0; | 223 | __be32 v4addr = 0; |
224 | int addr_type; | 224 | int addr_type; |
225 | int err; | 225 | int err; |
226 | 226 | ||
@@ -290,7 +290,7 @@ out: | |||
290 | 290 | ||
291 | void rawv6_err(struct sock *sk, struct sk_buff *skb, | 291 | void rawv6_err(struct sock *sk, struct sk_buff *skb, |
292 | struct inet6_skb_parm *opt, | 292 | struct inet6_skb_parm *opt, |
293 | int type, int code, int offset, u32 info) | 293 | int type, int code, int offset, __be32 info) |
294 | { | 294 | { |
295 | struct inet_sock *inet = inet_sk(sk); | 295 | struct inet_sock *inet = inet_sk(sk); |
296 | struct ipv6_pinfo *np = inet6_sk(sk); | 296 | struct ipv6_pinfo *np = inet6_sk(sk); |
@@ -370,9 +370,9 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb) | |||
370 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 370 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
371 | } | 371 | } |
372 | if (skb->ip_summed != CHECKSUM_UNNECESSARY) | 372 | if (skb->ip_summed != CHECKSUM_UNNECESSARY) |
373 | skb->csum = ~csum_ipv6_magic(&skb->nh.ipv6h->saddr, | 373 | skb->csum = ~csum_unfold(csum_ipv6_magic(&skb->nh.ipv6h->saddr, |
374 | &skb->nh.ipv6h->daddr, | 374 | &skb->nh.ipv6h->daddr, |
375 | skb->len, inet->num, 0); | 375 | skb->len, inet->num, 0)); |
376 | 376 | ||
377 | if (inet->hdrincl) { | 377 | if (inet->hdrincl) { |
378 | if (skb_checksum_complete(skb)) { | 378 | if (skb_checksum_complete(skb)) { |
@@ -479,8 +479,8 @@ static int rawv6_push_pending_frames(struct sock *sk, struct flowi *fl, | |||
479 | int offset; | 479 | int offset; |
480 | int len; | 480 | int len; |
481 | int total_len; | 481 | int total_len; |
482 | u32 tmp_csum; | 482 | __wsum tmp_csum; |
483 | u16 csum; | 483 | __sum16 csum; |
484 | 484 | ||
485 | if (!rp->checksum) | 485 | if (!rp->checksum) |
486 | goto send; | 486 | goto send; |
@@ -530,16 +530,15 @@ static int rawv6_push_pending_frames(struct sock *sk, struct flowi *fl, | |||
530 | 530 | ||
531 | /* in case cksum was not initialized */ | 531 | /* in case cksum was not initialized */ |
532 | if (unlikely(csum)) | 532 | if (unlikely(csum)) |
533 | tmp_csum = csum_sub(tmp_csum, csum); | 533 | tmp_csum = csum_sub(tmp_csum, csum_unfold(csum)); |
534 | 534 | ||
535 | tmp_csum = csum_ipv6_magic(&fl->fl6_src, | 535 | csum = csum_ipv6_magic(&fl->fl6_src, |
536 | &fl->fl6_dst, | 536 | &fl->fl6_dst, |
537 | total_len, fl->proto, tmp_csum); | 537 | total_len, fl->proto, tmp_csum); |
538 | 538 | ||
539 | if (tmp_csum == 0) | 539 | if (csum == 0 && fl->proto == IPPROTO_UDP) |
540 | tmp_csum = -1; | 540 | csum = CSUM_MANGLED_0; |
541 | 541 | ||
542 | csum = tmp_csum; | ||
543 | if (skb_store_bits(skb, offset, &csum, 2)) | 542 | if (skb_store_bits(skb, offset, &csum, 2)) |
544 | BUG(); | 543 | BUG(); |
545 | 544 | ||
@@ -586,7 +585,7 @@ static int rawv6_send_hdrinc(struct sock *sk, void *from, int length, | |||
586 | if (err) | 585 | if (err) |
587 | goto error_fault; | 586 | goto error_fault; |
588 | 587 | ||
589 | IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS); | 588 | IP6_INC_STATS(rt->rt6i_idev, IPSTATS_MIB_OUTREQUESTS); |
590 | err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, rt->u.dst.dev, | 589 | err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, rt->u.dst.dev, |
591 | dst_output); | 590 | dst_output); |
592 | if (err > 0) | 591 | if (err > 0) |
@@ -600,7 +599,7 @@ error_fault: | |||
600 | err = -EFAULT; | 599 | err = -EFAULT; |
601 | kfree_skb(skb); | 600 | kfree_skb(skb); |
602 | error: | 601 | error: |
603 | IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS); | 602 | IP6_INC_STATS(rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS); |
604 | return err; | 603 | return err; |
605 | } | 604 | } |
606 | 605 | ||
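The raw.c hunks above move the checksum locals to the sparse-checked __wsum/__sum16 types, route folded values through csum_unfold(), and make the UDP zero-checksum case explicit (CSUM_MANGLED_0). A rough userspace illustration of just the arithmetic involved, assuming a plain byte buffer rather than sk_buffs and csum_ipv6_magic():

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

/* Fold a 32-bit ones'-complement accumulator down to 16 bits,
 * roughly what csum_fold() does. */
static uint16_t csum_fold(uint32_t sum)
{
    sum = (sum & 0xffff) + (sum >> 16);
    sum = (sum & 0xffff) + (sum >> 16);
    return (uint16_t)~sum;
}

/* Sum a buffer 16 bits at a time (big-endian words), like csum_partial(). */
static uint32_t csum_partial(const uint8_t *buf, size_t len, uint32_t sum)
{
    while (len > 1) {
        sum += (uint32_t)buf[0] << 8 | buf[1];
        buf += 2;
        len -= 2;
    }
    if (len)
        sum += (uint32_t)buf[0] << 8;
    return sum;
}

int main(void)
{
    uint8_t payload[] = { 0x45, 0x00, 0x00, 0x1c };
    uint16_t csum = csum_fold(csum_partial(payload, sizeof(payload), 0));

    /* UDP special case: a transmitted checksum of 0 means "no checksum",
     * so a genuinely-zero result is sent as 0xFFFF (CSUM_MANGLED_0). */
    if (csum == 0)
        csum = 0xffff;

    printf("checksum: 0x%04x\n", csum);
    return 0;
}

csum_unfold() in the patched code is the inverse step: it widens an already-folded 16-bit value back into the 32-bit accumulator domain so it can be added to or subtracted from a running __wsum without mixing types.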
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index f39bbedd1327..6f9a9046510f 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c | |||
@@ -47,6 +47,7 @@ | |||
47 | #include <net/snmp.h> | 47 | #include <net/snmp.h> |
48 | 48 | ||
49 | #include <net/ipv6.h> | 49 | #include <net/ipv6.h> |
50 | #include <net/ip6_route.h> | ||
50 | #include <net/protocol.h> | 51 | #include <net/protocol.h> |
51 | #include <net/transp_v6.h> | 52 | #include <net/transp_v6.h> |
52 | #include <net/rawv6.h> | 53 | #include <net/rawv6.h> |
@@ -76,7 +77,7 @@ struct frag_queue | |||
76 | struct hlist_node list; | 77 | struct hlist_node list; |
77 | struct list_head lru_list; /* lru list member */ | 78 | struct list_head lru_list; /* lru list member */ |
78 | 79 | ||
79 | __u32 id; /* fragment id */ | 80 | __be32 id; /* fragment id */ |
80 | struct in6_addr saddr; | 81 | struct in6_addr saddr; |
81 | struct in6_addr daddr; | 82 | struct in6_addr daddr; |
82 | 83 | ||
@@ -124,28 +125,28 @@ static __inline__ void fq_unlink(struct frag_queue *fq) | |||
124 | * callers should be careful not to use the hash value outside the ipfrag_lock | 125 | * callers should be careful not to use the hash value outside the ipfrag_lock |
125 | * as doing so could race with ipfrag_hash_rnd being recalculated. | 126 | * as doing so could race with ipfrag_hash_rnd being recalculated. |
126 | */ | 127 | */ |
127 | static unsigned int ip6qhashfn(u32 id, struct in6_addr *saddr, | 128 | static unsigned int ip6qhashfn(__be32 id, struct in6_addr *saddr, |
128 | struct in6_addr *daddr) | 129 | struct in6_addr *daddr) |
129 | { | 130 | { |
130 | u32 a, b, c; | 131 | u32 a, b, c; |
131 | 132 | ||
132 | a = saddr->s6_addr32[0]; | 133 | a = (__force u32)saddr->s6_addr32[0]; |
133 | b = saddr->s6_addr32[1]; | 134 | b = (__force u32)saddr->s6_addr32[1]; |
134 | c = saddr->s6_addr32[2]; | 135 | c = (__force u32)saddr->s6_addr32[2]; |
135 | 136 | ||
136 | a += JHASH_GOLDEN_RATIO; | 137 | a += JHASH_GOLDEN_RATIO; |
137 | b += JHASH_GOLDEN_RATIO; | 138 | b += JHASH_GOLDEN_RATIO; |
138 | c += ip6_frag_hash_rnd; | 139 | c += ip6_frag_hash_rnd; |
139 | __jhash_mix(a, b, c); | 140 | __jhash_mix(a, b, c); |
140 | 141 | ||
141 | a += saddr->s6_addr32[3]; | 142 | a += (__force u32)saddr->s6_addr32[3]; |
142 | b += daddr->s6_addr32[0]; | 143 | b += (__force u32)daddr->s6_addr32[0]; |
143 | c += daddr->s6_addr32[1]; | 144 | c += (__force u32)daddr->s6_addr32[1]; |
144 | __jhash_mix(a, b, c); | 145 | __jhash_mix(a, b, c); |
145 | 146 | ||
146 | a += daddr->s6_addr32[2]; | 147 | a += (__force u32)daddr->s6_addr32[2]; |
147 | b += daddr->s6_addr32[3]; | 148 | b += (__force u32)daddr->s6_addr32[3]; |
148 | c += id; | 149 | c += (__force u32)id; |
149 | __jhash_mix(a, b, c); | 150 | __jhash_mix(a, b, c); |
150 | 151 | ||
151 | return c & (IP6Q_HASHSZ - 1); | 152 | return c & (IP6Q_HASHSZ - 1); |
@@ -257,7 +258,7 @@ static __inline__ void fq_kill(struct frag_queue *fq) | |||
257 | } | 258 | } |
258 | } | 259 | } |
259 | 260 | ||
260 | static void ip6_evictor(void) | 261 | static void ip6_evictor(struct inet6_dev *idev) |
261 | { | 262 | { |
262 | struct frag_queue *fq; | 263 | struct frag_queue *fq; |
263 | struct list_head *tmp; | 264 | struct list_head *tmp; |
@@ -284,14 +285,14 @@ static void ip6_evictor(void) | |||
284 | spin_unlock(&fq->lock); | 285 | spin_unlock(&fq->lock); |
285 | 286 | ||
286 | fq_put(fq, &work); | 287 | fq_put(fq, &work); |
287 | IP6_INC_STATS_BH(IPSTATS_MIB_REASMFAILS); | 288 | IP6_INC_STATS_BH(idev, IPSTATS_MIB_REASMFAILS); |
288 | } | 289 | } |
289 | } | 290 | } |
290 | 291 | ||
291 | static void ip6_frag_expire(unsigned long data) | 292 | static void ip6_frag_expire(unsigned long data) |
292 | { | 293 | { |
293 | struct frag_queue *fq = (struct frag_queue *) data; | 294 | struct frag_queue *fq = (struct frag_queue *) data; |
294 | struct net_device *dev; | 295 | struct net_device *dev = NULL; |
295 | 296 | ||
296 | spin_lock(&fq->lock); | 297 | spin_lock(&fq->lock); |
297 | 298 | ||
@@ -300,17 +301,19 @@ static void ip6_frag_expire(unsigned long data) | |||
300 | 301 | ||
301 | fq_kill(fq); | 302 | fq_kill(fq); |
302 | 303 | ||
303 | IP6_INC_STATS_BH(IPSTATS_MIB_REASMTIMEOUT); | 304 | dev = dev_get_by_index(fq->iif); |
304 | IP6_INC_STATS_BH(IPSTATS_MIB_REASMFAILS); | 305 | if (!dev) |
306 | goto out; | ||
307 | |||
308 | rcu_read_lock(); | ||
309 | IP6_INC_STATS_BH(__in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT); | ||
310 | IP6_INC_STATS_BH(__in6_dev_get(dev), IPSTATS_MIB_REASMFAILS); | ||
311 | rcu_read_unlock(); | ||
305 | 312 | ||
306 | /* Don't send error if the first segment did not arrive. */ | 313 | /* Don't send error if the first segment did not arrive. */ |
307 | if (!(fq->last_in&FIRST_IN) || !fq->fragments) | 314 | if (!(fq->last_in&FIRST_IN) || !fq->fragments) |
308 | goto out; | 315 | goto out; |
309 | 316 | ||
310 | dev = dev_get_by_index(fq->iif); | ||
311 | if (!dev) | ||
312 | goto out; | ||
313 | |||
314 | /* | 317 | /* |
315 | But use as source device on which LAST ARRIVED | 318 | But use as source device on which LAST ARRIVED |
316 | segment was received. And do not use fq->dev | 319 | segment was received. And do not use fq->dev |
@@ -318,8 +321,9 @@ static void ip6_frag_expire(unsigned long data) | |||
318 | */ | 321 | */ |
319 | fq->fragments->dev = dev; | 322 | fq->fragments->dev = dev; |
320 | icmpv6_send(fq->fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0, dev); | 323 | icmpv6_send(fq->fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0, dev); |
321 | dev_put(dev); | ||
322 | out: | 324 | out: |
325 | if (dev) | ||
326 | dev_put(dev); | ||
323 | spin_unlock(&fq->lock); | 327 | spin_unlock(&fq->lock); |
324 | fq_put(fq, NULL); | 328 | fq_put(fq, NULL); |
325 | } | 329 | } |
@@ -366,7 +370,8 @@ static struct frag_queue *ip6_frag_intern(struct frag_queue *fq_in) | |||
366 | 370 | ||
367 | 371 | ||
368 | static struct frag_queue * | 372 | static struct frag_queue * |
369 | ip6_frag_create(u32 id, struct in6_addr *src, struct in6_addr *dst) | 373 | ip6_frag_create(__be32 id, struct in6_addr *src, struct in6_addr *dst, |
374 | struct inet6_dev *idev) | ||
370 | { | 375 | { |
371 | struct frag_queue *fq; | 376 | struct frag_queue *fq; |
372 | 377 | ||
@@ -386,12 +391,13 @@ ip6_frag_create(u32 id, struct in6_addr *src, struct in6_addr *dst) | |||
386 | return ip6_frag_intern(fq); | 391 | return ip6_frag_intern(fq); |
387 | 392 | ||
388 | oom: | 393 | oom: |
389 | IP6_INC_STATS_BH(IPSTATS_MIB_REASMFAILS); | 394 | IP6_INC_STATS_BH(idev, IPSTATS_MIB_REASMFAILS); |
390 | return NULL; | 395 | return NULL; |
391 | } | 396 | } |
392 | 397 | ||
393 | static __inline__ struct frag_queue * | 398 | static __inline__ struct frag_queue * |
394 | fq_find(u32 id, struct in6_addr *src, struct in6_addr *dst) | 399 | fq_find(__be32 id, struct in6_addr *src, struct in6_addr *dst, |
400 | struct inet6_dev *idev) | ||
395 | { | 401 | { |
396 | struct frag_queue *fq; | 402 | struct frag_queue *fq; |
397 | struct hlist_node *n; | 403 | struct hlist_node *n; |
@@ -410,7 +416,7 @@ fq_find(u32 id, struct in6_addr *src, struct in6_addr *dst) | |||
410 | } | 416 | } |
411 | read_unlock(&ip6_frag_lock); | 417 | read_unlock(&ip6_frag_lock); |
412 | 418 | ||
413 | return ip6_frag_create(id, src, dst); | 419 | return ip6_frag_create(id, src, dst, idev); |
414 | } | 420 | } |
415 | 421 | ||
416 | 422 | ||
@@ -428,7 +434,8 @@ static void ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb, | |||
428 | ((u8 *) (fhdr + 1) - (u8 *) (skb->nh.ipv6h + 1))); | 434 | ((u8 *) (fhdr + 1) - (u8 *) (skb->nh.ipv6h + 1))); |
429 | 435 | ||
430 | if ((unsigned int)end > IPV6_MAXPLEN) { | 436 | if ((unsigned int)end > IPV6_MAXPLEN) { |
431 | IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS); | 437 | IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), |
438 | IPSTATS_MIB_INHDRERRORS); | ||
432 | icmpv6_param_prob(skb,ICMPV6_HDR_FIELD, (u8*)&fhdr->frag_off - skb->nh.raw); | 439 | icmpv6_param_prob(skb,ICMPV6_HDR_FIELD, (u8*)&fhdr->frag_off - skb->nh.raw); |
433 | return; | 440 | return; |
434 | } | 441 | } |
@@ -455,7 +462,8 @@ static void ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb, | |||
455 | /* RFC2460 says always send parameter problem in | 462 | /* RFC2460 says always send parameter problem in |
456 | * this case. -DaveM | 463 | * this case. -DaveM |
457 | */ | 464 | */ |
458 | IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS); | 465 | IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), |
466 | IPSTATS_MIB_INHDRERRORS); | ||
459 | icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, | 467 | icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, |
460 | offsetof(struct ipv6hdr, payload_len)); | 468 | offsetof(struct ipv6hdr, payload_len)); |
461 | return; | 469 | return; |
@@ -571,7 +579,7 @@ static void ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb, | |||
571 | return; | 579 | return; |
572 | 580 | ||
573 | err: | 581 | err: |
574 | IP6_INC_STATS(IPSTATS_MIB_REASMFAILS); | 582 | IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMFAILS); |
575 | kfree_skb(skb); | 583 | kfree_skb(skb); |
576 | } | 584 | } |
577 | 585 | ||
@@ -665,7 +673,9 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff **skb_in, | |||
665 | if (head->ip_summed == CHECKSUM_COMPLETE) | 673 | if (head->ip_summed == CHECKSUM_COMPLETE) |
666 | head->csum = csum_partial(head->nh.raw, head->h.raw-head->nh.raw, head->csum); | 674 | head->csum = csum_partial(head->nh.raw, head->h.raw-head->nh.raw, head->csum); |
667 | 675 | ||
668 | IP6_INC_STATS_BH(IPSTATS_MIB_REASMOKS); | 676 | rcu_read_lock(); |
677 | IP6_INC_STATS_BH(__in6_dev_get(dev), IPSTATS_MIB_REASMOKS); | ||
678 | rcu_read_unlock(); | ||
669 | fq->fragments = NULL; | 679 | fq->fragments = NULL; |
670 | return 1; | 680 | return 1; |
671 | 681 | ||
@@ -677,7 +687,9 @@ out_oom: | |||
677 | if (net_ratelimit()) | 687 | if (net_ratelimit()) |
678 | printk(KERN_DEBUG "ip6_frag_reasm: no memory for reassembly\n"); | 688 | printk(KERN_DEBUG "ip6_frag_reasm: no memory for reassembly\n"); |
679 | out_fail: | 689 | out_fail: |
680 | IP6_INC_STATS_BH(IPSTATS_MIB_REASMFAILS); | 690 | rcu_read_lock(); |
691 | IP6_INC_STATS_BH(__in6_dev_get(dev), IPSTATS_MIB_REASMFAILS); | ||
692 | rcu_read_unlock(); | ||
681 | return -1; | 693 | return -1; |
682 | } | 694 | } |
683 | 695 | ||
@@ -691,16 +703,16 @@ static int ipv6_frag_rcv(struct sk_buff **skbp) | |||
691 | 703 | ||
692 | hdr = skb->nh.ipv6h; | 704 | hdr = skb->nh.ipv6h; |
693 | 705 | ||
694 | IP6_INC_STATS_BH(IPSTATS_MIB_REASMREQDS); | 706 | IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMREQDS); |
695 | 707 | ||
696 | /* Jumbo payload inhibits frag. header */ | 708 | /* Jumbo payload inhibits frag. header */ |
697 | if (hdr->payload_len==0) { | 709 | if (hdr->payload_len==0) { |
698 | IP6_INC_STATS(IPSTATS_MIB_INHDRERRORS); | 710 | IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_INHDRERRORS); |
699 | icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb->h.raw-skb->nh.raw); | 711 | icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb->h.raw-skb->nh.raw); |
700 | return -1; | 712 | return -1; |
701 | } | 713 | } |
702 | if (!pskb_may_pull(skb, (skb->h.raw-skb->data)+sizeof(struct frag_hdr))) { | 714 | if (!pskb_may_pull(skb, (skb->h.raw-skb->data)+sizeof(struct frag_hdr))) { |
703 | IP6_INC_STATS(IPSTATS_MIB_INHDRERRORS); | 715 | IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_INHDRERRORS); |
704 | icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb->h.raw-skb->nh.raw); | 716 | icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb->h.raw-skb->nh.raw); |
705 | return -1; | 717 | return -1; |
706 | } | 718 | } |
@@ -711,16 +723,17 @@ static int ipv6_frag_rcv(struct sk_buff **skbp) | |||
711 | if (!(fhdr->frag_off & htons(0xFFF9))) { | 723 | if (!(fhdr->frag_off & htons(0xFFF9))) { |
712 | /* It is not a fragmented frame */ | 724 | /* It is not a fragmented frame */ |
713 | skb->h.raw += sizeof(struct frag_hdr); | 725 | skb->h.raw += sizeof(struct frag_hdr); |
714 | IP6_INC_STATS_BH(IPSTATS_MIB_REASMOKS); | 726 | IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMOKS); |
715 | 727 | ||
716 | IP6CB(skb)->nhoff = (u8*)fhdr - skb->nh.raw; | 728 | IP6CB(skb)->nhoff = (u8*)fhdr - skb->nh.raw; |
717 | return 1; | 729 | return 1; |
718 | } | 730 | } |
719 | 731 | ||
720 | if (atomic_read(&ip6_frag_mem) > sysctl_ip6frag_high_thresh) | 732 | if (atomic_read(&ip6_frag_mem) > sysctl_ip6frag_high_thresh) |
721 | ip6_evictor(); | 733 | ip6_evictor(ip6_dst_idev(skb->dst)); |
722 | 734 | ||
723 | if ((fq = fq_find(fhdr->identification, &hdr->saddr, &hdr->daddr)) != NULL) { | 735 | if ((fq = fq_find(fhdr->identification, &hdr->saddr, &hdr->daddr, |
736 | ip6_dst_idev(skb->dst))) != NULL) { | ||
724 | int ret = -1; | 737 | int ret = -1; |
725 | 738 | ||
726 | spin_lock(&fq->lock); | 739 | spin_lock(&fq->lock); |
@@ -736,7 +749,7 @@ static int ipv6_frag_rcv(struct sk_buff **skbp) | |||
736 | return ret; | 749 | return ret; |
737 | } | 750 | } |
738 | 751 | ||
739 | IP6_INC_STATS_BH(IPSTATS_MIB_REASMFAILS); | 752 | IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMFAILS); |
740 | kfree_skb(skb); | 753 | kfree_skb(skb); |
741 | return -1; | 754 | return -1; |
742 | } | 755 | } |
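The reassembly.c hunk above funnels the fragment id and both IPv6 addresses through the Jenkins mix, with __force casts so sparse accepts folding __be32 values into a host-order hash. A standalone sketch of the same scheme, using the classic jhash mix and golden-ratio constant from the 2.6-era <linux/jhash.h>; the bucket count and sample addresses are illustrative:

#include <stdint.h>
#include <stdio.h>

#define JHASH_GOLDEN_RATIO 0x9e3779b9u
#define IP6Q_HASHSZ        64          /* must stay a power of two */

/* Bob Jenkins' 96-bit mix, as in the old <linux/jhash.h>. */
#define __jhash_mix(a, b, c)              \
do {                                      \
    a -= b; a -= c; a ^= (c >> 13);       \
    b -= c; b -= a; b ^= (a << 8);        \
    c -= a; c -= b; c ^= (b >> 13);       \
    a -= b; a -= c; a ^= (c >> 12);       \
    b -= c; b -= a; b ^= (a << 16);       \
    c -= a; c -= b; c ^= (b >> 5);        \
    a -= b; a -= c; a ^= (c >> 3);        \
    b -= c; b -= a; b ^= (a << 10);       \
    c -= a; c -= b; c ^= (b >> 15);       \
} while (0)

/* Hash (saddr, daddr, id) into a bucket, mirroring ip6qhashfn(): the
 * addresses are fed in as raw 32-bit words, the fragment id goes last. */
static unsigned int frag_hash(const uint32_t saddr[4], const uint32_t daddr[4],
                              uint32_t id, uint32_t rnd)
{
    uint32_t a = saddr[0] + JHASH_GOLDEN_RATIO;
    uint32_t b = saddr[1] + JHASH_GOLDEN_RATIO;
    uint32_t c = saddr[2] + rnd;

    __jhash_mix(a, b, c);

    a += saddr[3];
    b += daddr[0];
    c += daddr[1];
    __jhash_mix(a, b, c);

    a += daddr[2];
    b += daddr[3];
    c += id;
    __jhash_mix(a, b, c);

    return c & (IP6Q_HASHSZ - 1);
}

int main(void)
{
    uint32_t src[4] = { 0x20010db8, 0, 0, 1 };
    uint32_t dst[4] = { 0x20010db8, 0, 0, 2 };
    printf("bucket %u\n", frag_hash(src, dst, 0x1234, 0xdeadbeef));
    return 0;
}

The rnd parameter corresponds to ip6_frag_hash_rnd, which the kernel periodically re-seeds; that is why callers must not cache the hash value outside ipfrag_lock, as the comment in the hunk notes.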
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index b39ae99122d5..9f80518aacbd 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
@@ -440,7 +440,7 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len, | |||
440 | if (pref == ICMPV6_ROUTER_PREF_INVALID) | 440 | if (pref == ICMPV6_ROUTER_PREF_INVALID) |
441 | pref = ICMPV6_ROUTER_PREF_MEDIUM; | 441 | pref = ICMPV6_ROUTER_PREF_MEDIUM; |
442 | 442 | ||
443 | lifetime = htonl(rinfo->lifetime); | 443 | lifetime = ntohl(rinfo->lifetime); |
444 | if (lifetime == 0xffffffff) { | 444 | if (lifetime == 0xffffffff) { |
445 | /* infinity */ | 445 | /* infinity */ |
446 | } else if (lifetime > 0x7fffffff/HZ) { | 446 | } else if (lifetime > 0x7fffffff/HZ) { |
@@ -711,12 +711,10 @@ void ip6_route_input(struct sk_buff *skb) | |||
711 | .ip6_u = { | 711 | .ip6_u = { |
712 | .daddr = iph->daddr, | 712 | .daddr = iph->daddr, |
713 | .saddr = iph->saddr, | 713 | .saddr = iph->saddr, |
714 | #ifdef CONFIG_IPV6_ROUTE_FWMARK | 714 | .flowlabel = (* (__be32 *) iph)&IPV6_FLOWINFO_MASK, |
715 | .fwmark = skb->nfmark, | ||
716 | #endif | ||
717 | .flowlabel = (* (u32 *) iph)&IPV6_FLOWINFO_MASK, | ||
718 | }, | 715 | }, |
719 | }, | 716 | }, |
717 | .mark = skb->mark, | ||
720 | .proto = iph->nexthdr, | 718 | .proto = iph->nexthdr, |
721 | }; | 719 | }; |
722 | 720 | ||
@@ -942,7 +940,7 @@ struct dst_entry *ndisc_dst_alloc(struct net_device *dev, | |||
942 | fib6_force_start_gc(); | 940 | fib6_force_start_gc(); |
943 | 941 | ||
944 | out: | 942 | out: |
945 | return (struct dst_entry *)rt; | 943 | return &rt->u.dst; |
946 | } | 944 | } |
947 | 945 | ||
948 | int ndisc_dst_gc(int *more) | 946 | int ndisc_dst_gc(int *more) |
@@ -1225,7 +1223,7 @@ out: | |||
1225 | if (idev) | 1223 | if (idev) |
1226 | in6_dev_put(idev); | 1224 | in6_dev_put(idev); |
1227 | if (rt) | 1225 | if (rt) |
1228 | dst_free((struct dst_entry *) rt); | 1226 | dst_free(&rt->u.dst); |
1229 | return err; | 1227 | return err; |
1230 | } | 1228 | } |
1231 | 1229 | ||
@@ -1751,9 +1749,9 @@ static inline int ip6_pkt_drop(struct sk_buff *skb, int code) | |||
1751 | { | 1749 | { |
1752 | int type = ipv6_addr_type(&skb->nh.ipv6h->daddr); | 1750 | int type = ipv6_addr_type(&skb->nh.ipv6h->daddr); |
1753 | if (type == IPV6_ADDR_ANY || type == IPV6_ADDR_RESERVED) | 1751 | if (type == IPV6_ADDR_ANY || type == IPV6_ADDR_RESERVED) |
1754 | IP6_INC_STATS(IPSTATS_MIB_INADDRERRORS); | 1752 | IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_INADDRERRORS); |
1755 | 1753 | ||
1756 | IP6_INC_STATS(IPSTATS_MIB_OUTNOROUTES); | 1754 | IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_OUTNOROUTES); |
1757 | icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0, skb->dev); | 1755 | icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0, skb->dev); |
1758 | kfree_skb(skb); | 1756 | kfree_skb(skb); |
1759 | return 0; | 1757 | return 0; |
@@ -1824,7 +1822,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev, | |||
1824 | rt->rt6i_flags |= RTF_LOCAL; | 1822 | rt->rt6i_flags |= RTF_LOCAL; |
1825 | rt->rt6i_nexthop = ndisc_get_neigh(rt->rt6i_dev, &rt->rt6i_gateway); | 1823 | rt->rt6i_nexthop = ndisc_get_neigh(rt->rt6i_dev, &rt->rt6i_gateway); |
1826 | if (rt->rt6i_nexthop == NULL) { | 1824 | if (rt->rt6i_nexthop == NULL) { |
1827 | dst_free((struct dst_entry *) rt); | 1825 | dst_free(&rt->u.dst); |
1828 | return ERR_PTR(-ENOMEM); | 1826 | return ERR_PTR(-ENOMEM); |
1829 | } | 1827 | } |
1830 | 1828 | ||
@@ -2008,6 +2006,20 @@ int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) | |||
2008 | return ip6_route_add(&cfg); | 2006 | return ip6_route_add(&cfg); |
2009 | } | 2007 | } |
2010 | 2008 | ||
2009 | static inline size_t rt6_nlmsg_size(void) | ||
2010 | { | ||
2011 | return NLMSG_ALIGN(sizeof(struct rtmsg)) | ||
2012 | + nla_total_size(16) /* RTA_SRC */ | ||
2013 | + nla_total_size(16) /* RTA_DST */ | ||
2014 | + nla_total_size(16) /* RTA_GATEWAY */ | ||
2015 | + nla_total_size(16) /* RTA_PREFSRC */ | ||
2016 | + nla_total_size(4) /* RTA_TABLE */ | ||
2017 | + nla_total_size(4) /* RTA_IIF */ | ||
2018 | + nla_total_size(4) /* RTA_OIF */ | ||
2019 | + nla_total_size(4) /* RTA_PRIORITY */ | ||
2020 | + nla_total_size(sizeof(struct rta_cacheinfo)); | ||
2021 | } | ||
2022 | |||
2011 | static int rt6_fill_node(struct sk_buff *skb, struct rt6_info *rt, | 2023 | static int rt6_fill_node(struct sk_buff *skb, struct rt6_info *rt, |
2012 | struct in6_addr *dst, struct in6_addr *src, | 2024 | struct in6_addr *dst, struct in6_addr *src, |
2013 | int iif, int type, u32 pid, u32 seq, | 2025 | int iif, int type, u32 pid, u32 seq, |
@@ -2015,7 +2027,7 @@ static int rt6_fill_node(struct sk_buff *skb, struct rt6_info *rt, | |||
2015 | { | 2027 | { |
2016 | struct rtmsg *rtm; | 2028 | struct rtmsg *rtm; |
2017 | struct nlmsghdr *nlh; | 2029 | struct nlmsghdr *nlh; |
2018 | struct rta_cacheinfo ci; | 2030 | long expires; |
2019 | u32 table; | 2031 | u32 table; |
2020 | 2032 | ||
2021 | if (prefix) { /* user wants prefix routes only */ | 2033 | if (prefix) { /* user wants prefix routes only */ |
@@ -2089,18 +2101,11 @@ static int rt6_fill_node(struct sk_buff *skb, struct rt6_info *rt, | |||
2089 | NLA_PUT_U32(skb, RTA_OIF, rt->rt6i_dev->ifindex); | 2101 | NLA_PUT_U32(skb, RTA_OIF, rt->rt6i_dev->ifindex); |
2090 | 2102 | ||
2091 | NLA_PUT_U32(skb, RTA_PRIORITY, rt->rt6i_metric); | 2103 | NLA_PUT_U32(skb, RTA_PRIORITY, rt->rt6i_metric); |
2092 | ci.rta_lastuse = jiffies_to_clock_t(jiffies - rt->u.dst.lastuse); | 2104 | |
2093 | if (rt->rt6i_expires) | 2105 | expires = rt->rt6i_expires ? rt->rt6i_expires - jiffies : 0; |
2094 | ci.rta_expires = jiffies_to_clock_t(rt->rt6i_expires - jiffies); | 2106 | if (rtnl_put_cacheinfo(skb, &rt->u.dst, 0, 0, 0, |
2095 | else | 2107 | expires, rt->u.dst.error) < 0) |
2096 | ci.rta_expires = 0; | 2108 | goto nla_put_failure; |
2097 | ci.rta_used = rt->u.dst.__use; | ||
2098 | ci.rta_clntref = atomic_read(&rt->u.dst.__refcnt); | ||
2099 | ci.rta_error = rt->u.dst.error; | ||
2100 | ci.rta_id = 0; | ||
2101 | ci.rta_ts = 0; | ||
2102 | ci.rta_tsage = 0; | ||
2103 | NLA_PUT(skb, RTA_CACHEINFO, sizeof(ci), &ci); | ||
2104 | 2109 | ||
2105 | return nlmsg_end(skb, nlh); | 2110 | return nlmsg_end(skb, nlh); |
2106 | 2111 | ||
@@ -2202,7 +2207,6 @@ void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info) | |||
2202 | struct sk_buff *skb; | 2207 | struct sk_buff *skb; |
2203 | u32 pid = 0, seq = 0; | 2208 | u32 pid = 0, seq = 0; |
2204 | struct nlmsghdr *nlh = NULL; | 2209 | struct nlmsghdr *nlh = NULL; |
2205 | int payload = sizeof(struct rtmsg) + 256; | ||
2206 | int err = -ENOBUFS; | 2210 | int err = -ENOBUFS; |
2207 | 2211 | ||
2208 | if (info) { | 2212 | if (info) { |
@@ -2212,15 +2216,13 @@ void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info) | |||
2212 | seq = nlh->nlmsg_seq; | 2216 | seq = nlh->nlmsg_seq; |
2213 | } | 2217 | } |
2214 | 2218 | ||
2215 | skb = nlmsg_new(nlmsg_total_size(payload), gfp_any()); | 2219 | skb = nlmsg_new(rt6_nlmsg_size(), gfp_any()); |
2216 | if (skb == NULL) | 2220 | if (skb == NULL) |
2217 | goto errout; | 2221 | goto errout; |
2218 | 2222 | ||
2219 | err = rt6_fill_node(skb, rt, NULL, NULL, 0, event, pid, seq, 0, 0); | 2223 | err = rt6_fill_node(skb, rt, NULL, NULL, 0, event, pid, seq, 0, 0); |
2220 | if (err < 0) { | 2224 | /* failure implies BUG in rt6_nlmsg_size() */ |
2221 | kfree_skb(skb); | 2225 | BUG_ON(err < 0); |
2222 | goto errout; | ||
2223 | } | ||
2224 | 2226 | ||
2225 | err = rtnl_notify(skb, pid, RTNLGRP_IPV6_ROUTE, nlh, gfp_any()); | 2227 | err = rtnl_notify(skb, pid, RTNLGRP_IPV6_ROUTE, nlh, gfp_any()); |
2226 | errout: | 2228 | errout: |
@@ -2248,7 +2250,6 @@ struct rt6_proc_arg | |||
2248 | static int rt6_info_route(struct rt6_info *rt, void *p_arg) | 2250 | static int rt6_info_route(struct rt6_info *rt, void *p_arg) |
2249 | { | 2251 | { |
2250 | struct rt6_proc_arg *arg = (struct rt6_proc_arg *) p_arg; | 2252 | struct rt6_proc_arg *arg = (struct rt6_proc_arg *) p_arg; |
2251 | int i; | ||
2252 | 2253 | ||
2253 | if (arg->skip < arg->offset / RT6_INFO_LEN) { | 2254 | if (arg->skip < arg->offset / RT6_INFO_LEN) { |
2254 | arg->skip++; | 2255 | arg->skip++; |
@@ -2258,38 +2259,28 @@ static int rt6_info_route(struct rt6_info *rt, void *p_arg) | |||
2258 | if (arg->len >= arg->length) | 2259 | if (arg->len >= arg->length) |
2259 | return 0; | 2260 | return 0; |
2260 | 2261 | ||
2261 | for (i=0; i<16; i++) { | 2262 | arg->len += sprintf(arg->buffer + arg->len, |
2262 | sprintf(arg->buffer + arg->len, "%02x", | 2263 | NIP6_SEQFMT " %02x ", |
2263 | rt->rt6i_dst.addr.s6_addr[i]); | 2264 | NIP6(rt->rt6i_dst.addr), |
2264 | arg->len += 2; | ||
2265 | } | ||
2266 | arg->len += sprintf(arg->buffer + arg->len, " %02x ", | ||
2267 | rt->rt6i_dst.plen); | 2265 | rt->rt6i_dst.plen); |
2268 | 2266 | ||
2269 | #ifdef CONFIG_IPV6_SUBTREES | 2267 | #ifdef CONFIG_IPV6_SUBTREES |
2270 | for (i=0; i<16; i++) { | 2268 | arg->len += sprintf(arg->buffer + arg->len, |
2271 | sprintf(arg->buffer + arg->len, "%02x", | 2269 | NIP6_SEQFMT " %02x ", |
2272 | rt->rt6i_src.addr.s6_addr[i]); | 2270 | NIP6(rt->rt6i_src.addr), |
2273 | arg->len += 2; | ||
2274 | } | ||
2275 | arg->len += sprintf(arg->buffer + arg->len, " %02x ", | ||
2276 | rt->rt6i_src.plen); | 2271 | rt->rt6i_src.plen); |
2277 | #else | 2272 | #else |
2278 | sprintf(arg->buffer + arg->len, | 2273 | arg->len += sprintf(arg->buffer + arg->len, |
2279 | "00000000000000000000000000000000 00 "); | 2274 | "00000000000000000000000000000000 00 "); |
2280 | arg->len += 36; | ||
2281 | #endif | 2275 | #endif |
2282 | 2276 | ||
2283 | if (rt->rt6i_nexthop) { | 2277 | if (rt->rt6i_nexthop) { |
2284 | for (i=0; i<16; i++) { | 2278 | arg->len += sprintf(arg->buffer + arg->len, |
2285 | sprintf(arg->buffer + arg->len, "%02x", | 2279 | NIP6_SEQFMT, |
2286 | rt->rt6i_nexthop->primary_key[i]); | 2280 | NIP6(*((struct in6_addr *)rt->rt6i_nexthop->primary_key))); |
2287 | arg->len += 2; | ||
2288 | } | ||
2289 | } else { | 2281 | } else { |
2290 | sprintf(arg->buffer + arg->len, | 2282 | arg->len += sprintf(arg->buffer + arg->len, |
2291 | "00000000000000000000000000000000"); | 2283 | "00000000000000000000000000000000"); |
2292 | arg->len += 32; | ||
2293 | } | 2284 | } |
2294 | arg->len += sprintf(arg->buffer + arg->len, | 2285 | arg->len += sprintf(arg->buffer + arg->len, |
2295 | " %08x %08x %08x %08x %8s\n", | 2286 | " %08x %08x %08x %08x %8s\n", |
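rt6_nlmsg_size() above replaces the old "sizeof(struct rtmsg) + 256" guess with an exact sum of aligned attribute sizes, which is what justifies the BUG_ON(err < 0) after rt6_fill_node(). A small sketch of the arithmetic behind nla_total_size(), assuming the standard 4-byte netlink alignment and 4-byte attribute header; the 12- and 32-byte payloads used for the rtmsg header and RTA_CACHEINFO are assumptions standing in for sizeof(struct rtmsg) and sizeof(struct rta_cacheinfo):

#include <stdio.h>

#define NLA_ALIGNTO        4
#define NLA_ALIGN(len)     (((len) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1))
#define NLA_HDRLEN         NLA_ALIGN(4)   /* struct nlattr: u16 len + u16 type */

/* Space one attribute of `payload` bytes occupies in a message,
 * header plus padding included (what nla_total_size() returns). */
static int nla_total_size(int payload)
{
    return NLA_ALIGN(NLA_HDRLEN + payload);
}

int main(void)
{
    int size = NLA_ALIGN(12)          /* stand-in for the rtmsg header */
             + nla_total_size(16)     /* RTA_SRC                       */
             + nla_total_size(16)     /* RTA_DST                       */
             + nla_total_size(16)     /* RTA_GATEWAY                   */
             + nla_total_size(16)     /* RTA_PREFSRC                   */
             + nla_total_size(4)      /* RTA_TABLE                     */
             + nla_total_size(4)      /* RTA_IIF                       */
             + nla_total_size(4)      /* RTA_OIF                       */
             + nla_total_size(4)      /* RTA_PRIORITY                  */
             + nla_total_size(32);    /* RTA_CACHEINFO (assumed size)  */

    printf("worst-case route payload: %d bytes\n", size);
    return 0;
}

Because inet6_rt_notify() now allocates exactly this much via nlmsg_new(rt6_nlmsg_size(), ...), a failure from rt6_fill_node() can only mean the size function is out of sync with the fill function, hence the BUG_ON rather than a silent retry.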
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index be699f85b2c7..77b7b0911438 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c | |||
@@ -60,7 +60,7 @@ | |||
60 | */ | 60 | */ |
61 | 61 | ||
62 | #define HASH_SIZE 16 | 62 | #define HASH_SIZE 16 |
63 | #define HASH(addr) ((addr^(addr>>4))&0xF) | 63 | #define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF) |
64 | 64 | ||
65 | static int ipip6_fb_tunnel_init(struct net_device *dev); | 65 | static int ipip6_fb_tunnel_init(struct net_device *dev); |
66 | static int ipip6_tunnel_init(struct net_device *dev); | 66 | static int ipip6_tunnel_init(struct net_device *dev); |
@@ -76,7 +76,7 @@ static struct ip_tunnel **tunnels[4] = { tunnels_wc, tunnels_l, tunnels_r, tunne | |||
76 | 76 | ||
77 | static DEFINE_RWLOCK(ipip6_lock); | 77 | static DEFINE_RWLOCK(ipip6_lock); |
78 | 78 | ||
79 | static struct ip_tunnel * ipip6_tunnel_lookup(u32 remote, u32 local) | 79 | static struct ip_tunnel * ipip6_tunnel_lookup(__be32 remote, __be32 local) |
80 | { | 80 | { |
81 | unsigned h0 = HASH(remote); | 81 | unsigned h0 = HASH(remote); |
82 | unsigned h1 = HASH(local); | 82 | unsigned h1 = HASH(local); |
@@ -102,8 +102,8 @@ static struct ip_tunnel * ipip6_tunnel_lookup(u32 remote, u32 local) | |||
102 | 102 | ||
103 | static struct ip_tunnel ** ipip6_bucket(struct ip_tunnel *t) | 103 | static struct ip_tunnel ** ipip6_bucket(struct ip_tunnel *t) |
104 | { | 104 | { |
105 | u32 remote = t->parms.iph.daddr; | 105 | __be32 remote = t->parms.iph.daddr; |
106 | u32 local = t->parms.iph.saddr; | 106 | __be32 local = t->parms.iph.saddr; |
107 | unsigned h = 0; | 107 | unsigned h = 0; |
108 | int prio = 0; | 108 | int prio = 0; |
109 | 109 | ||
@@ -144,8 +144,8 @@ static void ipip6_tunnel_link(struct ip_tunnel *t) | |||
144 | 144 | ||
145 | static struct ip_tunnel * ipip6_tunnel_locate(struct ip_tunnel_parm *parms, int create) | 145 | static struct ip_tunnel * ipip6_tunnel_locate(struct ip_tunnel_parm *parms, int create) |
146 | { | 146 | { |
147 | u32 remote = parms->iph.daddr; | 147 | __be32 remote = parms->iph.daddr; |
148 | u32 local = parms->iph.saddr; | 148 | __be32 local = parms->iph.saddr; |
149 | struct ip_tunnel *t, **tp, *nt; | 149 | struct ip_tunnel *t, **tp, *nt; |
150 | struct net_device *dev; | 150 | struct net_device *dev; |
151 | unsigned h = 0; | 151 | unsigned h = 0; |
@@ -405,9 +405,9 @@ out: | |||
405 | /* Returns the embedded IPv4 address if the IPv6 address | 405 | /* Returns the embedded IPv4 address if the IPv6 address |
406 | comes from 6to4 (RFC 3056) addr space */ | 406 | comes from 6to4 (RFC 3056) addr space */ |
407 | 407 | ||
408 | static inline u32 try_6to4(struct in6_addr *v6dst) | 408 | static inline __be32 try_6to4(struct in6_addr *v6dst) |
409 | { | 409 | { |
410 | u32 dst = 0; | 410 | __be32 dst = 0; |
411 | 411 | ||
412 | if (v6dst->s6_addr16[0] == htons(0x2002)) { | 412 | if (v6dst->s6_addr16[0] == htons(0x2002)) { |
413 | /* 6to4 v6 addr has 16 bits prefix, 32 v4addr, 16 SLA, ... */ | 413 | /* 6to4 v6 addr has 16 bits prefix, 32 v4addr, 16 SLA, ... */ |
@@ -432,7 +432,7 @@ static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | |||
432 | struct net_device *tdev; /* Device to other host */ | 432 | struct net_device *tdev; /* Device to other host */ |
433 | struct iphdr *iph; /* Our new IP header */ | 433 | struct iphdr *iph; /* Our new IP header */ |
434 | int max_headroom; /* The extra header space needed */ | 434 | int max_headroom; /* The extra header space needed */ |
435 | u32 dst = tiph->daddr; | 435 | __be32 dst = tiph->daddr; |
436 | int mtu; | 436 | int mtu; |
437 | struct in6_addr *addr6; | 437 | struct in6_addr *addr6; |
438 | int addr_type; | 438 | int addr_type; |
@@ -809,7 +809,7 @@ static void __exit sit_destroy_tunnels(void) | |||
809 | } | 809 | } |
810 | } | 810 | } |
811 | 811 | ||
812 | void __exit sit_cleanup(void) | 812 | static void __exit sit_cleanup(void) |
813 | { | 813 | { |
814 | inet_del_protocol(&sit_protocol, IPPROTO_IPV6); | 814 | inet_del_protocol(&sit_protocol, IPPROTO_IPV6); |
815 | 815 | ||
@@ -819,7 +819,7 @@ void __exit sit_cleanup(void) | |||
819 | rtnl_unlock(); | 819 | rtnl_unlock(); |
820 | } | 820 | } |
821 | 821 | ||
822 | int __init sit_init(void) | 822 | static int __init sit_init(void) |
823 | { | 823 | { |
824 | int err; | 824 | int err; |
825 | 825 | ||
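try_6to4() above pulls the tunnel endpoint straight out of a 6to4 destination: RFC 3056 addresses carry the 2002::/16 prefix followed by the 32-bit IPv4 address in bytes 2..5. The kernel keeps that value big-endian (__be32); the minimal userspace sketch below decodes it to a host-order value so it can be printed, and takes a plain byte array instead of struct in6_addr:

#include <stdint.h>
#include <stdio.h>

/* Return the embedded IPv4 address as a host-order u32 if the IPv6
 * address is in 2002::/16, else 0 — the same test try_6to4() performs. */
static uint32_t try_6to4(const uint8_t v6[16])
{
    uint32_t v4 = 0;

    if (v6[0] == 0x20 && v6[1] == 0x02) {
        /* 16-bit prefix, then 32-bit IPv4 address, then 16-bit SLA ID... */
        v4 = (uint32_t)v6[2] << 24 | (uint32_t)v6[3] << 16 |
             (uint32_t)v6[4] << 8  |  v6[5];
    }
    return v4;
}

int main(void)
{
    /* 2002:c000:0204::1 embeds 192.0.2.4 */
    uint8_t addr[16] = { 0x20, 0x02, 0xc0, 0x00, 0x02, 0x04,
                         0, 0, 0, 0, 0, 0, 0, 0, 0, 1 };
    uint32_t v4 = try_6to4(addr);

    printf("%u.%u.%u.%u\n",
           (unsigned)(v4 >> 24), (unsigned)((v4 >> 16) & 0xff),
           (unsigned)((v4 >> 8) & 0xff), (unsigned)(v4 & 0xff));
    return 0;
}

The related HASH(addr) change in sit.c is purely an annotation fix: the arithmetic is unchanged, but the __force u32 casts tell sparse that mixing a __be32 tunnel endpoint into a host-order hash index is intentional.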
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 4c2a7c0cafef..c25e930c2c69 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
@@ -66,10 +66,13 @@ | |||
66 | #include <linux/proc_fs.h> | 66 | #include <linux/proc_fs.h> |
67 | #include <linux/seq_file.h> | 67 | #include <linux/seq_file.h> |
68 | 68 | ||
69 | #include <linux/crypto.h> | ||
70 | #include <linux/scatterlist.h> | ||
71 | |||
69 | /* Socket used for sending RSTs and ACKs */ | 72 | /* Socket used for sending RSTs and ACKs */ |
70 | static struct socket *tcp6_socket; | 73 | static struct socket *tcp6_socket; |
71 | 74 | ||
72 | static void tcp_v6_send_reset(struct sk_buff *skb); | 75 | static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb); |
73 | static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req); | 76 | static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req); |
74 | static void tcp_v6_send_check(struct sock *sk, int len, | 77 | static void tcp_v6_send_check(struct sock *sk, int len, |
75 | struct sk_buff *skb); | 78 | struct sk_buff *skb); |
@@ -78,6 +81,10 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb); | |||
78 | 81 | ||
79 | static struct inet_connection_sock_af_ops ipv6_mapped; | 82 | static struct inet_connection_sock_af_ops ipv6_mapped; |
80 | static struct inet_connection_sock_af_ops ipv6_specific; | 83 | static struct inet_connection_sock_af_ops ipv6_specific; |
84 | #ifdef CONFIG_TCP_MD5SIG | ||
85 | static struct tcp_sock_af_ops tcp_sock_ipv6_specific; | ||
86 | static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific; | ||
87 | #endif | ||
81 | 88 | ||
82 | static int tcp_v6_get_port(struct sock *sk, unsigned short snum) | 89 | static int tcp_v6_get_port(struct sock *sk, unsigned short snum) |
83 | { | 90 | { |
@@ -98,27 +105,20 @@ static void tcp_v6_hash(struct sock *sk) | |||
98 | } | 105 | } |
99 | } | 106 | } |
100 | 107 | ||
101 | static __inline__ u16 tcp_v6_check(struct tcphdr *th, int len, | 108 | static __inline__ __sum16 tcp_v6_check(struct tcphdr *th, int len, |
102 | struct in6_addr *saddr, | 109 | struct in6_addr *saddr, |
103 | struct in6_addr *daddr, | 110 | struct in6_addr *daddr, |
104 | unsigned long base) | 111 | __wsum base) |
105 | { | 112 | { |
106 | return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base); | 113 | return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base); |
107 | } | 114 | } |
108 | 115 | ||
109 | static __u32 tcp_v6_init_sequence(struct sock *sk, struct sk_buff *skb) | 116 | static __u32 tcp_v6_init_sequence(struct sk_buff *skb) |
110 | { | 117 | { |
111 | if (skb->protocol == htons(ETH_P_IPV6)) { | 118 | return secure_tcpv6_sequence_number(skb->nh.ipv6h->daddr.s6_addr32, |
112 | return secure_tcpv6_sequence_number(skb->nh.ipv6h->daddr.s6_addr32, | 119 | skb->nh.ipv6h->saddr.s6_addr32, |
113 | skb->nh.ipv6h->saddr.s6_addr32, | 120 | skb->h.th->dest, |
114 | skb->h.th->dest, | 121 | skb->h.th->source); |
115 | skb->h.th->source); | ||
116 | } else { | ||
117 | return secure_tcp_sequence_number(skb->nh.iph->daddr, | ||
118 | skb->nh.iph->saddr, | ||
119 | skb->h.th->dest, | ||
120 | skb->h.th->source); | ||
121 | } | ||
122 | } | 122 | } |
123 | 123 | ||
124 | static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, | 124 | static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, |
@@ -215,6 +215,9 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, | |||
215 | 215 | ||
216 | icsk->icsk_af_ops = &ipv6_mapped; | 216 | icsk->icsk_af_ops = &ipv6_mapped; |
217 | sk->sk_backlog_rcv = tcp_v4_do_rcv; | 217 | sk->sk_backlog_rcv = tcp_v4_do_rcv; |
218 | #ifdef CONFIG_TCP_MD5SIG | ||
219 | tp->af_specific = &tcp_sock_ipv6_mapped_specific; | ||
220 | #endif | ||
218 | 221 | ||
219 | err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin)); | 222 | err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin)); |
220 | 223 | ||
@@ -222,6 +225,9 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, | |||
222 | icsk->icsk_ext_hdr_len = exthdrlen; | 225 | icsk->icsk_ext_hdr_len = exthdrlen; |
223 | icsk->icsk_af_ops = &ipv6_specific; | 226 | icsk->icsk_af_ops = &ipv6_specific; |
224 | sk->sk_backlog_rcv = tcp_v6_do_rcv; | 227 | sk->sk_backlog_rcv = tcp_v6_do_rcv; |
228 | #ifdef CONFIG_TCP_MD5SIG | ||
229 | tp->af_specific = &tcp_sock_ipv6_specific; | ||
230 | #endif | ||
225 | goto failure; | 231 | goto failure; |
226 | } else { | 232 | } else { |
227 | ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF), | 233 | ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF), |
@@ -310,7 +316,7 @@ failure: | |||
310 | } | 316 | } |
311 | 317 | ||
312 | static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | 318 | static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, |
313 | int type, int code, int offset, __u32 info) | 319 | int type, int code, int offset, __be32 info) |
314 | { | 320 | { |
315 | struct ipv6hdr *hdr = (struct ipv6hdr*)skb->data; | 321 | struct ipv6hdr *hdr = (struct ipv6hdr*)skb->data; |
316 | const struct tcphdr *th = (struct tcphdr *)(skb->data+offset); | 322 | const struct tcphdr *th = (struct tcphdr *)(skb->data+offset); |
@@ -509,8 +515,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req, | |||
509 | 515 | ||
510 | ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr); | 516 | ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr); |
511 | err = ip6_xmit(sk, skb, &fl, opt, 0); | 517 | err = ip6_xmit(sk, skb, &fl, opt, 0); |
512 | if (err == NET_XMIT_CN) | 518 | err = net_xmit_eval(err); |
513 | err = 0; | ||
514 | } | 519 | } |
515 | 520 | ||
516 | done: | 521 | done: |
@@ -526,7 +531,396 @@ static void tcp_v6_reqsk_destructor(struct request_sock *req) | |||
526 | kfree_skb(inet6_rsk(req)->pktopts); | 531 | kfree_skb(inet6_rsk(req)->pktopts); |
527 | } | 532 | } |
528 | 533 | ||
529 | static struct request_sock_ops tcp6_request_sock_ops = { | 534 | #ifdef CONFIG_TCP_MD5SIG |
535 | static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk, | ||
536 | struct in6_addr *addr) | ||
537 | { | ||
538 | struct tcp_sock *tp = tcp_sk(sk); | ||
539 | int i; | ||
540 | |||
541 | BUG_ON(tp == NULL); | ||
542 | |||
543 | if (!tp->md5sig_info || !tp->md5sig_info->entries6) | ||
544 | return NULL; | ||
545 | |||
546 | for (i = 0; i < tp->md5sig_info->entries6; i++) { | ||
547 | if (ipv6_addr_cmp(&tp->md5sig_info->keys6[i].addr, addr) == 0) | ||
548 | return (struct tcp_md5sig_key *)&tp->md5sig_info->keys6[i]; | ||
549 | } | ||
550 | return NULL; | ||
551 | } | ||
552 | |||
553 | static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk, | ||
554 | struct sock *addr_sk) | ||
555 | { | ||
556 | return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr); | ||
557 | } | ||
558 | |||
559 | static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk, | ||
560 | struct request_sock *req) | ||
561 | { | ||
562 | return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr); | ||
563 | } | ||
564 | |||
565 | static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer, | ||
566 | char *newkey, u8 newkeylen) | ||
567 | { | ||
568 | /* Add key to the list */ | ||
569 | struct tcp6_md5sig_key *key; | ||
570 | struct tcp_sock *tp = tcp_sk(sk); | ||
571 | struct tcp6_md5sig_key *keys; | ||
572 | |||
573 | key = (struct tcp6_md5sig_key*) tcp_v6_md5_do_lookup(sk, peer); | ||
574 | if (key) { | ||
575 | /* modify existing entry - just update that one */ | ||
576 | kfree(key->key); | ||
577 | key->key = newkey; | ||
578 | key->keylen = newkeylen; | ||
579 | } else { | ||
580 | /* reallocate new list if current one is full. */ | ||
581 | if (!tp->md5sig_info) { | ||
582 | tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), GFP_ATOMIC); | ||
583 | if (!tp->md5sig_info) { | ||
584 | kfree(newkey); | ||
585 | return -ENOMEM; | ||
586 | } | ||
587 | } | ||
588 | tcp_alloc_md5sig_pool(); | ||
589 | if (tp->md5sig_info->alloced6 == tp->md5sig_info->entries6) { | ||
590 | keys = kmalloc((sizeof (tp->md5sig_info->keys6[0]) * | ||
591 | (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC); | ||
592 | |||
593 | if (!keys) { | ||
594 | tcp_free_md5sig_pool(); | ||
595 | kfree(newkey); | ||
596 | return -ENOMEM; | ||
597 | } | ||
598 | |||
599 | if (tp->md5sig_info->entries6) | ||
600 | memmove(keys, tp->md5sig_info->keys6, | ||
601 | (sizeof (tp->md5sig_info->keys6[0]) * | ||
602 | tp->md5sig_info->entries6)); | ||
603 | |||
604 | kfree(tp->md5sig_info->keys6); | ||
605 | tp->md5sig_info->keys6 = keys; | ||
606 | tp->md5sig_info->alloced6++; | ||
607 | } | ||
608 | |||
609 | ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr, | ||
610 | peer); | ||
611 | tp->md5sig_info->keys6[tp->md5sig_info->entries6].key = newkey; | ||
612 | tp->md5sig_info->keys6[tp->md5sig_info->entries6].keylen = newkeylen; | ||
613 | |||
614 | tp->md5sig_info->entries6++; | ||
615 | } | ||
616 | return 0; | ||
617 | } | ||
618 | |||
619 | static int tcp_v6_md5_add_func(struct sock *sk, struct sock *addr_sk, | ||
620 | u8 *newkey, __u8 newkeylen) | ||
621 | { | ||
622 | return tcp_v6_md5_do_add(sk, &inet6_sk(addr_sk)->daddr, | ||
623 | newkey, newkeylen); | ||
624 | } | ||
625 | |||
626 | static int tcp_v6_md5_do_del(struct sock *sk, struct in6_addr *peer) | ||
627 | { | ||
628 | struct tcp_sock *tp = tcp_sk(sk); | ||
629 | int i; | ||
630 | |||
631 | for (i = 0; i < tp->md5sig_info->entries6; i++) { | ||
632 | if (ipv6_addr_cmp(&tp->md5sig_info->keys6[i].addr, peer) == 0) { | ||
633 | /* Free the key */ | ||
634 | kfree(tp->md5sig_info->keys6[i].key); | ||
635 | tp->md5sig_info->entries6--; | ||
636 | |||
637 | if (tp->md5sig_info->entries6 == 0) { | ||
638 | kfree(tp->md5sig_info->keys6); | ||
639 | tp->md5sig_info->keys6 = NULL; | ||
640 | |||
641 | tcp_free_md5sig_pool(); | ||
642 | |||
643 | return 0; | ||
644 | } else { | ||
645 | /* shrink the database */ | ||
646 | if (tp->md5sig_info->entries6 != i) | ||
647 | memmove(&tp->md5sig_info->keys6[i], | ||
648 | &tp->md5sig_info->keys6[i+1], | ||
649 | (tp->md5sig_info->entries6 - i) | ||
650 | * sizeof (tp->md5sig_info->keys6[0])); | ||
651 | } | ||
652 | } | ||
653 | } | ||
654 | return -ENOENT; | ||
655 | } | ||
656 | |||
657 | static void tcp_v6_clear_md5_list (struct sock *sk) | ||
658 | { | ||
659 | struct tcp_sock *tp = tcp_sk(sk); | ||
660 | int i; | ||
661 | |||
662 | if (tp->md5sig_info->entries6) { | ||
663 | for (i = 0; i < tp->md5sig_info->entries6; i++) | ||
664 | kfree(tp->md5sig_info->keys6[i].key); | ||
665 | tp->md5sig_info->entries6 = 0; | ||
666 | tcp_free_md5sig_pool(); | ||
667 | } | ||
668 | |||
669 | kfree(tp->md5sig_info->keys6); | ||
670 | tp->md5sig_info->keys6 = NULL; | ||
671 | tp->md5sig_info->alloced6 = 0; | ||
672 | |||
673 | if (tp->md5sig_info->entries4) { | ||
674 | for (i = 0; i < tp->md5sig_info->entries4; i++) | ||
675 | kfree(tp->md5sig_info->keys4[i].key); | ||
676 | tp->md5sig_info->entries4 = 0; | ||
677 | tcp_free_md5sig_pool(); | ||
678 | } | ||
679 | |||
680 | kfree(tp->md5sig_info->keys4); | ||
681 | tp->md5sig_info->keys4 = NULL; | ||
682 | tp->md5sig_info->alloced4 = 0; | ||
683 | } | ||
684 | |||
685 | static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval, | ||
686 | int optlen) | ||
687 | { | ||
688 | struct tcp_md5sig cmd; | ||
689 | struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr; | ||
690 | u8 *newkey; | ||
691 | |||
692 | if (optlen < sizeof(cmd)) | ||
693 | return -EINVAL; | ||
694 | |||
695 | if (copy_from_user(&cmd, optval, sizeof(cmd))) | ||
696 | return -EFAULT; | ||
697 | |||
698 | if (sin6->sin6_family != AF_INET6) | ||
699 | return -EINVAL; | ||
700 | |||
701 | if (!cmd.tcpm_keylen) { | ||
702 | if (!tcp_sk(sk)->md5sig_info) | ||
703 | return -ENOENT; | ||
704 | if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_MAPPED) | ||
705 | return tcp_v4_md5_do_del(sk, sin6->sin6_addr.s6_addr32[3]); | ||
706 | return tcp_v6_md5_do_del(sk, &sin6->sin6_addr); | ||
707 | } | ||
708 | |||
709 | if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN) | ||
710 | return -EINVAL; | ||
711 | |||
712 | if (!tcp_sk(sk)->md5sig_info) { | ||
713 | struct tcp_sock *tp = tcp_sk(sk); | ||
714 | struct tcp_md5sig_info *p; | ||
715 | |||
716 | p = kzalloc(sizeof(struct tcp_md5sig_info), GFP_KERNEL); | ||
717 | if (!p) | ||
718 | return -ENOMEM; | ||
719 | |||
720 | tp->md5sig_info = p; | ||
721 | } | ||
722 | |||
723 | newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL); | ||
724 | if (!newkey) | ||
725 | return -ENOMEM; | ||
726 | if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_MAPPED) { | ||
727 | return tcp_v4_md5_do_add(sk, sin6->sin6_addr.s6_addr32[3], | ||
728 | newkey, cmd.tcpm_keylen); | ||
729 | } | ||
730 | return tcp_v6_md5_do_add(sk, &sin6->sin6_addr, newkey, cmd.tcpm_keylen); | ||
731 | } | ||
732 | |||
733 | static int tcp_v6_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key, | ||
734 | struct in6_addr *saddr, | ||
735 | struct in6_addr *daddr, | ||
736 | struct tcphdr *th, int protocol, | ||
737 | int tcplen) | ||
738 | { | ||
739 | struct scatterlist sg[4]; | ||
740 | __u16 data_len; | ||
741 | int block = 0; | ||
742 | __sum16 cksum; | ||
743 | struct tcp_md5sig_pool *hp; | ||
744 | struct tcp6_pseudohdr *bp; | ||
745 | struct hash_desc *desc; | ||
746 | int err; | ||
747 | unsigned int nbytes = 0; | ||
748 | |||
749 | hp = tcp_get_md5sig_pool(); | ||
750 | if (!hp) { | ||
751 | printk(KERN_WARNING "%s(): hash pool not found...\n", __FUNCTION__); | ||
752 | goto clear_hash_noput; | ||
753 | } | ||
754 | bp = &hp->md5_blk.ip6; | ||
755 | desc = &hp->md5_desc; | ||
756 | |||
757 | /* 1. TCP pseudo-header (RFC2460) */ | ||
758 | ipv6_addr_copy(&bp->saddr, saddr); | ||
759 | ipv6_addr_copy(&bp->daddr, daddr); | ||
760 | bp->len = htonl(tcplen); | ||
761 | bp->protocol = htonl(protocol); | ||
762 | |||
763 | sg_set_buf(&sg[block++], bp, sizeof(*bp)); | ||
764 | nbytes += sizeof(*bp); | ||
765 | |||
766 | /* 2. TCP header, excluding options */ | ||
767 | cksum = th->check; | ||
768 | th->check = 0; | ||
769 | sg_set_buf(&sg[block++], th, sizeof(*th)); | ||
770 | nbytes += sizeof(*th); | ||
771 | |||
772 | /* 3. TCP segment data (if any) */ | ||
773 | data_len = tcplen - (th->doff << 2); | ||
774 | if (data_len > 0) { | ||
775 | u8 *data = (u8 *)th + (th->doff << 2); | ||
776 | sg_set_buf(&sg[block++], data, data_len); | ||
777 | nbytes += data_len; | ||
778 | } | ||
779 | |||
780 | /* 4. shared key */ | ||
781 | sg_set_buf(&sg[block++], key->key, key->keylen); | ||
782 | nbytes += key->keylen; | ||
783 | |||
784 | /* Now store the hash into the packet */ | ||
785 | err = crypto_hash_init(desc); | ||
786 | if (err) { | ||
787 | printk(KERN_WARNING "%s(): hash_init failed\n", __FUNCTION__); | ||
788 | goto clear_hash; | ||
789 | } | ||
790 | err = crypto_hash_update(desc, sg, nbytes); | ||
791 | if (err) { | ||
792 | printk(KERN_WARNING "%s(): hash_update failed\n", __FUNCTION__); | ||
793 | goto clear_hash; | ||
794 | } | ||
795 | err = crypto_hash_final(desc, md5_hash); | ||
796 | if (err) { | ||
797 | printk(KERN_WARNING "%s(): hash_final failed\n", __FUNCTION__); | ||
798 | goto clear_hash; | ||
799 | } | ||
800 | |||
801 | /* Reset header, and free up the crypto */ | ||
802 | tcp_put_md5sig_pool(); | ||
803 | th->check = cksum; | ||
804 | out: | ||
805 | return 0; | ||
806 | clear_hash: | ||
807 | tcp_put_md5sig_pool(); | ||
808 | clear_hash_noput: | ||
809 | memset(md5_hash, 0, 16); | ||
810 | goto out; | ||
811 | } | ||
812 | |||
813 | static int tcp_v6_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key, | ||
814 | struct sock *sk, | ||
815 | struct dst_entry *dst, | ||
816 | struct request_sock *req, | ||
817 | struct tcphdr *th, int protocol, | ||
818 | int tcplen) | ||
819 | { | ||
820 | struct in6_addr *saddr, *daddr; | ||
821 | |||
822 | if (sk) { | ||
823 | saddr = &inet6_sk(sk)->saddr; | ||
824 | daddr = &inet6_sk(sk)->daddr; | ||
825 | } else { | ||
826 | saddr = &inet6_rsk(req)->loc_addr; | ||
827 | daddr = &inet6_rsk(req)->rmt_addr; | ||
828 | } | ||
829 | return tcp_v6_do_calc_md5_hash(md5_hash, key, | ||
830 | saddr, daddr, | ||
831 | th, protocol, tcplen); | ||
832 | } | ||
833 | |||
834 | static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb) | ||
835 | { | ||
836 | __u8 *hash_location = NULL; | ||
837 | struct tcp_md5sig_key *hash_expected; | ||
838 | struct ipv6hdr *ip6h = skb->nh.ipv6h; | ||
839 | struct tcphdr *th = skb->h.th; | ||
840 | int length = (th->doff << 2) - sizeof (*th); | ||
841 | int genhash; | ||
842 | u8 *ptr; | ||
843 | u8 newhash[16]; | ||
844 | |||
845 | hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr); | ||
846 | |||
847 | /* If the TCP option is too short, we can short cut */ | ||
848 | if (length < TCPOLEN_MD5SIG) | ||
849 | return hash_expected ? 1 : 0; | ||
850 | |||
851 | /* parse options */ | ||
852 | ptr = (u8*)(th + 1); | ||
853 | while (length > 0) { | ||
854 | int opcode = *ptr++; | ||
855 | int opsize; | ||
856 | |||
857 | switch(opcode) { | ||
858 | case TCPOPT_EOL: | ||
859 | goto done_opts; | ||
860 | case TCPOPT_NOP: | ||
861 | length--; | ||
862 | continue; | ||
863 | default: | ||
864 | opsize = *ptr++; | ||
865 | if (opsize < 2 || opsize > length) | ||
866 | goto done_opts; | ||
867 | if (opcode == TCPOPT_MD5SIG) { | ||
868 | hash_location = ptr; | ||
869 | goto done_opts; | ||
870 | } | ||
871 | } | ||
872 | ptr += opsize - 2; | ||
873 | length -= opsize; | ||
874 | } | ||
875 | |||
876 | done_opts: | ||
877 | /* do we have a hash as expected? */ | ||
878 | if (!hash_expected) { | ||
879 | if (!hash_location) | ||
880 | return 0; | ||
881 | if (net_ratelimit()) { | ||
882 | printk(KERN_INFO "MD5 Hash NOT expected but found " | ||
883 | "(" NIP6_FMT ", %u)->" | ||
884 | "(" NIP6_FMT ", %u)\n", | ||
885 | NIP6(ip6h->saddr), ntohs(th->source), | ||
886 | NIP6(ip6h->daddr), ntohs(th->dest)); | ||
887 | } | ||
888 | return 1; | ||
889 | } | ||
890 | |||
891 | if (!hash_location) { | ||
892 | if (net_ratelimit()) { | ||
893 | printk(KERN_INFO "MD5 Hash expected but NOT found " | ||
894 | "(" NIP6_FMT ", %u)->" | ||
895 | "(" NIP6_FMT ", %u)\n", | ||
896 | NIP6(ip6h->saddr), ntohs(th->source), | ||
897 | NIP6(ip6h->daddr), ntohs(th->dest)); | ||
898 | } | ||
899 | return 1; | ||
900 | } | ||
901 | |||
902 | /* check the signature */ | ||
903 | genhash = tcp_v6_do_calc_md5_hash(newhash, | ||
904 | hash_expected, | ||
905 | &ip6h->saddr, &ip6h->daddr, | ||
906 | th, sk->sk_protocol, | ||
907 | skb->len); | ||
908 | if (genhash || memcmp(hash_location, newhash, 16) != 0) { | ||
909 | if (net_ratelimit()) { | ||
910 | printk(KERN_INFO "MD5 Hash %s for " | ||
911 | "(" NIP6_FMT ", %u)->" | ||
912 | "(" NIP6_FMT ", %u)\n", | ||
913 | genhash ? "failed" : "mismatch", | ||
914 | NIP6(ip6h->saddr), ntohs(th->source), | ||
915 | NIP6(ip6h->daddr), ntohs(th->dest)); | ||
916 | } | ||
917 | return 1; | ||
918 | } | ||
919 | return 0; | ||
920 | } | ||
921 | #endif | ||
922 | |||
923 | static struct request_sock_ops tcp6_request_sock_ops __read_mostly = { | ||
530 | .family = AF_INET6, | 924 | .family = AF_INET6, |
531 | .obj_size = sizeof(struct tcp6_request_sock), | 925 | .obj_size = sizeof(struct tcp6_request_sock), |
532 | .rtx_syn_ack = tcp_v6_send_synack, | 926 | .rtx_syn_ack = tcp_v6_send_synack, |
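tcp_v6_do_calc_md5_hash() above feeds four pieces to MD5 via a scatterlist: an IPv6 pseudo-header, the base TCP header with its checksum zeroed (options are not hashed), any segment data, and the shared key — the TCP-MD5 scheme of RFC 2385. A sketch of just that input layout, with made-up header/payload contents and no actual digest computed, so nothing here should be read as the crypto-API usage itself:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* IPv6 pseudo-header as the kernel's tcp6_pseudohdr lays it out:
 * both addresses, the TCP length and the protocol, 40 bytes total. */
struct tcp6_pseudohdr {
    uint8_t  saddr[16];
    uint8_t  daddr[16];
    uint32_t len;        /* htonl(TCP header + payload length) */
    uint32_t protocol;   /* htonl(IPPROTO_TCP)                 */
};

int main(void)
{
    struct tcp6_pseudohdr bp;
    uint8_t tcphdr[20];   /* base TCP header, checksum bytes 16..17 zeroed */
    uint8_t payload[20];  /* segment data; TCP options are skipped         */
    uint8_t key[12] = "secret key!";   /* shared MD5 key (assumption)      */
    uint8_t input[256];
    size_t  off = 0;

    memset(&bp, 0, sizeof(bp));
    memset(tcphdr, 0, sizeof(tcphdr));
    memset(payload, 0, sizeof(payload));

    /* Hash input = pseudo-header | TCP header | data | key, in that order. */
    memcpy(input + off, &bp, sizeof(bp));          off += sizeof(bp);
    memcpy(input + off, tcphdr, sizeof(tcphdr));   off += sizeof(tcphdr);
    memcpy(input + off, payload, sizeof(payload)); off += sizeof(payload);
    memcpy(input + off, key, sizeof(key));         off += sizeof(key);

    printf("bytes fed to MD5: %zu\n", off);        /* 40 + 20 + 20 + 12 */
    return 0;
}

In the real path the checksum field is saved before hashing and restored afterwards, and the MD5 state lives in the per-CPU tcp_md5sig_pool so the hash can be computed in softirq context.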
@@ -535,9 +929,16 @@ static struct request_sock_ops tcp6_request_sock_ops = { | |||
535 | .send_reset = tcp_v6_send_reset | 929 | .send_reset = tcp_v6_send_reset |
536 | }; | 930 | }; |
537 | 931 | ||
932 | #ifdef CONFIG_TCP_MD5SIG | ||
933 | static struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = { | ||
934 | .md5_lookup = tcp_v6_reqsk_md5_lookup, | ||
935 | }; | ||
936 | #endif | ||
937 | |||
538 | static struct timewait_sock_ops tcp6_timewait_sock_ops = { | 938 | static struct timewait_sock_ops tcp6_timewait_sock_ops = { |
539 | .twsk_obj_size = sizeof(struct tcp6_timewait_sock), | 939 | .twsk_obj_size = sizeof(struct tcp6_timewait_sock), |
540 | .twsk_unique = tcp_twsk_unique, | 940 | .twsk_unique = tcp_twsk_unique, |
941 | .twsk_destructor= tcp_twsk_destructor, | ||
541 | }; | 942 | }; |
542 | 943 | ||
543 | static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb) | 944 | static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb) |
@@ -547,7 +948,7 @@ static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb) | |||
547 | 948 | ||
548 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | 949 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
549 | th->check = ~csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP, 0); | 950 | th->check = ~csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP, 0); |
550 | skb->csum = offsetof(struct tcphdr, check); | 951 | skb->csum_offset = offsetof(struct tcphdr, check); |
551 | } else { | 952 | } else { |
552 | th->check = csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP, | 953 | th->check = csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP, |
553 | csum_partial((char *)th, th->doff<<2, | 954 | csum_partial((char *)th, th->doff<<2, |
@@ -569,16 +970,20 @@ static int tcp_v6_gso_send_check(struct sk_buff *skb) | |||
569 | th->check = 0; | 970 | th->check = 0; |
570 | th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len, | 971 | th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len, |
571 | IPPROTO_TCP, 0); | 972 | IPPROTO_TCP, 0); |
572 | skb->csum = offsetof(struct tcphdr, check); | 973 | skb->csum_offset = offsetof(struct tcphdr, check); |
573 | skb->ip_summed = CHECKSUM_PARTIAL; | 974 | skb->ip_summed = CHECKSUM_PARTIAL; |
574 | return 0; | 975 | return 0; |
575 | } | 976 | } |
576 | 977 | ||
577 | static void tcp_v6_send_reset(struct sk_buff *skb) | 978 | static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb) |
578 | { | 979 | { |
579 | struct tcphdr *th = skb->h.th, *t1; | 980 | struct tcphdr *th = skb->h.th, *t1; |
580 | struct sk_buff *buff; | 981 | struct sk_buff *buff; |
581 | struct flowi fl; | 982 | struct flowi fl; |
983 | int tot_len = sizeof(*th); | ||
984 | #ifdef CONFIG_TCP_MD5SIG | ||
985 | struct tcp_md5sig_key *key; | ||
986 | #endif | ||
582 | 987 | ||
583 | if (th->rst) | 988 | if (th->rst) |
584 | return; | 989 | return; |
@@ -586,25 +991,35 @@ static void tcp_v6_send_reset(struct sk_buff *skb) | |||
586 | if (!ipv6_unicast_destination(skb)) | 991 | if (!ipv6_unicast_destination(skb)) |
587 | return; | 992 | return; |
588 | 993 | ||
994 | #ifdef CONFIG_TCP_MD5SIG | ||
995 | if (sk) | ||
996 | key = tcp_v6_md5_do_lookup(sk, &skb->nh.ipv6h->daddr); | ||
997 | else | ||
998 | key = NULL; | ||
999 | |||
1000 | if (key) | ||
1001 | tot_len += TCPOLEN_MD5SIG_ALIGNED; | ||
1002 | #endif | ||
1003 | |||
589 | /* | 1004 | /* |
590 | * We need to grab some memory, and put together an RST, | 1005 | * We need to grab some memory, and put together an RST, |
591 | * and then put it into the queue to be sent. | 1006 | * and then put it into the queue to be sent. |
592 | */ | 1007 | */ |
593 | 1008 | ||
594 | buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + sizeof(struct tcphdr), | 1009 | buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len, |
595 | GFP_ATOMIC); | 1010 | GFP_ATOMIC); |
596 | if (buff == NULL) | 1011 | if (buff == NULL) |
597 | return; | 1012 | return; |
598 | 1013 | ||
599 | skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + sizeof(struct tcphdr)); | 1014 | skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len); |
600 | 1015 | ||
601 | t1 = (struct tcphdr *) skb_push(buff,sizeof(struct tcphdr)); | 1016 | t1 = (struct tcphdr *) skb_push(buff, tot_len); |
602 | 1017 | ||
603 | /* Swap the send and the receive. */ | 1018 | /* Swap the send and the receive. */ |
604 | memset(t1, 0, sizeof(*t1)); | 1019 | memset(t1, 0, sizeof(*t1)); |
605 | t1->dest = th->source; | 1020 | t1->dest = th->source; |
606 | t1->source = th->dest; | 1021 | t1->source = th->dest; |
607 | t1->doff = sizeof(*t1)/4; | 1022 | t1->doff = tot_len / 4; |
608 | t1->rst = 1; | 1023 | t1->rst = 1; |
609 | 1024 | ||
610 | if(th->ack) { | 1025 | if(th->ack) { |
@@ -615,6 +1030,22 @@ static void tcp_v6_send_reset(struct sk_buff *skb) | |||
615 | + skb->len - (th->doff<<2)); | 1030 | + skb->len - (th->doff<<2)); |
616 | } | 1031 | } |
617 | 1032 | ||
1033 | #ifdef CONFIG_TCP_MD5SIG | ||
1034 | if (key) { | ||
1035 | __be32 *opt = (__be32*)(t1 + 1); | ||
1036 | opt[0] = htonl((TCPOPT_NOP << 24) | | ||
1037 | (TCPOPT_NOP << 16) | | ||
1038 | (TCPOPT_MD5SIG << 8) | | ||
1039 | TCPOLEN_MD5SIG); | ||
1040 | tcp_v6_do_calc_md5_hash((__u8*)&opt[1], | ||
1041 | key, | ||
1042 | &skb->nh.ipv6h->daddr, | ||
1043 | &skb->nh.ipv6h->saddr, | ||
1044 | t1, IPPROTO_TCP, | ||
1045 | tot_len); | ||
1046 | } | ||
1047 | #endif | ||
1048 | |||
618 | buff->csum = csum_partial((char *)t1, sizeof(*t1), 0); | 1049 | buff->csum = csum_partial((char *)t1, sizeof(*t1), 0); |
619 | 1050 | ||
620 | memset(&fl, 0, sizeof(fl)); | 1051 | memset(&fl, 0, sizeof(fl)); |
@@ -645,15 +1076,37 @@ static void tcp_v6_send_reset(struct sk_buff *skb) | |||
645 | kfree_skb(buff); | 1076 | kfree_skb(buff); |
646 | } | 1077 | } |
647 | 1078 | ||
648 | static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts) | 1079 | static void tcp_v6_send_ack(struct tcp_timewait_sock *tw, |
1080 | struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts) | ||
649 | { | 1081 | { |
650 | struct tcphdr *th = skb->h.th, *t1; | 1082 | struct tcphdr *th = skb->h.th, *t1; |
651 | struct sk_buff *buff; | 1083 | struct sk_buff *buff; |
652 | struct flowi fl; | 1084 | struct flowi fl; |
653 | int tot_len = sizeof(struct tcphdr); | 1085 | int tot_len = sizeof(struct tcphdr); |
1086 | __be32 *topt; | ||
1087 | #ifdef CONFIG_TCP_MD5SIG | ||
1088 | struct tcp_md5sig_key *key; | ||
1089 | struct tcp_md5sig_key tw_key; | ||
1090 | #endif | ||
1091 | |||
1092 | #ifdef CONFIG_TCP_MD5SIG | ||
1093 | if (!tw && skb->sk) { | ||
1094 | key = tcp_v6_md5_do_lookup(skb->sk, &skb->nh.ipv6h->daddr); | ||
1095 | } else if (tw && tw->tw_md5_keylen) { | ||
1096 | tw_key.key = tw->tw_md5_key; | ||
1097 | tw_key.keylen = tw->tw_md5_keylen; | ||
1098 | key = &tw_key; | ||
1099 | } else { | ||
1100 | key = NULL; | ||
1101 | } | ||
1102 | #endif | ||
654 | 1103 | ||
655 | if (ts) | 1104 | if (ts) |
656 | tot_len += TCPOLEN_TSTAMP_ALIGNED; | 1105 | tot_len += TCPOLEN_TSTAMP_ALIGNED; |
1106 | #ifdef CONFIG_TCP_MD5SIG | ||
1107 | if (key) | ||
1108 | tot_len += TCPOLEN_MD5SIG_ALIGNED; | ||
1109 | #endif | ||
657 | 1110 | ||
658 | buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len, | 1111 | buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len, |
659 | GFP_ATOMIC); | 1112 | GFP_ATOMIC); |
@@ -673,15 +1126,29 @@ static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 | |||
673 | t1->ack_seq = htonl(ack); | 1126 | t1->ack_seq = htonl(ack); |
674 | t1->ack = 1; | 1127 | t1->ack = 1; |
675 | t1->window = htons(win); | 1128 | t1->window = htons(win); |
1129 | |||
1130 | topt = (__be32 *)(t1 + 1); | ||
676 | 1131 | ||
677 | if (ts) { | 1132 | if (ts) { |
678 | u32 *ptr = (u32*)(t1 + 1); | 1133 | *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | |
679 | *ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | | 1134 | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP); |
680 | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP); | 1135 | *topt++ = htonl(tcp_time_stamp); |
681 | *ptr++ = htonl(tcp_time_stamp); | 1136 | *topt = htonl(ts); |
682 | *ptr = htonl(ts); | ||
683 | } | 1137 | } |
684 | 1138 | ||
1139 | #ifdef CONFIG_TCP_MD5SIG | ||
1140 | if (key) { | ||
1141 | *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | | ||
1142 | (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG); | ||
1143 | tcp_v6_do_calc_md5_hash((__u8 *)topt, | ||
1144 | key, | ||
1145 | &skb->nh.ipv6h->daddr, | ||
1146 | &skb->nh.ipv6h->saddr, | ||
1147 | t1, IPPROTO_TCP, | ||
1148 | tot_len); | ||
1149 | } | ||
1150 | #endif | ||
1151 | |||
685 | buff->csum = csum_partial((char *)t1, tot_len, 0); | 1152 | buff->csum = csum_partial((char *)t1, tot_len, 0); |
686 | 1153 | ||
687 | memset(&fl, 0, sizeof(fl)); | 1154 | memset(&fl, 0, sizeof(fl)); |
@@ -712,9 +1179,9 @@ static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 | |||
712 | static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb) | 1179 | static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb) |
713 | { | 1180 | { |
714 | struct inet_timewait_sock *tw = inet_twsk(sk); | 1181 | struct inet_timewait_sock *tw = inet_twsk(sk); |
715 | const struct tcp_timewait_sock *tcptw = tcp_twsk(sk); | 1182 | struct tcp_timewait_sock *tcptw = tcp_twsk(sk); |
716 | 1183 | ||
717 | tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt, | 1184 | tcp_v6_send_ack(tcptw, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt, |
718 | tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale, | 1185 | tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale, |
719 | tcptw->tw_ts_recent); | 1186 | tcptw->tw_ts_recent); |
720 | 1187 | ||
@@ -723,7 +1190,7 @@ static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb) | |||
723 | 1190 | ||
724 | static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req) | 1191 | static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req) |
725 | { | 1192 | { |
726 | tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent); | 1193 | tcp_v6_send_ack(NULL, skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent); |
727 | } | 1194 | } |
728 | 1195 | ||
729 | 1196 | ||
@@ -794,6 +1261,10 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) | |||
794 | if (req == NULL) | 1261 | if (req == NULL) |
795 | goto drop; | 1262 | goto drop; |
796 | 1263 | ||
1264 | #ifdef CONFIG_TCP_MD5SIG | ||
1265 | tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops; | ||
1266 | #endif | ||
1267 | |||
797 | tcp_clear_options(&tmp_opt); | 1268 | tcp_clear_options(&tmp_opt); |
798 | tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr); | 1269 | tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr); |
799 | tmp_opt.user_mss = tp->rx_opt.user_mss; | 1270 | tmp_opt.user_mss = tp->rx_opt.user_mss; |
@@ -822,7 +1293,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) | |||
822 | treq->iif = inet6_iif(skb); | 1293 | treq->iif = inet6_iif(skb); |
823 | 1294 | ||
824 | if (isn == 0) | 1295 | if (isn == 0) |
825 | isn = tcp_v6_init_sequence(sk,skb); | 1296 | isn = tcp_v6_init_sequence(skb); |
826 | 1297 | ||
827 | tcp_rsk(req)->snt_isn = isn; | 1298 | tcp_rsk(req)->snt_isn = isn; |
828 | 1299 | ||
@@ -852,6 +1323,9 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb, | |||
852 | struct tcp_sock *newtp; | 1323 | struct tcp_sock *newtp; |
853 | struct sock *newsk; | 1324 | struct sock *newsk; |
854 | struct ipv6_txoptions *opt; | 1325 | struct ipv6_txoptions *opt; |
1326 | #ifdef CONFIG_TCP_MD5SIG | ||
1327 | struct tcp_md5sig_key *key; | ||
1328 | #endif | ||
855 | 1329 | ||
856 | if (skb->protocol == htons(ETH_P_IP)) { | 1330 | if (skb->protocol == htons(ETH_P_IP)) { |
857 | /* | 1331 | /* |
@@ -882,6 +1356,10 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb, | |||
882 | 1356 | ||
883 | inet_csk(newsk)->icsk_af_ops = &ipv6_mapped; | 1357 | inet_csk(newsk)->icsk_af_ops = &ipv6_mapped; |
884 | newsk->sk_backlog_rcv = tcp_v4_do_rcv; | 1358 | newsk->sk_backlog_rcv = tcp_v4_do_rcv; |
1359 | #ifdef CONFIG_TCP_MD5SIG | ||
1360 | newtp->af_specific = &tcp_sock_ipv6_mapped_specific; | ||
1361 | #endif | ||
1362 | |||
885 | newnp->pktoptions = NULL; | 1363 | newnp->pktoptions = NULL; |
886 | newnp->opt = NULL; | 1364 | newnp->opt = NULL; |
887 | newnp->mcast_oif = inet6_iif(skb); | 1365 | newnp->mcast_oif = inet6_iif(skb); |
@@ -1016,6 +1494,21 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb, | |||
1016 | 1494 | ||
1017 | newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6; | 1495 | newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6; |
1018 | 1496 | ||
1497 | #ifdef CONFIG_TCP_MD5SIG | ||
1498 | /* Copy over the MD5 key from the original socket */ | ||
1499 | if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) { | ||
1500 | /* We're using one, so create a matching key | ||
1501 | * on the newsk structure. If we fail to get | ||
1502 | * memory, then we end up not copying the key | ||
1503 | * across. Shucks. | ||
1504 | */ | ||
1505 | char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC); | ||
1506 | if (newkey != NULL) | ||
1507 | tcp_v6_md5_do_add(newsk, &inet6_sk(sk)->daddr, | ||
1508 | newkey, key->keylen); | ||
1509 | } | ||
1510 | #endif | ||
1511 | |||
1019 | __inet6_hash(&tcp_hashinfo, newsk); | 1512 | __inet6_hash(&tcp_hashinfo, newsk); |
1020 | inet_inherit_port(&tcp_hashinfo, sk, newsk); | 1513 | inet_inherit_port(&tcp_hashinfo, sk, newsk); |
1021 | 1514 | ||
@@ -1031,7 +1524,7 @@ out: | |||
1031 | return NULL; | 1524 | return NULL; |
1032 | } | 1525 | } |
1033 | 1526 | ||
1034 | static int tcp_v6_checksum_init(struct sk_buff *skb) | 1527 | static __sum16 tcp_v6_checksum_init(struct sk_buff *skb) |
1035 | { | 1528 | { |
1036 | if (skb->ip_summed == CHECKSUM_COMPLETE) { | 1529 | if (skb->ip_summed == CHECKSUM_COMPLETE) { |
1037 | if (!tcp_v6_check(skb->h.th,skb->len,&skb->nh.ipv6h->saddr, | 1530 | if (!tcp_v6_check(skb->h.th,skb->len,&skb->nh.ipv6h->saddr, |
@@ -1041,8 +1534,8 @@ static int tcp_v6_checksum_init(struct sk_buff *skb) | |||
1041 | } | 1534 | } |
1042 | } | 1535 | } |
1043 | 1536 | ||
1044 | skb->csum = ~tcp_v6_check(skb->h.th,skb->len,&skb->nh.ipv6h->saddr, | 1537 | skb->csum = ~csum_unfold(tcp_v6_check(skb->h.th,skb->len,&skb->nh.ipv6h->saddr, |
1045 | &skb->nh.ipv6h->daddr, 0); | 1538 | &skb->nh.ipv6h->daddr, 0)); |
1046 | 1539 | ||
1047 | if (skb->len <= 76) { | 1540 | if (skb->len <= 76) { |
1048 | return __skb_checksum_complete(skb); | 1541 | return __skb_checksum_complete(skb); |
@@ -1075,6 +1568,11 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) | |||
1075 | if (skb->protocol == htons(ETH_P_IP)) | 1568 | if (skb->protocol == htons(ETH_P_IP)) |
1076 | return tcp_v4_do_rcv(sk, skb); | 1569 | return tcp_v4_do_rcv(sk, skb); |
1077 | 1570 | ||
1571 | #ifdef CONFIG_TCP_MD5SIG | ||
1572 | if (tcp_v6_inbound_md5_hash (sk, skb)) | ||
1573 | goto discard; | ||
1574 | #endif | ||
1575 | |||
1078 | if (sk_filter(sk, skb)) | 1576 | if (sk_filter(sk, skb)) |
1079 | goto discard; | 1577 | goto discard; |
1080 | 1578 | ||
@@ -1140,7 +1638,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) | |||
1140 | return 0; | 1638 | return 0; |
1141 | 1639 | ||
1142 | reset: | 1640 | reset: |
1143 | tcp_v6_send_reset(skb); | 1641 | tcp_v6_send_reset(sk, skb); |
1144 | discard: | 1642 | discard: |
1145 | if (opt_skb) | 1643 | if (opt_skb) |
1146 | __kfree_skb(opt_skb); | 1644 | __kfree_skb(opt_skb); |
@@ -1265,7 +1763,7 @@ no_tcp_socket: | |||
1265 | bad_packet: | 1763 | bad_packet: |
1266 | TCP_INC_STATS_BH(TCP_MIB_INERRS); | 1764 | TCP_INC_STATS_BH(TCP_MIB_INERRS); |
1267 | } else { | 1765 | } else { |
1268 | tcp_v6_send_reset(skb); | 1766 | tcp_v6_send_reset(NULL, skb); |
1269 | } | 1767 | } |
1270 | 1768 | ||
1271 | discard_it: | 1769 | discard_it: |
@@ -1344,6 +1842,15 @@ static struct inet_connection_sock_af_ops ipv6_specific = { | |||
1344 | #endif | 1842 | #endif |
1345 | }; | 1843 | }; |
1346 | 1844 | ||
1845 | #ifdef CONFIG_TCP_MD5SIG | ||
1846 | static struct tcp_sock_af_ops tcp_sock_ipv6_specific = { | ||
1847 | .md5_lookup = tcp_v6_md5_lookup, | ||
1848 | .calc_md5_hash = tcp_v6_calc_md5_hash, | ||
1849 | .md5_add = tcp_v6_md5_add_func, | ||
1850 | .md5_parse = tcp_v6_parse_md5_keys, | ||
1851 | }; | ||
1852 | #endif | ||
1853 | |||
1347 | /* | 1854 | /* |
1348 | * TCP over IPv4 via INET6 API | 1855 | * TCP over IPv4 via INET6 API |
1349 | */ | 1856 | */ |
@@ -1366,6 +1873,15 @@ static struct inet_connection_sock_af_ops ipv6_mapped = { | |||
1366 | #endif | 1873 | #endif |
1367 | }; | 1874 | }; |
1368 | 1875 | ||
1876 | #ifdef CONFIG_TCP_MD5SIG | ||
1877 | static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = { | ||
1878 | .md5_lookup = tcp_v4_md5_lookup, | ||
1879 | .calc_md5_hash = tcp_v4_calc_md5_hash, | ||
1880 | .md5_add = tcp_v6_md5_add_func, | ||
1881 | .md5_parse = tcp_v6_parse_md5_keys, | ||
1882 | }; | ||
1883 | #endif | ||
1884 | |||
1369 | /* NOTE: A lot of things set to zero explicitly by call to | 1885 | /* NOTE: A lot of things set to zero explicitly by call to |
1370 | * sk_alloc() so need not be done here. | 1886 | * sk_alloc() so need not be done here. |
1371 | */ | 1887 | */ |
@@ -1405,6 +1921,10 @@ static int tcp_v6_init_sock(struct sock *sk) | |||
1405 | sk->sk_write_space = sk_stream_write_space; | 1921 | sk->sk_write_space = sk_stream_write_space; |
1406 | sock_set_flag(sk, SOCK_USE_WRITE_QUEUE); | 1922 | sock_set_flag(sk, SOCK_USE_WRITE_QUEUE); |
1407 | 1923 | ||
1924 | #ifdef CONFIG_TCP_MD5SIG | ||
1925 | tp->af_specific = &tcp_sock_ipv6_specific; | ||
1926 | #endif | ||
1927 | |||
1408 | sk->sk_sndbuf = sysctl_tcp_wmem[1]; | 1928 | sk->sk_sndbuf = sysctl_tcp_wmem[1]; |
1409 | sk->sk_rcvbuf = sysctl_tcp_rmem[1]; | 1929 | sk->sk_rcvbuf = sysctl_tcp_rmem[1]; |
1410 | 1930 | ||
@@ -1415,6 +1935,11 @@ static int tcp_v6_init_sock(struct sock *sk) | |||
1415 | 1935 | ||
1416 | static int tcp_v6_destroy_sock(struct sock *sk) | 1936 | static int tcp_v6_destroy_sock(struct sock *sk) |
1417 | { | 1937 | { |
1938 | #ifdef CONFIG_TCP_MD5SIG | ||
1939 | /* Clean up the MD5 key list */ | ||
1940 | if (tcp_sk(sk)->md5sig_info) | ||
1941 | tcp_v6_clear_md5_list(sk); | ||
1942 | #endif | ||
1418 | tcp_v4_destroy_sock(sk); | 1943 | tcp_v4_destroy_sock(sk); |
1419 | return inet6_destroy_sock(sk); | 1944 | return inet6_destroy_sock(sk); |
1420 | } | 1945 | } |
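Note: the tcp_ipv6.c hunks above thread an MD5 key lookup into the RST/ACK reply paths and reserve TCPOLEN_MD5SIG_ALIGNED extra header bytes whenever a key is found. A minimal userspace sketch of how that 20-byte aligned option block is packed follows; the constant values mirror RFC 2385 and the kernel's tcp.h, but treat the macro names as assumptions outside the kernel tree.

    #include <stdint.h>
    #include <string.h>
    #include <arpa/inet.h>              /* htonl() */

    #define TCPOPT_NOP              1
    #define TCPOPT_MD5SIG           19  /* option kind, RFC 2385 */
    #define TCPOLEN_MD5SIG          18  /* kind + length + 16-byte digest */
    #define TCPOLEN_MD5SIG_ALIGNED  20  /* padded with two NOPs to 32 bits */

    /* Pack NOP, NOP, kind, length into one option word and leave room for
     * the 16-byte digest that tcp_v6_do_calc_md5_hash() would fill in
     * (zeroed here as a placeholder). */
    static size_t pack_md5_option(uint8_t *buf)
    {
        uint32_t word = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
                              (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);

        memcpy(buf, &word, sizeof(word));
        memset(buf + sizeof(word), 0, 16);  /* digest placeholder */
        return TCPOLEN_MD5SIG_ALIGNED;      /* bytes consumed in the header */
    }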
diff --git a/net/ipv6/tunnel6.c b/net/ipv6/tunnel6.c index 0ef9a35798d1..918d07dd1219 100644 --- a/net/ipv6/tunnel6.c +++ b/net/ipv6/tunnel6.c | |||
@@ -104,7 +104,7 @@ drop: | |||
104 | } | 104 | } |
105 | 105 | ||
106 | static void tunnel6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | 106 | static void tunnel6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, |
107 | int type, int code, int offset, __u32 info) | 107 | int type, int code, int offset, __be32 info) |
108 | { | 108 | { |
109 | struct xfrm6_tunnel *handler; | 109 | struct xfrm6_tunnel *handler; |
110 | 110 | ||
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index c83f23e51c46..f52a5c3cc0a3 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c | |||
@@ -38,26 +38,18 @@ | |||
38 | #include <linux/skbuff.h> | 38 | #include <linux/skbuff.h> |
39 | #include <asm/uaccess.h> | 39 | #include <asm/uaccess.h> |
40 | 40 | ||
41 | #include <net/sock.h> | ||
42 | #include <net/snmp.h> | ||
43 | |||
44 | #include <net/ipv6.h> | ||
45 | #include <net/ndisc.h> | 41 | #include <net/ndisc.h> |
46 | #include <net/protocol.h> | 42 | #include <net/protocol.h> |
47 | #include <net/transp_v6.h> | 43 | #include <net/transp_v6.h> |
48 | #include <net/ip6_route.h> | 44 | #include <net/ip6_route.h> |
49 | #include <net/addrconf.h> | ||
50 | #include <net/ip.h> | ||
51 | #include <net/udp.h> | ||
52 | #include <net/raw.h> | 45 | #include <net/raw.h> |
53 | #include <net/inet_common.h> | ||
54 | #include <net/tcp_states.h> | 46 | #include <net/tcp_states.h> |
55 | |||
56 | #include <net/ip6_checksum.h> | 47 | #include <net/ip6_checksum.h> |
57 | #include <net/xfrm.h> | 48 | #include <net/xfrm.h> |
58 | 49 | ||
59 | #include <linux/proc_fs.h> | 50 | #include <linux/proc_fs.h> |
60 | #include <linux/seq_file.h> | 51 | #include <linux/seq_file.h> |
52 | #include "udp_impl.h" | ||
61 | 53 | ||
62 | DEFINE_SNMP_STAT(struct udp_mib, udp_stats_in6) __read_mostly; | 54 | DEFINE_SNMP_STAT(struct udp_mib, udp_stats_in6) __read_mostly; |
63 | 55 | ||
@@ -66,23 +58,9 @@ static inline int udp_v6_get_port(struct sock *sk, unsigned short snum) | |||
66 | return udp_get_port(sk, snum, ipv6_rcv_saddr_equal); | 58 | return udp_get_port(sk, snum, ipv6_rcv_saddr_equal); |
67 | } | 59 | } |
68 | 60 | ||
69 | static void udp_v6_hash(struct sock *sk) | 61 | static struct sock *__udp6_lib_lookup(struct in6_addr *saddr, __be16 sport, |
70 | { | 62 | struct in6_addr *daddr, __be16 dport, |
71 | BUG(); | 63 | int dif, struct hlist_head udptable[]) |
72 | } | ||
73 | |||
74 | static void udp_v6_unhash(struct sock *sk) | ||
75 | { | ||
76 | write_lock_bh(&udp_hash_lock); | ||
77 | if (sk_del_node_init(sk)) { | ||
78 | inet_sk(sk)->num = 0; | ||
79 | sock_prot_dec_use(sk->sk_prot); | ||
80 | } | ||
81 | write_unlock_bh(&udp_hash_lock); | ||
82 | } | ||
83 | |||
84 | static struct sock *udp_v6_lookup(struct in6_addr *saddr, u16 sport, | ||
85 | struct in6_addr *daddr, u16 dport, int dif) | ||
86 | { | 64 | { |
87 | struct sock *sk, *result = NULL; | 65 | struct sock *sk, *result = NULL; |
88 | struct hlist_node *node; | 66 | struct hlist_node *node; |
@@ -90,7 +68,7 @@ static struct sock *udp_v6_lookup(struct in6_addr *saddr, u16 sport, | |||
90 | int badness = -1; | 68 | int badness = -1; |
91 | 69 | ||
92 | read_lock(&udp_hash_lock); | 70 | read_lock(&udp_hash_lock); |
93 | sk_for_each(sk, node, &udp_hash[hnum & (UDP_HTABLE_SIZE - 1)]) { | 71 | sk_for_each(sk, node, &udptable[hnum & (UDP_HTABLE_SIZE - 1)]) { |
94 | struct inet_sock *inet = inet_sk(sk); | 72 | struct inet_sock *inet = inet_sk(sk); |
95 | 73 | ||
96 | if (inet->num == hnum && sk->sk_family == PF_INET6) { | 74 | if (inet->num == hnum && sk->sk_family == PF_INET6) { |
@@ -132,20 +110,11 @@ static struct sock *udp_v6_lookup(struct in6_addr *saddr, u16 sport, | |||
132 | } | 110 | } |
133 | 111 | ||
134 | /* | 112 | /* |
135 | * | ||
136 | */ | ||
137 | |||
138 | static void udpv6_close(struct sock *sk, long timeout) | ||
139 | { | ||
140 | sk_common_release(sk); | ||
141 | } | ||
142 | |||
143 | /* | ||
144 | * This should be easy, if there is something there we | 113 | * This should be easy, if there is something there we |
145 | * return it, otherwise we block. | 114 | * return it, otherwise we block. |
146 | */ | 115 | */ |
147 | 116 | ||
148 | static int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk, | 117 | int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk, |
149 | struct msghdr *msg, size_t len, | 118 | struct msghdr *msg, size_t len, |
150 | int noblock, int flags, int *addr_len) | 119 | int noblock, int flags, int *addr_len) |
151 | { | 120 | { |
@@ -153,7 +122,7 @@ static int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk, | |||
153 | struct inet_sock *inet = inet_sk(sk); | 122 | struct inet_sock *inet = inet_sk(sk); |
154 | struct sk_buff *skb; | 123 | struct sk_buff *skb; |
155 | size_t copied; | 124 | size_t copied; |
156 | int err; | 125 | int err, copy_only, is_udplite = IS_UDPLITE(sk); |
157 | 126 | ||
158 | if (addr_len) | 127 | if (addr_len) |
159 | *addr_len=sizeof(struct sockaddr_in6); | 128 | *addr_len=sizeof(struct sockaddr_in6); |
@@ -172,15 +141,21 @@ try_again: | |||
172 | msg->msg_flags |= MSG_TRUNC; | 141 | msg->msg_flags |= MSG_TRUNC; |
173 | } | 142 | } |
174 | 143 | ||
175 | if (skb->ip_summed==CHECKSUM_UNNECESSARY) { | 144 | /* |
176 | err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov, | 145 | * Decide whether to checksum and/or copy data. |
177 | copied); | 146 | */ |
178 | } else if (msg->msg_flags&MSG_TRUNC) { | 147 | copy_only = (skb->ip_summed==CHECKSUM_UNNECESSARY); |
179 | if (__skb_checksum_complete(skb)) | 148 | |
149 | if (is_udplite || (!copy_only && msg->msg_flags&MSG_TRUNC)) { | ||
150 | if (__udp_lib_checksum_complete(skb)) | ||
180 | goto csum_copy_err; | 151 | goto csum_copy_err; |
181 | err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov, | 152 | copy_only = 1; |
182 | copied); | 153 | } |
183 | } else { | 154 | |
155 | if (copy_only) | ||
156 | err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), | ||
157 | msg->msg_iov, copied ); | ||
158 | else { | ||
184 | err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov); | 159 | err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov); |
185 | if (err == -EINVAL) | 160 | if (err == -EINVAL) |
186 | goto csum_copy_err; | 161 | goto csum_copy_err; |
@@ -231,14 +206,15 @@ csum_copy_err: | |||
231 | skb_kill_datagram(sk, skb, flags); | 206 | skb_kill_datagram(sk, skb, flags); |
232 | 207 | ||
233 | if (flags & MSG_DONTWAIT) { | 208 | if (flags & MSG_DONTWAIT) { |
234 | UDP6_INC_STATS_USER(UDP_MIB_INERRORS); | 209 | UDP6_INC_STATS_USER(UDP_MIB_INERRORS, is_udplite); |
235 | return -EAGAIN; | 210 | return -EAGAIN; |
236 | } | 211 | } |
237 | goto try_again; | 212 | goto try_again; |
238 | } | 213 | } |
239 | 214 | ||
240 | static void udpv6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | 215 | void __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt, |
241 | int type, int code, int offset, __u32 info) | 216 | int type, int code, int offset, __be32 info, |
217 | struct hlist_head udptable[] ) | ||
242 | { | 218 | { |
243 | struct ipv6_pinfo *np; | 219 | struct ipv6_pinfo *np; |
244 | struct ipv6hdr *hdr = (struct ipv6hdr*)skb->data; | 220 | struct ipv6hdr *hdr = (struct ipv6hdr*)skb->data; |
@@ -248,8 +224,8 @@ static void udpv6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
248 | struct sock *sk; | 224 | struct sock *sk; |
249 | int err; | 225 | int err; |
250 | 226 | ||
251 | sk = udp_v6_lookup(daddr, uh->dest, saddr, uh->source, inet6_iif(skb)); | 227 | sk = __udp6_lib_lookup(daddr, uh->dest, |
252 | 228 | saddr, uh->source, inet6_iif(skb), udptable); | |
253 | if (sk == NULL) | 229 | if (sk == NULL) |
254 | return; | 230 | return; |
255 | 231 | ||
@@ -270,36 +246,60 @@ out: | |||
270 | sock_put(sk); | 246 | sock_put(sk); |
271 | } | 247 | } |
272 | 248 | ||
273 | static inline int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb) | 249 | static __inline__ void udpv6_err(struct sk_buff *skb, |
250 | struct inet6_skb_parm *opt, int type, | ||
251 | int code, int offset, __be32 info ) | ||
252 | { | ||
253 | return __udp6_lib_err(skb, opt, type, code, offset, info, udp_hash); | ||
254 | } | ||
255 | |||
256 | int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb) | ||
274 | { | 257 | { |
258 | struct udp_sock *up = udp_sk(sk); | ||
275 | int rc; | 259 | int rc; |
276 | 260 | ||
277 | if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) { | 261 | if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) |
278 | kfree_skb(skb); | 262 | goto drop; |
279 | return -1; | ||
280 | } | ||
281 | 263 | ||
282 | if (skb_checksum_complete(skb)) { | 264 | /* |
283 | UDP6_INC_STATS_BH(UDP_MIB_INERRORS); | 265 | * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c). |
284 | kfree_skb(skb); | 266 | */ |
285 | return 0; | 267 | if ((up->pcflag & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) { |
268 | |||
269 | if (up->pcrlen == 0) { /* full coverage was set */ | ||
270 | LIMIT_NETDEBUG(KERN_WARNING "UDPLITE6: partial coverage" | ||
271 | " %d while full coverage %d requested\n", | ||
272 | UDP_SKB_CB(skb)->cscov, skb->len); | ||
273 | goto drop; | ||
274 | } | ||
275 | if (UDP_SKB_CB(skb)->cscov < up->pcrlen) { | ||
276 | LIMIT_NETDEBUG(KERN_WARNING "UDPLITE6: coverage %d " | ||
277 | "too small, need min %d\n", | ||
278 | UDP_SKB_CB(skb)->cscov, up->pcrlen); | ||
279 | goto drop; | ||
280 | } | ||
286 | } | 281 | } |
287 | 282 | ||
283 | if (udp_lib_checksum_complete(skb)) | ||
284 | goto drop; | ||
285 | |||
288 | if ((rc = sock_queue_rcv_skb(sk,skb)) < 0) { | 286 | if ((rc = sock_queue_rcv_skb(sk,skb)) < 0) { |
289 | /* Note that an ENOMEM error is charged twice */ | 287 | /* Note that an ENOMEM error is charged twice */ |
290 | if (rc == -ENOMEM) | 288 | if (rc == -ENOMEM) |
291 | UDP6_INC_STATS_BH(UDP_MIB_RCVBUFERRORS); | 289 | UDP6_INC_STATS_BH(UDP_MIB_RCVBUFERRORS, up->pcflag); |
292 | UDP6_INC_STATS_BH(UDP_MIB_INERRORS); | 290 | goto drop; |
293 | kfree_skb(skb); | ||
294 | return 0; | ||
295 | } | 291 | } |
296 | UDP6_INC_STATS_BH(UDP_MIB_INDATAGRAMS); | 292 | UDP6_INC_STATS_BH(UDP_MIB_INDATAGRAMS, up->pcflag); |
297 | return 0; | 293 | return 0; |
294 | drop: | ||
295 | UDP6_INC_STATS_BH(UDP_MIB_INERRORS, up->pcflag); | ||
296 | kfree_skb(skb); | ||
297 | return -1; | ||
298 | } | 298 | } |
299 | 299 | ||
300 | static struct sock *udp_v6_mcast_next(struct sock *sk, | 300 | static struct sock *udp_v6_mcast_next(struct sock *sk, |
301 | u16 loc_port, struct in6_addr *loc_addr, | 301 | __be16 loc_port, struct in6_addr *loc_addr, |
302 | u16 rmt_port, struct in6_addr *rmt_addr, | 302 | __be16 rmt_port, struct in6_addr *rmt_addr, |
303 | int dif) | 303 | int dif) |
304 | { | 304 | { |
305 | struct hlist_node *node; | 305 | struct hlist_node *node; |
@@ -338,15 +338,15 @@ static struct sock *udp_v6_mcast_next(struct sock *sk, | |||
338 | * Note: called only from the BH handler context, | 338 | * Note: called only from the BH handler context, |
339 | * so we don't need to lock the hashes. | 339 | * so we don't need to lock the hashes. |
340 | */ | 340 | */ |
341 | static void udpv6_mcast_deliver(struct udphdr *uh, | 341 | static int __udp6_lib_mcast_deliver(struct sk_buff *skb, struct in6_addr *saddr, |
342 | struct in6_addr *saddr, struct in6_addr *daddr, | 342 | struct in6_addr *daddr, struct hlist_head udptable[]) |
343 | struct sk_buff *skb) | ||
344 | { | 343 | { |
345 | struct sock *sk, *sk2; | 344 | struct sock *sk, *sk2; |
345 | const struct udphdr *uh = skb->h.uh; | ||
346 | int dif; | 346 | int dif; |
347 | 347 | ||
348 | read_lock(&udp_hash_lock); | 348 | read_lock(&udp_hash_lock); |
349 | sk = sk_head(&udp_hash[ntohs(uh->dest) & (UDP_HTABLE_SIZE - 1)]); | 349 | sk = sk_head(&udptable[ntohs(uh->dest) & (UDP_HTABLE_SIZE - 1)]); |
350 | dif = inet6_iif(skb); | 350 | dif = inet6_iif(skb); |
351 | sk = udp_v6_mcast_next(sk, uh->dest, daddr, uh->source, saddr, dif); | 351 | sk = udp_v6_mcast_next(sk, uh->dest, daddr, uh->source, saddr, dif); |
352 | if (!sk) { | 352 | if (!sk) { |
@@ -364,9 +364,35 @@ static void udpv6_mcast_deliver(struct udphdr *uh, | |||
364 | udpv6_queue_rcv_skb(sk, skb); | 364 | udpv6_queue_rcv_skb(sk, skb); |
365 | out: | 365 | out: |
366 | read_unlock(&udp_hash_lock); | 366 | read_unlock(&udp_hash_lock); |
367 | return 0; | ||
368 | } | ||
369 | |||
370 | static inline int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh) | ||
371 | |||
372 | { | ||
373 | if (uh->check == 0) { | ||
374 | /* RFC 2460 section 8.1 says that we SHOULD log | ||
375 | this error. Well, it is reasonable. | ||
376 | */ | ||
377 | LIMIT_NETDEBUG(KERN_INFO "IPv6: udp checksum is 0\n"); | ||
378 | return 1; | ||
379 | } | ||
380 | if (skb->ip_summed == CHECKSUM_COMPLETE && | ||
381 | !csum_ipv6_magic(&skb->nh.ipv6h->saddr, &skb->nh.ipv6h->daddr, | ||
382 | skb->len, IPPROTO_UDP, skb->csum )) | ||
383 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
384 | |||
385 | if (skb->ip_summed != CHECKSUM_UNNECESSARY) | ||
386 | skb->csum = ~csum_unfold(csum_ipv6_magic(&skb->nh.ipv6h->saddr, | ||
387 | &skb->nh.ipv6h->daddr, | ||
388 | skb->len, IPPROTO_UDP, | ||
389 | 0)); | ||
390 | |||
391 | return (UDP_SKB_CB(skb)->partial_cov = 0); | ||
367 | } | 392 | } |
368 | 393 | ||
369 | static int udpv6_rcv(struct sk_buff **pskb) | 394 | int __udp6_lib_rcv(struct sk_buff **pskb, struct hlist_head udptable[], |
395 | int is_udplite) | ||
370 | { | 396 | { |
371 | struct sk_buff *skb = *pskb; | 397 | struct sk_buff *skb = *pskb; |
372 | struct sock *sk; | 398 | struct sock *sk; |
@@ -383,44 +409,39 @@ static int udpv6_rcv(struct sk_buff **pskb) | |||
383 | uh = skb->h.uh; | 409 | uh = skb->h.uh; |
384 | 410 | ||
385 | ulen = ntohs(uh->len); | 411 | ulen = ntohs(uh->len); |
412 | if (ulen > skb->len) | ||
413 | goto short_packet; | ||
386 | 414 | ||
387 | /* Check for jumbo payload */ | 415 | if(! is_udplite ) { /* UDP validates ulen. */ |
388 | if (ulen == 0) | ||
389 | ulen = skb->len; | ||
390 | 416 | ||
391 | if (ulen > skb->len || ulen < sizeof(*uh)) | 417 | /* Check for jumbo payload */ |
392 | goto short_packet; | 418 | if (ulen == 0) |
419 | ulen = skb->len; | ||
393 | 420 | ||
394 | if (uh->check == 0) { | 421 | if (ulen < sizeof(*uh)) |
395 | /* RFC 2460 section 8.1 says that we SHOULD log | 422 | goto short_packet; |
396 | this error. Well, it is reasonable. | ||
397 | */ | ||
398 | LIMIT_NETDEBUG(KERN_INFO "IPv6: udp checksum is 0\n"); | ||
399 | goto discard; | ||
400 | } | ||
401 | 423 | ||
402 | if (ulen < skb->len) { | 424 | if (ulen < skb->len) { |
403 | if (pskb_trim_rcsum(skb, ulen)) | 425 | if (pskb_trim_rcsum(skb, ulen)) |
404 | goto discard; | 426 | goto short_packet; |
405 | saddr = &skb->nh.ipv6h->saddr; | 427 | saddr = &skb->nh.ipv6h->saddr; |
406 | daddr = &skb->nh.ipv6h->daddr; | 428 | daddr = &skb->nh.ipv6h->daddr; |
407 | uh = skb->h.uh; | 429 | uh = skb->h.uh; |
408 | } | 430 | } |
409 | 431 | ||
410 | if (skb->ip_summed == CHECKSUM_COMPLETE && | 432 | if (udp6_csum_init(skb, uh)) |
411 | !csum_ipv6_magic(saddr, daddr, ulen, IPPROTO_UDP, skb->csum)) | 433 | goto discard; |
412 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
413 | 434 | ||
414 | if (skb->ip_summed != CHECKSUM_UNNECESSARY) | 435 | } else { /* UDP-Lite validates cscov. */ |
415 | skb->csum = ~csum_ipv6_magic(saddr, daddr, ulen, IPPROTO_UDP, 0); | 436 | if (udplite6_csum_init(skb, uh)) |
437 | goto discard; | ||
438 | } | ||
416 | 439 | ||
417 | /* | 440 | /* |
418 | * Multicast receive code | 441 | * Multicast receive code |
419 | */ | 442 | */ |
420 | if (ipv6_addr_is_multicast(daddr)) { | 443 | if (ipv6_addr_is_multicast(daddr)) |
421 | udpv6_mcast_deliver(uh, saddr, daddr, skb); | 444 | return __udp6_lib_mcast_deliver(skb, saddr, daddr, udptable); |
422 | return 0; | ||
423 | } | ||
424 | 445 | ||
425 | /* Unicast */ | 446 | /* Unicast */ |
426 | 447 | ||
@@ -428,15 +449,16 @@ static int udpv6_rcv(struct sk_buff **pskb) | |||
428 | * check socket cache ... must talk to Alan about his plans | 449 | * check socket cache ... must talk to Alan about his plans |
429 | * for sock caches... i'll skip this for now. | 450 | * for sock caches... i'll skip this for now. |
430 | */ | 451 | */ |
431 | sk = udp_v6_lookup(saddr, uh->source, daddr, uh->dest, inet6_iif(skb)); | 452 | sk = __udp6_lib_lookup(saddr, uh->source, |
453 | daddr, uh->dest, inet6_iif(skb), udptable); | ||
432 | 454 | ||
433 | if (sk == NULL) { | 455 | if (sk == NULL) { |
434 | if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) | 456 | if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) |
435 | goto discard; | 457 | goto discard; |
436 | 458 | ||
437 | if (skb_checksum_complete(skb)) | 459 | if (udp_lib_checksum_complete(skb)) |
438 | goto discard; | 460 | goto discard; |
439 | UDP6_INC_STATS_BH(UDP_MIB_NOPORTS); | 461 | UDP6_INC_STATS_BH(UDP_MIB_NOPORTS, is_udplite); |
440 | 462 | ||
441 | icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev); | 463 | icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev); |
442 | 464 | ||
@@ -451,14 +473,20 @@ static int udpv6_rcv(struct sk_buff **pskb) | |||
451 | return(0); | 473 | return(0); |
452 | 474 | ||
453 | short_packet: | 475 | short_packet: |
454 | if (net_ratelimit()) | 476 | LIMIT_NETDEBUG(KERN_DEBUG "UDP%sv6: short packet: %d/%u\n", |
455 | printk(KERN_DEBUG "UDP: short packet: %d/%u\n", ulen, skb->len); | 477 | is_udplite? "-Lite" : "", ulen, skb->len); |
456 | 478 | ||
457 | discard: | 479 | discard: |
458 | UDP6_INC_STATS_BH(UDP_MIB_INERRORS); | 480 | UDP6_INC_STATS_BH(UDP_MIB_INERRORS, is_udplite); |
459 | kfree_skb(skb); | 481 | kfree_skb(skb); |
460 | return(0); | 482 | return(0); |
461 | } | 483 | } |
484 | |||
485 | static __inline__ int udpv6_rcv(struct sk_buff **pskb) | ||
486 | { | ||
487 | return __udp6_lib_rcv(pskb, udp_hash, 0); | ||
488 | } | ||
489 | |||
462 | /* | 490 | /* |
463 | * Throw away all pending data and cancel the corking. Socket is locked. | 491 | * Throw away all pending data and cancel the corking. Socket is locked. |
464 | */ | 492 | */ |
@@ -477,13 +505,15 @@ static void udp_v6_flush_pending_frames(struct sock *sk) | |||
477 | * Sending | 505 | * Sending |
478 | */ | 506 | */ |
479 | 507 | ||
480 | static int udp_v6_push_pending_frames(struct sock *sk, struct udp_sock *up) | 508 | static int udp_v6_push_pending_frames(struct sock *sk) |
481 | { | 509 | { |
482 | struct sk_buff *skb; | 510 | struct sk_buff *skb; |
483 | struct udphdr *uh; | 511 | struct udphdr *uh; |
512 | struct udp_sock *up = udp_sk(sk); | ||
484 | struct inet_sock *inet = inet_sk(sk); | 513 | struct inet_sock *inet = inet_sk(sk); |
485 | struct flowi *fl = &inet->cork.fl; | 514 | struct flowi *fl = &inet->cork.fl; |
486 | int err = 0; | 515 | int err = 0; |
516 | __wsum csum = 0; | ||
487 | 517 | ||
488 | /* Grab the skbuff where UDP header space exists. */ | 518 | /* Grab the skbuff where UDP header space exists. */ |
489 | if ((skb = skb_peek(&sk->sk_write_queue)) == NULL) | 519 | if ((skb = skb_peek(&sk->sk_write_queue)) == NULL) |
@@ -498,35 +528,17 @@ static int udp_v6_push_pending_frames(struct sock *sk, struct udp_sock *up) | |||
498 | uh->len = htons(up->len); | 528 | uh->len = htons(up->len); |
499 | uh->check = 0; | 529 | uh->check = 0; |
500 | 530 | ||
501 | if (sk->sk_no_check == UDP_CSUM_NOXMIT) { | 531 | if (up->pcflag) |
502 | skb->ip_summed = CHECKSUM_NONE; | 532 | csum = udplite_csum_outgoing(sk, skb); |
503 | goto send; | 533 | else |
504 | } | 534 | csum = udp_csum_outgoing(sk, skb); |
505 | |||
506 | if (skb_queue_len(&sk->sk_write_queue) == 1) { | ||
507 | skb->csum = csum_partial((char *)uh, | ||
508 | sizeof(struct udphdr), skb->csum); | ||
509 | uh->check = csum_ipv6_magic(&fl->fl6_src, | ||
510 | &fl->fl6_dst, | ||
511 | up->len, fl->proto, skb->csum); | ||
512 | } else { | ||
513 | u32 tmp_csum = 0; | ||
514 | |||
515 | skb_queue_walk(&sk->sk_write_queue, skb) { | ||
516 | tmp_csum = csum_add(tmp_csum, skb->csum); | ||
517 | } | ||
518 | tmp_csum = csum_partial((char *)uh, | ||
519 | sizeof(struct udphdr), tmp_csum); | ||
520 | tmp_csum = csum_ipv6_magic(&fl->fl6_src, | ||
521 | &fl->fl6_dst, | ||
522 | up->len, fl->proto, tmp_csum); | ||
523 | uh->check = tmp_csum; | ||
524 | 535 | ||
525 | } | 536 | /* add protocol-dependent pseudo-header */ |
537 | uh->check = csum_ipv6_magic(&fl->fl6_src, &fl->fl6_dst, | ||
538 | up->len, fl->proto, csum ); | ||
526 | if (uh->check == 0) | 539 | if (uh->check == 0) |
527 | uh->check = -1; | 540 | uh->check = CSUM_MANGLED_0; |
528 | 541 | ||
529 | send: | ||
530 | err = ip6_push_pending_frames(sk); | 542 | err = ip6_push_pending_frames(sk); |
531 | out: | 543 | out: |
532 | up->len = 0; | 544 | up->len = 0; |
@@ -534,7 +546,7 @@ out: | |||
534 | return err; | 546 | return err; |
535 | } | 547 | } |
536 | 548 | ||
537 | static int udpv6_sendmsg(struct kiocb *iocb, struct sock *sk, | 549 | int udpv6_sendmsg(struct kiocb *iocb, struct sock *sk, |
538 | struct msghdr *msg, size_t len) | 550 | struct msghdr *msg, size_t len) |
539 | { | 551 | { |
540 | struct ipv6_txoptions opt_space; | 552 | struct ipv6_txoptions opt_space; |
@@ -554,6 +566,8 @@ static int udpv6_sendmsg(struct kiocb *iocb, struct sock *sk, | |||
554 | int corkreq = up->corkflag || msg->msg_flags&MSG_MORE; | 566 | int corkreq = up->corkflag || msg->msg_flags&MSG_MORE; |
555 | int err; | 567 | int err; |
556 | int connected = 0; | 568 | int connected = 0; |
569 | int is_udplite = up->pcflag; | ||
570 | int (*getfrag)(void *, char *, int, int, int, struct sk_buff *); | ||
557 | 571 | ||
558 | /* destination address check */ | 572 | /* destination address check */ |
559 | if (sin6) { | 573 | if (sin6) { |
@@ -694,7 +708,7 @@ do_udp_sendmsg: | |||
694 | opt = fl6_merge_options(&opt_space, flowlabel, opt); | 708 | opt = fl6_merge_options(&opt_space, flowlabel, opt); |
695 | opt = ipv6_fixup_options(&opt_space, opt); | 709 | opt = ipv6_fixup_options(&opt_space, opt); |
696 | 710 | ||
697 | fl.proto = IPPROTO_UDP; | 711 | fl.proto = sk->sk_protocol; |
698 | ipv6_addr_copy(&fl.fl6_dst, daddr); | 712 | ipv6_addr_copy(&fl.fl6_dst, daddr); |
699 | if (ipv6_addr_any(&fl.fl6_src) && !ipv6_addr_any(&np->saddr)) | 713 | if (ipv6_addr_any(&fl.fl6_src) && !ipv6_addr_any(&np->saddr)) |
700 | ipv6_addr_copy(&fl.fl6_src, &np->saddr); | 714 | ipv6_addr_copy(&fl.fl6_src, &np->saddr); |
@@ -761,14 +775,15 @@ back_from_confirm: | |||
761 | 775 | ||
762 | do_append_data: | 776 | do_append_data: |
763 | up->len += ulen; | 777 | up->len += ulen; |
764 | err = ip6_append_data(sk, ip_generic_getfrag, msg->msg_iov, ulen, | 778 | getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag; |
779 | err = ip6_append_data(sk, getfrag, msg->msg_iov, ulen, | ||
765 | sizeof(struct udphdr), hlimit, tclass, opt, &fl, | 780 | sizeof(struct udphdr), hlimit, tclass, opt, &fl, |
766 | (struct rt6_info*)dst, | 781 | (struct rt6_info*)dst, |
767 | corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags); | 782 | corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags); |
768 | if (err) | 783 | if (err) |
769 | udp_v6_flush_pending_frames(sk); | 784 | udp_v6_flush_pending_frames(sk); |
770 | else if (!corkreq) | 785 | else if (!corkreq) |
771 | err = udp_v6_push_pending_frames(sk, up); | 786 | err = udp_v6_push_pending_frames(sk); |
772 | else if (unlikely(skb_queue_empty(&sk->sk_write_queue))) | 787 | else if (unlikely(skb_queue_empty(&sk->sk_write_queue))) |
773 | up->pending = 0; | 788 | up->pending = 0; |
774 | 789 | ||
@@ -793,7 +808,7 @@ do_append_data: | |||
793 | out: | 808 | out: |
794 | fl6_sock_release(flowlabel); | 809 | fl6_sock_release(flowlabel); |
795 | if (!err) { | 810 | if (!err) { |
796 | UDP6_INC_STATS_USER(UDP_MIB_OUTDATAGRAMS); | 811 | UDP6_INC_STATS_USER(UDP_MIB_OUTDATAGRAMS, is_udplite); |
797 | return len; | 812 | return len; |
798 | } | 813 | } |
799 | /* | 814 | /* |
@@ -804,7 +819,7 @@ out: | |||
804 | * seems like overkill. | 819 | * seems like overkill. |
805 | */ | 820 | */ |
806 | if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { | 821 | if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { |
807 | UDP6_INC_STATS_USER(UDP_MIB_SNDBUFERRORS); | 822 | UDP6_INC_STATS_USER(UDP_MIB_SNDBUFERRORS, is_udplite); |
808 | } | 823 | } |
809 | return err; | 824 | return err; |
810 | 825 | ||
@@ -816,7 +831,7 @@ do_confirm: | |||
816 | goto out; | 831 | goto out; |
817 | } | 832 | } |
818 | 833 | ||
819 | static int udpv6_destroy_sock(struct sock *sk) | 834 | int udpv6_destroy_sock(struct sock *sk) |
820 | { | 835 | { |
821 | lock_sock(sk); | 836 | lock_sock(sk); |
822 | udp_v6_flush_pending_frames(sk); | 837 | udp_v6_flush_pending_frames(sk); |
@@ -830,119 +845,41 @@ static int udpv6_destroy_sock(struct sock *sk) | |||
830 | /* | 845 | /* |
831 | * Socket option code for UDP | 846 | * Socket option code for UDP |
832 | */ | 847 | */ |
833 | static int do_udpv6_setsockopt(struct sock *sk, int level, int optname, | 848 | int udpv6_setsockopt(struct sock *sk, int level, int optname, |
834 | char __user *optval, int optlen) | 849 | char __user *optval, int optlen) |
835 | { | ||
836 | struct udp_sock *up = udp_sk(sk); | ||
837 | int val; | ||
838 | int err = 0; | ||
839 | |||
840 | if(optlen<sizeof(int)) | ||
841 | return -EINVAL; | ||
842 | |||
843 | if (get_user(val, (int __user *)optval)) | ||
844 | return -EFAULT; | ||
845 | |||
846 | switch(optname) { | ||
847 | case UDP_CORK: | ||
848 | if (val != 0) { | ||
849 | up->corkflag = 1; | ||
850 | } else { | ||
851 | up->corkflag = 0; | ||
852 | lock_sock(sk); | ||
853 | udp_v6_push_pending_frames(sk, up); | ||
854 | release_sock(sk); | ||
855 | } | ||
856 | break; | ||
857 | |||
858 | case UDP_ENCAP: | ||
859 | switch (val) { | ||
860 | case 0: | ||
861 | up->encap_type = val; | ||
862 | break; | ||
863 | default: | ||
864 | err = -ENOPROTOOPT; | ||
865 | break; | ||
866 | } | ||
867 | break; | ||
868 | |||
869 | default: | ||
870 | err = -ENOPROTOOPT; | ||
871 | break; | ||
872 | }; | ||
873 | |||
874 | return err; | ||
875 | } | ||
876 | |||
877 | static int udpv6_setsockopt(struct sock *sk, int level, int optname, | ||
878 | char __user *optval, int optlen) | ||
879 | { | 850 | { |
880 | if (level != SOL_UDP) | 851 | if (level == SOL_UDP || level == SOL_UDPLITE) |
881 | return ipv6_setsockopt(sk, level, optname, optval, optlen); | 852 | return udp_lib_setsockopt(sk, level, optname, optval, optlen, |
882 | return do_udpv6_setsockopt(sk, level, optname, optval, optlen); | 853 | udp_v6_push_pending_frames); |
854 | return ipv6_setsockopt(sk, level, optname, optval, optlen); | ||
883 | } | 855 | } |
884 | 856 | ||
885 | #ifdef CONFIG_COMPAT | 857 | #ifdef CONFIG_COMPAT |
886 | static int compat_udpv6_setsockopt(struct sock *sk, int level, int optname, | 858 | int compat_udpv6_setsockopt(struct sock *sk, int level, int optname, |
887 | char __user *optval, int optlen) | 859 | char __user *optval, int optlen) |
888 | { | 860 | { |
889 | if (level != SOL_UDP) | 861 | if (level == SOL_UDP || level == SOL_UDPLITE) |
890 | return compat_ipv6_setsockopt(sk, level, optname, | 862 | return udp_lib_setsockopt(sk, level, optname, optval, optlen, |
891 | optval, optlen); | 863 | udp_v6_push_pending_frames); |
892 | return do_udpv6_setsockopt(sk, level, optname, optval, optlen); | 864 | return compat_ipv6_setsockopt(sk, level, optname, optval, optlen); |
893 | } | 865 | } |
894 | #endif | 866 | #endif |
895 | 867 | ||
896 | static int do_udpv6_getsockopt(struct sock *sk, int level, int optname, | 868 | int udpv6_getsockopt(struct sock *sk, int level, int optname, |
897 | char __user *optval, int __user *optlen) | 869 | char __user *optval, int __user *optlen) |
898 | { | ||
899 | struct udp_sock *up = udp_sk(sk); | ||
900 | int val, len; | ||
901 | |||
902 | if(get_user(len,optlen)) | ||
903 | return -EFAULT; | ||
904 | |||
905 | len = min_t(unsigned int, len, sizeof(int)); | ||
906 | |||
907 | if(len < 0) | ||
908 | return -EINVAL; | ||
909 | |||
910 | switch(optname) { | ||
911 | case UDP_CORK: | ||
912 | val = up->corkflag; | ||
913 | break; | ||
914 | |||
915 | case UDP_ENCAP: | ||
916 | val = up->encap_type; | ||
917 | break; | ||
918 | |||
919 | default: | ||
920 | return -ENOPROTOOPT; | ||
921 | }; | ||
922 | |||
923 | if(put_user(len, optlen)) | ||
924 | return -EFAULT; | ||
925 | if(copy_to_user(optval, &val,len)) | ||
926 | return -EFAULT; | ||
927 | return 0; | ||
928 | } | ||
929 | |||
930 | static int udpv6_getsockopt(struct sock *sk, int level, int optname, | ||
931 | char __user *optval, int __user *optlen) | ||
932 | { | 870 | { |
933 | if (level != SOL_UDP) | 871 | if (level == SOL_UDP || level == SOL_UDPLITE) |
934 | return ipv6_getsockopt(sk, level, optname, optval, optlen); | 872 | return udp_lib_getsockopt(sk, level, optname, optval, optlen); |
935 | return do_udpv6_getsockopt(sk, level, optname, optval, optlen); | 873 | return ipv6_getsockopt(sk, level, optname, optval, optlen); |
936 | } | 874 | } |
937 | 875 | ||
938 | #ifdef CONFIG_COMPAT | 876 | #ifdef CONFIG_COMPAT |
939 | static int compat_udpv6_getsockopt(struct sock *sk, int level, int optname, | 877 | int compat_udpv6_getsockopt(struct sock *sk, int level, int optname, |
940 | char __user *optval, int __user *optlen) | 878 | char __user *optval, int __user *optlen) |
941 | { | 879 | { |
942 | if (level != SOL_UDP) | 880 | if (level == SOL_UDP || level == SOL_UDPLITE) |
943 | return compat_ipv6_getsockopt(sk, level, optname, | 881 | return udp_lib_getsockopt(sk, level, optname, optval, optlen); |
944 | optval, optlen); | 882 | return compat_ipv6_getsockopt(sk, level, optname, optval, optlen); |
945 | return do_udpv6_getsockopt(sk, level, optname, optval, optlen); | ||
946 | } | 883 | } |
947 | #endif | 884 | #endif |
948 | 885 | ||
@@ -983,7 +920,7 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket | |||
983 | atomic_read(&sp->sk_refcnt), sp); | 920 | atomic_read(&sp->sk_refcnt), sp); |
984 | } | 921 | } |
985 | 922 | ||
986 | static int udp6_seq_show(struct seq_file *seq, void *v) | 923 | int udp6_seq_show(struct seq_file *seq, void *v) |
987 | { | 924 | { |
988 | if (v == SEQ_START_TOKEN) | 925 | if (v == SEQ_START_TOKEN) |
989 | seq_printf(seq, | 926 | seq_printf(seq, |
@@ -1002,6 +939,7 @@ static struct udp_seq_afinfo udp6_seq_afinfo = { | |||
1002 | .owner = THIS_MODULE, | 939 | .owner = THIS_MODULE, |
1003 | .name = "udp6", | 940 | .name = "udp6", |
1004 | .family = AF_INET6, | 941 | .family = AF_INET6, |
942 | .hashtable = udp_hash, | ||
1005 | .seq_show = udp6_seq_show, | 943 | .seq_show = udp6_seq_show, |
1006 | .seq_fops = &udp6_seq_fops, | 944 | .seq_fops = &udp6_seq_fops, |
1007 | }; | 945 | }; |
@@ -1021,7 +959,7 @@ void udp6_proc_exit(void) { | |||
1021 | struct proto udpv6_prot = { | 959 | struct proto udpv6_prot = { |
1022 | .name = "UDPv6", | 960 | .name = "UDPv6", |
1023 | .owner = THIS_MODULE, | 961 | .owner = THIS_MODULE, |
1024 | .close = udpv6_close, | 962 | .close = udp_lib_close, |
1025 | .connect = ip6_datagram_connect, | 963 | .connect = ip6_datagram_connect, |
1026 | .disconnect = udp_disconnect, | 964 | .disconnect = udp_disconnect, |
1027 | .ioctl = udp_ioctl, | 965 | .ioctl = udp_ioctl, |
@@ -1031,8 +969,8 @@ struct proto udpv6_prot = { | |||
1031 | .sendmsg = udpv6_sendmsg, | 969 | .sendmsg = udpv6_sendmsg, |
1032 | .recvmsg = udpv6_recvmsg, | 970 | .recvmsg = udpv6_recvmsg, |
1033 | .backlog_rcv = udpv6_queue_rcv_skb, | 971 | .backlog_rcv = udpv6_queue_rcv_skb, |
1034 | .hash = udp_v6_hash, | 972 | .hash = udp_lib_hash, |
1035 | .unhash = udp_v6_unhash, | 973 | .unhash = udp_lib_unhash, |
1036 | .get_port = udp_v6_get_port, | 974 | .get_port = udp_v6_get_port, |
1037 | .obj_size = sizeof(struct udp6_sock), | 975 | .obj_size = sizeof(struct udp6_sock), |
1038 | #ifdef CONFIG_COMPAT | 976 | #ifdef CONFIG_COMPAT |
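Note: one detail worth calling out from the udp_v6_push_pending_frames() rewrite above is the final "uh->check = CSUM_MANGLED_0" step: a UDP checksum that computes to zero is sent as all ones, because zero on the wire means "no checksum" and that is not permitted over IPv6. A plain-C sketch of that finishing rule, without the kernel's csum_* helpers:

    #include <stdint.h>

    /* Fold a 32-bit checksum accumulator into the final 16-bit UDP checksum
     * and apply the RFC 768 rule: a result of zero is transmitted as 0xffff. */
    static uint16_t udp_checksum_finish(uint32_t sum)
    {
        while (sum >> 16)                    /* fold the carries */
            sum = (sum & 0xffff) + (sum >> 16);

        uint16_t check = (uint16_t)~sum;     /* one's complement */
        return check ? check : 0xffff;       /* CSUM_MANGLED_0 equivalent */
    }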
diff --git a/net/ipv6/udp_impl.h b/net/ipv6/udp_impl.h new file mode 100644 index 000000000000..ec9878899128 --- /dev/null +++ b/net/ipv6/udp_impl.h | |||
@@ -0,0 +1,34 @@ | |||
1 | #ifndef _UDP6_IMPL_H | ||
2 | #define _UDP6_IMPL_H | ||
3 | #include <net/udp.h> | ||
4 | #include <net/udplite.h> | ||
5 | #include <net/protocol.h> | ||
6 | #include <net/addrconf.h> | ||
7 | #include <net/inet_common.h> | ||
8 | |||
9 | extern int __udp6_lib_rcv(struct sk_buff **, struct hlist_head [], int ); | ||
10 | extern void __udp6_lib_err(struct sk_buff *, struct inet6_skb_parm *, | ||
11 | int , int , int , __be32 , struct hlist_head []); | ||
12 | |||
13 | extern int udpv6_getsockopt(struct sock *sk, int level, int optname, | ||
14 | char __user *optval, int __user *optlen); | ||
15 | extern int udpv6_setsockopt(struct sock *sk, int level, int optname, | ||
16 | char __user *optval, int optlen); | ||
17 | #ifdef CONFIG_COMPAT | ||
18 | extern int compat_udpv6_setsockopt(struct sock *sk, int level, int optname, | ||
19 | char __user *optval, int optlen); | ||
20 | extern int compat_udpv6_getsockopt(struct sock *sk, int level, int optname, | ||
21 | char __user *optval, int __user *optlen); | ||
22 | #endif | ||
23 | extern int udpv6_sendmsg(struct kiocb *iocb, struct sock *sk, | ||
24 | struct msghdr *msg, size_t len); | ||
25 | extern int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk, | ||
26 | struct msghdr *msg, size_t len, | ||
27 | int noblock, int flags, int *addr_len); | ||
28 | extern int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb); | ||
29 | extern int udpv6_destroy_sock(struct sock *sk); | ||
30 | |||
31 | #ifdef CONFIG_PROC_FS | ||
32 | extern int udp6_seq_show(struct seq_file *seq, void *v); | ||
33 | #endif | ||
34 | #endif /* _UDP6_IMPL_H */ | ||
diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c new file mode 100644 index 000000000000..629f97162fbc --- /dev/null +++ b/net/ipv6/udplite.c | |||
@@ -0,0 +1,105 @@ | |||
1 | /* | ||
2 | * UDPLITEv6 An implementation of the UDP-Lite protocol over IPv6. | ||
3 | * See also net/ipv4/udplite.c | ||
4 | * | ||
5 | * Version: $Id: udplite.c,v 1.9 2006/10/19 08:28:10 gerrit Exp $ | ||
6 | * | ||
7 | * Authors: Gerrit Renker <gerrit@erg.abdn.ac.uk> | ||
8 | * | ||
9 | * Changes: | ||
10 | * Fixes: | ||
11 | * This program is free software; you can redistribute it and/or | ||
12 | * modify it under the terms of the GNU General Public License | ||
13 | * as published by the Free Software Foundation; either version | ||
14 | * 2 of the License, or (at your option) any later version. | ||
15 | */ | ||
16 | #include "udp_impl.h" | ||
17 | |||
18 | DEFINE_SNMP_STAT(struct udp_mib, udplite_stats_in6) __read_mostly; | ||
19 | |||
20 | static int udplitev6_rcv(struct sk_buff **pskb) | ||
21 | { | ||
22 | return __udp6_lib_rcv(pskb, udplite_hash, 1); | ||
23 | } | ||
24 | |||
25 | static void udplitev6_err(struct sk_buff *skb, | ||
26 | struct inet6_skb_parm *opt, | ||
27 | int type, int code, int offset, __be32 info) | ||
28 | { | ||
29 | return __udp6_lib_err(skb, opt, type, code, offset, info, udplite_hash); | ||
30 | } | ||
31 | |||
32 | static struct inet6_protocol udplitev6_protocol = { | ||
33 | .handler = udplitev6_rcv, | ||
34 | .err_handler = udplitev6_err, | ||
35 | .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL, | ||
36 | }; | ||
37 | |||
38 | static int udplite_v6_get_port(struct sock *sk, unsigned short snum) | ||
39 | { | ||
40 | return udplite_get_port(sk, snum, ipv6_rcv_saddr_equal); | ||
41 | } | ||
42 | |||
43 | struct proto udplitev6_prot = { | ||
44 | .name = "UDPLITEv6", | ||
45 | .owner = THIS_MODULE, | ||
46 | .close = udp_lib_close, | ||
47 | .connect = ip6_datagram_connect, | ||
48 | .disconnect = udp_disconnect, | ||
49 | .ioctl = udp_ioctl, | ||
50 | .init = udplite_sk_init, | ||
51 | .destroy = udpv6_destroy_sock, | ||
52 | .setsockopt = udpv6_setsockopt, | ||
53 | .getsockopt = udpv6_getsockopt, | ||
54 | .sendmsg = udpv6_sendmsg, | ||
55 | .recvmsg = udpv6_recvmsg, | ||
56 | .backlog_rcv = udpv6_queue_rcv_skb, | ||
57 | .hash = udp_lib_hash, | ||
58 | .unhash = udp_lib_unhash, | ||
59 | .get_port = udplite_v6_get_port, | ||
60 | .obj_size = sizeof(struct udp6_sock), | ||
61 | #ifdef CONFIG_COMPAT | ||
62 | .compat_setsockopt = compat_udpv6_setsockopt, | ||
63 | .compat_getsockopt = compat_udpv6_getsockopt, | ||
64 | #endif | ||
65 | }; | ||
66 | |||
67 | static struct inet_protosw udplite6_protosw = { | ||
68 | .type = SOCK_DGRAM, | ||
69 | .protocol = IPPROTO_UDPLITE, | ||
70 | .prot = &udplitev6_prot, | ||
71 | .ops = &inet6_dgram_ops, | ||
72 | .capability = -1, | ||
73 | .no_check = 0, | ||
74 | .flags = INET_PROTOSW_PERMANENT, | ||
75 | }; | ||
76 | |||
77 | void __init udplitev6_init(void) | ||
78 | { | ||
79 | if (inet6_add_protocol(&udplitev6_protocol, IPPROTO_UDPLITE) < 0) | ||
80 | printk(KERN_ERR "%s: Could not register.\n", __FUNCTION__); | ||
81 | |||
82 | inet6_register_protosw(&udplite6_protosw); | ||
83 | } | ||
84 | |||
85 | #ifdef CONFIG_PROC_FS | ||
86 | static struct file_operations udplite6_seq_fops; | ||
87 | static struct udp_seq_afinfo udplite6_seq_afinfo = { | ||
88 | .owner = THIS_MODULE, | ||
89 | .name = "udplite6", | ||
90 | .family = AF_INET6, | ||
91 | .hashtable = udplite_hash, | ||
92 | .seq_show = udp6_seq_show, | ||
93 | .seq_fops = &udplite6_seq_fops, | ||
94 | }; | ||
95 | |||
96 | int __init udplite6_proc_init(void) | ||
97 | { | ||
98 | return udp_proc_register(&udplite6_seq_afinfo); | ||
99 | } | ||
100 | |||
101 | void udplite6_proc_exit(void) | ||
102 | { | ||
103 | udp_proc_unregister(&udplite6_seq_afinfo); | ||
104 | } | ||
105 | #endif | ||
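Note: the new udplitev6_prot above is reached from userspace by opening a datagram socket with the UDP-Lite protocol number and, optionally, restricting the checksum coverage. A hedged usage sketch; the numeric fallbacks (136, 10) match what this patch series introduces, but prefer your libc's own definitions if they exist.

    #include <stdio.h>
    #include <sys/socket.h>
    #include <netinet/in.h>

    #ifndef IPPROTO_UDPLITE
    #define IPPROTO_UDPLITE    136
    #endif
    #ifndef SOL_UDPLITE
    #define SOL_UDPLITE        136
    #endif
    #ifndef UDPLITE_SEND_CSCOV
    #define UDPLITE_SEND_CSCOV 10
    #endif

    int main(void)
    {
        int cov = 20;  /* cover the 8-byte UDP-Lite header + 12 payload bytes */
        int fd = socket(AF_INET6, SOCK_DGRAM, IPPROTO_UDPLITE);

        if (fd < 0 || setsockopt(fd, SOL_UDPLITE, UDPLITE_SEND_CSCOV,
                                 &cov, sizeof(cov)) < 0)
            perror("udplite");
        return 0;
    }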
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c index d400f8fae129..8dffd4daae9c 100644 --- a/net/ipv6/xfrm6_policy.c +++ b/net/ipv6/xfrm6_policy.c | |||
@@ -274,11 +274,12 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl) | |||
274 | break; | 274 | break; |
275 | 275 | ||
276 | case IPPROTO_UDP: | 276 | case IPPROTO_UDP: |
277 | case IPPROTO_UDPLITE: | ||
277 | case IPPROTO_TCP: | 278 | case IPPROTO_TCP: |
278 | case IPPROTO_SCTP: | 279 | case IPPROTO_SCTP: |
279 | case IPPROTO_DCCP: | 280 | case IPPROTO_DCCP: |
280 | if (pskb_may_pull(skb, skb->nh.raw + offset + 4 - skb->data)) { | 281 | if (pskb_may_pull(skb, skb->nh.raw + offset + 4 - skb->data)) { |
281 | u16 *ports = (u16 *)exthdr; | 282 | __be16 *ports = (__be16 *)exthdr; |
282 | 283 | ||
283 | fl->fl_ip_sport = ports[0]; | 284 | fl->fl_ip_sport = ports[0]; |
284 | fl->fl_ip_dport = ports[1]; | 285 | fl->fl_ip_dport = ports[1]; |
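Note: the __be16 annotation above is purely a type-safety change; the bytes read are still the source and destination ports that occupy the first four octets of every transport header handled here (UDP, UDP-Lite, TCP, SCTP, DCCP). A userspace sketch of the same extraction, using memcpy() to stay safe on unaligned headers:

    #include <stdint.h>
    #include <string.h>
    #include <arpa/inet.h>   /* ntohs() */

    struct ports { uint16_t sport, dport; };   /* host byte order */

    static struct ports decode_ports(const uint8_t *transport_hdr)
    {
        uint16_t raw[2];

        memcpy(raw, transport_hdr, sizeof(raw));   /* may be unaligned */
        return (struct ports){ ntohs(raw[0]), ntohs(raw[1]) };
    }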
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c index 7931e4f898d4..01a5c52a2be3 100644 --- a/net/ipv6/xfrm6_tunnel.c +++ b/net/ipv6/xfrm6_tunnel.c | |||
@@ -62,7 +62,7 @@ static unsigned inline xfrm6_tunnel_spi_hash_byaddr(xfrm_address_t *addr) | |||
62 | { | 62 | { |
63 | unsigned h; | 63 | unsigned h; |
64 | 64 | ||
65 | h = addr->a6[0] ^ addr->a6[1] ^ addr->a6[2] ^ addr->a6[3]; | 65 | h = (__force u32)(addr->a6[0] ^ addr->a6[1] ^ addr->a6[2] ^ addr->a6[3]); |
66 | h ^= h >> 16; | 66 | h ^= h >> 16; |
67 | h ^= h >> 8; | 67 | h ^= h >> 8; |
68 | h &= XFRM6_TUNNEL_SPI_BYADDR_HSIZE - 1; | 68 | h &= XFRM6_TUNNEL_SPI_BYADDR_HSIZE - 1; |
@@ -126,7 +126,7 @@ static struct xfrm6_tunnel_spi *__xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr) | |||
126 | return NULL; | 126 | return NULL; |
127 | } | 127 | } |
128 | 128 | ||
129 | u32 xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr) | 129 | __be32 xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr) |
130 | { | 130 | { |
131 | struct xfrm6_tunnel_spi *x6spi; | 131 | struct xfrm6_tunnel_spi *x6spi; |
132 | u32 spi; | 132 | u32 spi; |
@@ -196,7 +196,7 @@ out: | |||
196 | return spi; | 196 | return spi; |
197 | } | 197 | } |
198 | 198 | ||
199 | u32 xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr) | 199 | __be32 xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr) |
200 | { | 200 | { |
201 | struct xfrm6_tunnel_spi *x6spi; | 201 | struct xfrm6_tunnel_spi *x6spi; |
202 | u32 spi; | 202 | u32 spi; |
@@ -265,7 +265,7 @@ static int xfrm6_tunnel_rcv(struct sk_buff *skb) | |||
265 | } | 265 | } |
266 | 266 | ||
267 | static int xfrm6_tunnel_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | 267 | static int xfrm6_tunnel_err(struct sk_buff *skb, struct inet6_skb_parm *opt, |
268 | int type, int code, int offset, __u32 info) | 268 | int type, int code, int offset, __be32 info) |
269 | { | 269 | { |
270 | /* xfrm6_tunnel native err handling */ | 270 | /* xfrm6_tunnel native err handling */ |
271 | switch (type) { | 271 | switch (type) { |
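Note: the __force cast above only silences sparse; the hash itself is unchanged. A standalone sketch of the same XOR-and-fold scheme, with the table size assumed to be a power of two (as the masking requires) and the macro name a stand-in for XFRM6_TUNNEL_SPI_BYADDR_HSIZE:

    #include <stdint.h>

    #define SPI_BYADDR_HSIZE 256   /* assumed table size, power of two */

    /* XOR the four 32-bit words of an IPv6 address, fold the high bits down,
     * and mask the result to the hash table size. */
    static unsigned int spi_hash_byaddr(const uint32_t a6[4])
    {
        uint32_t h = a6[0] ^ a6[1] ^ a6[2] ^ a6[3];

        h ^= h >> 16;
        h ^= h >> 8;
        return h & (SPI_BYADDR_HSIZE - 1);
    }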
diff --git a/net/irda/iriap.c b/net/irda/iriap.c index 415cf4eec23b..8cfd076c4c12 100644 --- a/net/irda/iriap.c +++ b/net/irda/iriap.c | |||
@@ -172,7 +172,7 @@ struct iriap_cb *iriap_open(__u8 slsap_sel, int mode, void *priv, | |||
172 | 172 | ||
173 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__); | 173 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__); |
174 | 174 | ||
175 | self = kmalloc(sizeof(struct iriap_cb), GFP_ATOMIC); | 175 | self = kzalloc(sizeof(*self), GFP_ATOMIC); |
176 | if (!self) { | 176 | if (!self) { |
177 | IRDA_WARNING("%s: Unable to kmalloc!\n", __FUNCTION__); | 177 | IRDA_WARNING("%s: Unable to kmalloc!\n", __FUNCTION__); |
178 | return NULL; | 178 | return NULL; |
@@ -181,7 +181,6 @@ struct iriap_cb *iriap_open(__u8 slsap_sel, int mode, void *priv, | |||
181 | /* | 181 | /* |
182 | * Initialize instance | 182 | * Initialize instance |
183 | */ | 183 | */ |
184 | memset(self, 0, sizeof(struct iriap_cb)); | ||
185 | 184 | ||
186 | self->magic = IAS_MAGIC; | 185 | self->magic = IAS_MAGIC; |
187 | self->mode = mode; | 186 | self->mode = mode; |
@@ -451,12 +450,12 @@ static void iriap_getvaluebyclass_confirm(struct iriap_cb *self, | |||
451 | n = 2; | 450 | n = 2; |
452 | 451 | ||
453 | /* Get length, MSB first */ | 452 | /* Get length, MSB first */ |
454 | len = be16_to_cpu(get_unaligned((__u16 *)(fp+n))); n += 2; | 453 | len = be16_to_cpu(get_unaligned((__be16 *)(fp+n))); n += 2; |
455 | 454 | ||
456 | IRDA_DEBUG(4, "%s(), len=%d\n", __FUNCTION__, len); | 455 | IRDA_DEBUG(4, "%s(), len=%d\n", __FUNCTION__, len); |
457 | 456 | ||
458 | /* Get object ID, MSB first */ | 457 | /* Get object ID, MSB first */ |
459 | obj_id = be16_to_cpu(get_unaligned((__u16 *)(fp+n))); n += 2; | 458 | obj_id = be16_to_cpu(get_unaligned((__be16 *)(fp+n))); n += 2; |
460 | 459 | ||
461 | type = fp[n++]; | 460 | type = fp[n++]; |
462 | IRDA_DEBUG(4, "%s(), Value type = %d\n", __FUNCTION__, type); | 461 | IRDA_DEBUG(4, "%s(), Value type = %d\n", __FUNCTION__, type); |
@@ -506,7 +505,7 @@ static void iriap_getvaluebyclass_confirm(struct iriap_cb *self, | |||
506 | value = irias_new_string_value(fp+n); | 505 | value = irias_new_string_value(fp+n); |
507 | break; | 506 | break; |
508 | case IAS_OCT_SEQ: | 507 | case IAS_OCT_SEQ: |
509 | value_len = be16_to_cpu(get_unaligned((__u16 *)(fp+n))); | 508 | value_len = be16_to_cpu(get_unaligned((__be16 *)(fp+n))); |
510 | n += 2; | 509 | n += 2; |
511 | 510 | ||
512 | /* Will truncate to IAS_MAX_OCTET_STRING bytes */ | 511 | /* Will truncate to IAS_MAX_OCTET_STRING bytes */ |
@@ -544,7 +543,7 @@ static void iriap_getvaluebyclass_response(struct iriap_cb *self, | |||
544 | { | 543 | { |
545 | struct sk_buff *tx_skb; | 544 | struct sk_buff *tx_skb; |
546 | int n; | 545 | int n; |
547 | __u32 tmp_be32; | 546 | __be32 tmp_be32; |
548 | __be16 tmp_be16; | 547 | __be16 tmp_be16; |
549 | __u8 *fp; | 548 | __u8 *fp; |
550 | 549 | ||
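Editor's note: the iriap.c hunks above only change the cast type from __u16 to __be16 so that sparse can check endianness; the generated code is unchanged. A minimal sketch of the pattern, with an assumed helper name and include set that are not part of the patch:

#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>

/* Read a 16-bit big-endian field at a possibly unaligned offset and
 * return it in host byte order; the __be16 cast documents the wire
 * format for sparse without changing runtime behaviour. */
static inline u16 read_be16_field(__u8 *fp, int n)
{
	return be16_to_cpu(get_unaligned((__be16 *)(fp + n)));
}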
diff --git a/net/irda/irias_object.c b/net/irda/irias_object.c index 56292ab7d652..b1ee99a59c0c 100644 --- a/net/irda/irias_object.c +++ b/net/irda/irias_object.c | |||
@@ -501,13 +501,12 @@ struct ias_value *irias_new_octseq_value(__u8 *octseq , int len) | |||
501 | len = IAS_MAX_OCTET_STRING; | 501 | len = IAS_MAX_OCTET_STRING; |
502 | value->len = len; | 502 | value->len = len; |
503 | 503 | ||
504 | value->t.oct_seq = kmalloc(len, GFP_ATOMIC); | 504 | value->t.oct_seq = kmemdup(octseq, len, GFP_ATOMIC); |
505 | if (value->t.oct_seq == NULL){ | 505 | if (value->t.oct_seq == NULL){ |
506 | IRDA_WARNING("%s: Unable to kmalloc!\n", __FUNCTION__); | 506 | IRDA_WARNING("%s: Unable to kmalloc!\n", __FUNCTION__); |
507 | kfree(value); | 507 | kfree(value); |
508 | return NULL; | 508 | return NULL; |
509 | } | 509 | } |
510 | memcpy(value->t.oct_seq, octseq , len); | ||
511 | return value; | 510 | return value; |
512 | } | 511 | } |
513 | 512 | ||
@@ -522,7 +521,6 @@ struct ias_value *irias_new_missing_value(void) | |||
522 | } | 521 | } |
523 | 522 | ||
524 | value->type = IAS_MISSING; | 523 | value->type = IAS_MISSING; |
525 | value->len = 0; | ||
526 | 524 | ||
527 | return value; | 525 | return value; |
528 | } | 526 | } |
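Editor's note: several of the irda hunks (iriap.c and irqueue.c for kzalloc, irias_object.c and irlmp.c for kmemdup) replace open-coded kmalloc()+memset()/memcpy() sequences with single helpers. A rough sketch of what those helpers do, assuming only the allocate-and-initialize behaviour matters here; the real implementations live elsewhere in the tree:

#include <linux/slab.h>
#include <linux/string.h>

/* kzalloc(): allocate and zero in one call. */
static void *kzalloc_like(size_t size, gfp_t gfp)
{
	void *p = kmalloc(size, gfp);

	if (p)
		memset(p, 0, size);
	return p;
}

/* kmemdup(): allocate and copy an existing buffer in one call. */
static void *kmemdup_like(const void *src, size_t len, gfp_t gfp)
{
	void *p = kmalloc(len, gfp);

	if (p)
		memcpy(p, src, len);
	return p;
}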
diff --git a/net/irda/irlan/irlan_common.c b/net/irda/irlan/irlan_common.c index 9b962f247714..2bb04ac09329 100644 --- a/net/irda/irlan/irlan_common.c +++ b/net/irda/irlan/irlan_common.c | |||
@@ -995,7 +995,7 @@ static int __irlan_insert_param(struct sk_buff *skb, char *param, int type, | |||
995 | { | 995 | { |
996 | __u8 *frame; | 996 | __u8 *frame; |
997 | __u8 param_len; | 997 | __u8 param_len; |
998 | __u16 tmp_le; /* Temporary value in little endian format */ | 998 | __le16 tmp_le; /* Temporary value in little endian format */ |
999 | int n=0; | 999 | int n=0; |
1000 | 1000 | ||
1001 | if (skb == NULL) { | 1001 | if (skb == NULL) { |
diff --git a/net/irda/irlmp.c b/net/irda/irlmp.c index fede83763095..7e5d12ab3b90 100644 --- a/net/irda/irlmp.c +++ b/net/irda/irlmp.c | |||
@@ -641,15 +641,13 @@ struct lsap_cb *irlmp_dup(struct lsap_cb *orig, void *instance) | |||
641 | } | 641 | } |
642 | 642 | ||
643 | /* Allocate a new instance */ | 643 | /* Allocate a new instance */ |
644 | new = kmalloc(sizeof(struct lsap_cb), GFP_ATOMIC); | 644 | new = kmemdup(orig, sizeof(*new), GFP_ATOMIC); |
645 | if (!new) { | 645 | if (!new) { |
646 | IRDA_DEBUG(0, "%s(), unable to kmalloc\n", __FUNCTION__); | 646 | IRDA_DEBUG(0, "%s(), unable to kmalloc\n", __FUNCTION__); |
647 | spin_unlock_irqrestore(&irlmp->unconnected_lsaps->hb_spinlock, | 647 | spin_unlock_irqrestore(&irlmp->unconnected_lsaps->hb_spinlock, |
648 | flags); | 648 | flags); |
649 | return NULL; | 649 | return NULL; |
650 | } | 650 | } |
651 | /* Dup */ | ||
652 | memcpy(new, orig, sizeof(struct lsap_cb)); | ||
653 | /* new->lap = orig->lap; => done in the memcpy() */ | 651 | /* new->lap = orig->lap; => done in the memcpy() */ |
654 | /* new->slsap_sel = orig->slsap_sel; => done in the memcpy() */ | 652 | /* new->slsap_sel = orig->slsap_sel; => done in the memcpy() */ |
655 | new->conn_skb = NULL; | 653 | new->conn_skb = NULL; |
diff --git a/net/irda/irqueue.c b/net/irda/irqueue.c index 1ba8c7106639..1d26cd33ea13 100644 --- a/net/irda/irqueue.c +++ b/net/irda/irqueue.c | |||
@@ -356,14 +356,13 @@ hashbin_t *hashbin_new(int type) | |||
356 | /* | 356 | /* |
357 | * Allocate new hashbin | 357 | * Allocate new hashbin |
358 | */ | 358 | */ |
359 | hashbin = kmalloc( sizeof(hashbin_t), GFP_ATOMIC); | 359 | hashbin = kzalloc(sizeof(*hashbin), GFP_ATOMIC); |
360 | if (!hashbin) | 360 | if (!hashbin) |
361 | return NULL; | 361 | return NULL; |
362 | 362 | ||
363 | /* | 363 | /* |
364 | * Initialize structure | 364 | * Initialize structure |
365 | */ | 365 | */ |
366 | memset(hashbin, 0, sizeof(hashbin_t)); | ||
367 | hashbin->hb_type = type; | 366 | hashbin->hb_type = type; |
368 | hashbin->magic = HB_MAGIC; | 367 | hashbin->magic = HB_MAGIC; |
369 | //hashbin->hb_current = NULL; | 368 | //hashbin->hb_current = NULL; |
diff --git a/net/irda/irttp.c b/net/irda/irttp.c index 3c2e70b77df1..9c446a72ff1f 100644 --- a/net/irda/irttp.c +++ b/net/irda/irttp.c | |||
@@ -1147,7 +1147,7 @@ int irttp_connect_request(struct tsap_cb *self, __u8 dtsap_sel, | |||
1147 | frame[3] = 0x02; /* Value length */ | 1147 | frame[3] = 0x02; /* Value length */ |
1148 | 1148 | ||
1149 | put_unaligned(cpu_to_be16((__u16) max_sdu_size), | 1149 | put_unaligned(cpu_to_be16((__u16) max_sdu_size), |
1150 | (__u16 *)(frame+4)); | 1150 | (__be16 *)(frame+4)); |
1151 | } else { | 1151 | } else { |
1152 | /* Insert plain TTP header */ | 1152 | /* Insert plain TTP header */ |
1153 | frame = skb_push(tx_skb, TTP_HEADER); | 1153 | frame = skb_push(tx_skb, TTP_HEADER); |
@@ -1394,7 +1394,7 @@ int irttp_connect_response(struct tsap_cb *self, __u32 max_sdu_size, | |||
1394 | frame[3] = 0x02; /* Value length */ | 1394 | frame[3] = 0x02; /* Value length */ |
1395 | 1395 | ||
1396 | put_unaligned(cpu_to_be16((__u16) max_sdu_size), | 1396 | put_unaligned(cpu_to_be16((__u16) max_sdu_size), |
1397 | (__u16 *)(frame+4)); | 1397 | (__be16 *)(frame+4)); |
1398 | } else { | 1398 | } else { |
1399 | /* Insert TTP header */ | 1399 | /* Insert TTP header */ |
1400 | frame = skb_push(tx_skb, TTP_HEADER); | 1400 | frame = skb_push(tx_skb, TTP_HEADER); |
diff --git a/net/key/af_key.c b/net/key/af_key.c index 20ff7cca1d07..0e1dbfbb9b10 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c | |||
@@ -1767,11 +1767,11 @@ parse_ipsecrequest(struct xfrm_policy *xp, struct sadb_x_ipsecrequest *rq) | |||
1767 | 1767 | ||
1768 | /* addresses present only in tunnel mode */ | 1768 | /* addresses present only in tunnel mode */ |
1769 | if (t->mode == XFRM_MODE_TUNNEL) { | 1769 | if (t->mode == XFRM_MODE_TUNNEL) { |
1770 | switch (xp->family) { | 1770 | struct sockaddr *sa; |
1771 | sa = (struct sockaddr *)(rq+1); | ||
1772 | switch(sa->sa_family) { | ||
1771 | case AF_INET: | 1773 | case AF_INET: |
1772 | sin = (void*)(rq+1); | 1774 | sin = (struct sockaddr_in*)sa; |
1773 | if (sin->sin_family != AF_INET) | ||
1774 | return -EINVAL; | ||
1775 | t->saddr.a4 = sin->sin_addr.s_addr; | 1775 | t->saddr.a4 = sin->sin_addr.s_addr; |
1776 | sin++; | 1776 | sin++; |
1777 | if (sin->sin_family != AF_INET) | 1777 | if (sin->sin_family != AF_INET) |
@@ -1780,9 +1780,7 @@ parse_ipsecrequest(struct xfrm_policy *xp, struct sadb_x_ipsecrequest *rq) | |||
1780 | break; | 1780 | break; |
1781 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | 1781 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) |
1782 | case AF_INET6: | 1782 | case AF_INET6: |
1783 | sin6 = (void *)(rq+1); | 1783 | sin6 = (struct sockaddr_in6*)sa; |
1784 | if (sin6->sin6_family != AF_INET6) | ||
1785 | return -EINVAL; | ||
1786 | memcpy(t->saddr.a6, &sin6->sin6_addr, sizeof(struct in6_addr)); | 1784 | memcpy(t->saddr.a6, &sin6->sin6_addr, sizeof(struct in6_addr)); |
1787 | sin6++; | 1785 | sin6++; |
1788 | if (sin6->sin6_family != AF_INET6) | 1786 | if (sin6->sin6_family != AF_INET6) |
@@ -1793,7 +1791,10 @@ parse_ipsecrequest(struct xfrm_policy *xp, struct sadb_x_ipsecrequest *rq) | |||
1793 | default: | 1791 | default: |
1794 | return -EINVAL; | 1792 | return -EINVAL; |
1795 | } | 1793 | } |
1796 | } | 1794 | t->encap_family = sa->sa_family; |
1795 | } else | ||
1796 | t->encap_family = xp->family; | ||
1797 | |||
1797 | /* No way to set this via kame pfkey */ | 1798 | /* No way to set this via kame pfkey */ |
1798 | t->aalgos = t->ealgos = t->calgos = ~0; | 1799 | t->aalgos = t->ealgos = t->calgos = ~0; |
1799 | xp->xfrm_nr++; | 1800 | xp->xfrm_nr++; |
@@ -1830,18 +1831,25 @@ static inline int pfkey_xfrm_policy2sec_ctx_size(struct xfrm_policy *xp) | |||
1830 | 1831 | ||
1831 | static int pfkey_xfrm_policy2msg_size(struct xfrm_policy *xp) | 1832 | static int pfkey_xfrm_policy2msg_size(struct xfrm_policy *xp) |
1832 | { | 1833 | { |
1834 | struct xfrm_tmpl *t; | ||
1833 | int sockaddr_size = pfkey_sockaddr_size(xp->family); | 1835 | int sockaddr_size = pfkey_sockaddr_size(xp->family); |
1834 | int socklen = (xp->family == AF_INET ? | 1836 | int socklen = 0; |
1835 | sizeof(struct sockaddr_in) : | 1837 | int i; |
1836 | sizeof(struct sockaddr_in6)); | 1838 | |
1839 | for (i=0; i<xp->xfrm_nr; i++) { | ||
1840 | t = xp->xfrm_vec + i; | ||
1841 | socklen += (t->encap_family == AF_INET ? | ||
1842 | sizeof(struct sockaddr_in) : | ||
1843 | sizeof(struct sockaddr_in6)); | ||
1844 | } | ||
1837 | 1845 | ||
1838 | return sizeof(struct sadb_msg) + | 1846 | return sizeof(struct sadb_msg) + |
1839 | (sizeof(struct sadb_lifetime) * 3) + | 1847 | (sizeof(struct sadb_lifetime) * 3) + |
1840 | (sizeof(struct sadb_address) * 2) + | 1848 | (sizeof(struct sadb_address) * 2) + |
1841 | (sockaddr_size * 2) + | 1849 | (sockaddr_size * 2) + |
1842 | sizeof(struct sadb_x_policy) + | 1850 | sizeof(struct sadb_x_policy) + |
1843 | (xp->xfrm_nr * (sizeof(struct sadb_x_ipsecrequest) + | 1851 | (xp->xfrm_nr * sizeof(struct sadb_x_ipsecrequest)) + |
1844 | (socklen * 2))) + | 1852 | (socklen * 2) + |
1845 | pfkey_xfrm_policy2sec_ctx_size(xp); | 1853 | pfkey_xfrm_policy2sec_ctx_size(xp); |
1846 | } | 1854 | } |
1847 | 1855 | ||
@@ -1999,7 +2007,9 @@ static void pfkey_xfrm_policy2msg(struct sk_buff *skb, struct xfrm_policy *xp, i | |||
1999 | 2007 | ||
2000 | req_size = sizeof(struct sadb_x_ipsecrequest); | 2008 | req_size = sizeof(struct sadb_x_ipsecrequest); |
2001 | if (t->mode == XFRM_MODE_TUNNEL) | 2009 | if (t->mode == XFRM_MODE_TUNNEL) |
2002 | req_size += 2*socklen; | 2010 | req_size += ((t->encap_family == AF_INET ? |
2011 | sizeof(struct sockaddr_in) : | ||
2012 | sizeof(struct sockaddr_in6)) * 2); | ||
2003 | else | 2013 | else |
2004 | size -= 2*socklen; | 2014 | size -= 2*socklen; |
2005 | rq = (void*)skb_put(skb, req_size); | 2015 | rq = (void*)skb_put(skb, req_size); |
@@ -2015,7 +2025,7 @@ static void pfkey_xfrm_policy2msg(struct sk_buff *skb, struct xfrm_policy *xp, i | |||
2015 | rq->sadb_x_ipsecrequest_level = IPSEC_LEVEL_USE; | 2025 | rq->sadb_x_ipsecrequest_level = IPSEC_LEVEL_USE; |
2016 | rq->sadb_x_ipsecrequest_reqid = t->reqid; | 2026 | rq->sadb_x_ipsecrequest_reqid = t->reqid; |
2017 | if (t->mode == XFRM_MODE_TUNNEL) { | 2027 | if (t->mode == XFRM_MODE_TUNNEL) { |
2018 | switch (xp->family) { | 2028 | switch (t->encap_family) { |
2019 | case AF_INET: | 2029 | case AF_INET: |
2020 | sin = (void*)(rq+1); | 2030 | sin = (void*)(rq+1); |
2021 | sin->sin_family = AF_INET; | 2031 | sin->sin_family = AF_INET; |
@@ -2938,7 +2948,7 @@ out: | |||
2938 | return NULL; | 2948 | return NULL; |
2939 | } | 2949 | } |
2940 | 2950 | ||
2941 | static int pfkey_send_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, u16 sport) | 2951 | static int pfkey_send_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport) |
2942 | { | 2952 | { |
2943 | struct sk_buff *skb; | 2953 | struct sk_buff *skb; |
2944 | struct sadb_msg *hdr; | 2954 | struct sadb_msg *hdr; |
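Editor's note: the af_key.c changes above size and fill the per-template sockaddr pairs from each template's own encap_family instead of the policy-wide xp->family. A hypothetical helper (the name is mine, not in the patch) restating the sizing rule used in the pfkey_xfrm_policy2msg_size() loop:

#include <net/xfrm.h>
#include <linux/in.h>
#include <linux/in6.h>

/* One sockaddr worth of space for a template; the caller doubles this
 * for the source/destination address pair carried in tunnel mode. */
static int pfkey_tmpl_sockaddr_len(const struct xfrm_tmpl *t)
{
	return t->encap_family == AF_INET ?
	       sizeof(struct sockaddr_in) : sizeof(struct sockaddr_in6);
}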
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index 2652ead96c64..190bb3e05188 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c | |||
@@ -64,7 +64,7 @@ static inline u16 llc_ui_next_link_no(int sap) | |||
64 | * | 64 | * |
65 | * Given an ARP header type return the corresponding ethernet protocol. | 65 | * Given an ARP header type return the corresponding ethernet protocol. |
66 | */ | 66 | */ |
67 | static inline u16 llc_proto_type(u16 arphrd) | 67 | static inline __be16 llc_proto_type(u16 arphrd) |
68 | { | 68 | { |
69 | return arphrd == ARPHRD_IEEE802_TR ? | 69 | return arphrd == ARPHRD_IEEE802_TR ? |
70 | htons(ETH_P_TR_802_2) : htons(ETH_P_802_2); | 70 | htons(ETH_P_TR_802_2) : htons(ETH_P_802_2); |
diff --git a/net/llc/llc_input.c b/net/llc/llc_input.c index 94d2368ade92..db82aff6e40f 100644 --- a/net/llc/llc_input.c +++ b/net/llc/llc_input.c | |||
@@ -115,8 +115,8 @@ static inline int llc_fixup_skb(struct sk_buff *skb) | |||
115 | skb->h.raw += llc_len; | 115 | skb->h.raw += llc_len; |
116 | skb_pull(skb, llc_len); | 116 | skb_pull(skb, llc_len); |
117 | if (skb->protocol == htons(ETH_P_802_2)) { | 117 | if (skb->protocol == htons(ETH_P_802_2)) { |
118 | u16 pdulen = eth_hdr(skb)->h_proto, | 118 | __be16 pdulen = eth_hdr(skb)->h_proto; |
119 | data_size = ntohs(pdulen) - llc_len; | 119 | u16 data_size = ntohs(pdulen) - llc_len; |
120 | 120 | ||
121 | if (unlikely(pskb_trim_rcsum(skb, data_size))) | 121 | if (unlikely(pskb_trim_rcsum(skb, data_size))) |
122 | return 0; | 122 | return 0; |
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index f619c6527266..3a66878a1829 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig | |||
@@ -25,19 +25,57 @@ config NETFILTER_NETLINK_LOG | |||
25 | and is also scheduled to replace the old syslog-based ipt_LOG | 25 | and is also scheduled to replace the old syslog-based ipt_LOG |
26 | and ip6t_LOG modules. | 26 | and ip6t_LOG modules. |
27 | 27 | ||
28 | config NF_CONNTRACK | 28 | config NF_CONNTRACK_ENABLED |
29 | tristate "Layer 3 Independent Connection tracking (EXPERIMENTAL)" | 29 | tristate "Netfilter connection tracking support" |
30 | depends on EXPERIMENTAL && IP_NF_CONNTRACK=n | 30 | help |
31 | default n | ||
32 | ---help--- | ||
33 | Connection tracking keeps a record of what packets have passed | 31 | Connection tracking keeps a record of what packets have passed |
34 | through your machine, in order to figure out how they are related | 32 | through your machine, in order to figure out how they are related |
35 | into connections. | 33 | into connections. |
36 | 34 | ||
35 | This is required to do Masquerading or other kinds of Network | ||
36 | Address Translation (except for Fast NAT). It can also be used to | ||
37 | enhance packet filtering (see `Connection state match support' | ||
38 | below). | ||
39 | |||
40 | To compile it as a module, choose M here. If unsure, say N. | ||
41 | |||
42 | choice | ||
43 | prompt "Netfilter connection tracking support" | ||
44 | depends on NF_CONNTRACK_ENABLED | ||
45 | |||
46 | config NF_CONNTRACK_SUPPORT | ||
47 | bool "Layer 3 Independent Connection tracking (EXPERIMENTAL)" | ||
48 | depends on EXPERIMENTAL | ||
49 | help | ||
37 | Layer 3 independent connection tracking is an experimental scheme | 50 | Layer 3 independent connection tracking is an experimental scheme |
38 | which generalizes ip_conntrack to support other layer 3 protocols. | 51 | which generalizes ip_conntrack to support other layer 3 protocols. |
39 | 52 | ||
40 | To compile it as a module, choose M here. If unsure, say N. | 53 | This is required to do Masquerading or other kinds of Network |
54 | Address Translation (except for Fast NAT). It can also be used to | ||
55 | enhance packet filtering (see `Connection state match support' | ||
56 | below). | ||
57 | |||
58 | config IP_NF_CONNTRACK_SUPPORT | ||
59 | bool "Layer 3 Dependent Connection tracking (OBSOLETE)" | ||
60 | help | ||
61 | The old, Layer 3 dependent ip_conntrack subsystem of netfilter. | ||
62 | |||
63 | This is required to do Masquerading or other kinds of Network | ||
64 | Address Translation (except for Fast NAT). It can also be used to | ||
65 | enhance packet filtering (see `Connection state match support' | ||
66 | below). | ||
67 | |||
68 | endchoice | ||
69 | |||
70 | config NF_CONNTRACK | ||
71 | tristate | ||
72 | default m if NF_CONNTRACK_SUPPORT && NF_CONNTRACK_ENABLED=m | ||
73 | default y if NF_CONNTRACK_SUPPORT && NF_CONNTRACK_ENABLED=y | ||
74 | |||
75 | config IP_NF_CONNTRACK | ||
76 | tristate | ||
77 | default m if IP_NF_CONNTRACK_SUPPORT && NF_CONNTRACK_ENABLED=m | ||
78 | default y if IP_NF_CONNTRACK_SUPPORT && NF_CONNTRACK_ENABLED=y | ||
41 | 79 | ||
42 | config NF_CT_ACCT | 80 | config NF_CT_ACCT |
43 | bool "Connection tracking flow accounting" | 81 | bool "Connection tracking flow accounting" |
@@ -82,8 +120,12 @@ config NF_CONNTRACK_EVENTS | |||
82 | 120 | ||
83 | If unsure, say `N'. | 121 | If unsure, say `N'. |
84 | 122 | ||
123 | config NF_CT_PROTO_GRE | ||
124 | tristate | ||
125 | depends on EXPERIMENTAL && NF_CONNTRACK | ||
126 | |||
85 | config NF_CT_PROTO_SCTP | 127 | config NF_CT_PROTO_SCTP |
86 | tristate 'SCTP protocol on new connection tracking support (EXPERIMENTAL)' | 128 | tristate 'SCTP protocol connection tracking support (EXPERIMENTAL)' |
87 | depends on EXPERIMENTAL && NF_CONNTRACK | 129 | depends on EXPERIMENTAL && NF_CONNTRACK |
88 | default n | 130 | default n |
89 | help | 131 | help |
@@ -93,8 +135,23 @@ config NF_CT_PROTO_SCTP | |||
93 | If you want to compile it as a module, say M here and read | 135 | If you want to compile it as a module, say M here and read |
94 | Documentation/modules.txt. If unsure, say `N'. | 136 | Documentation/modules.txt. If unsure, say `N'. |
95 | 137 | ||
138 | config NF_CONNTRACK_AMANDA | ||
139 | tristate "Amanda backup protocol support (EXPERIMENTAL)" | ||
140 | depends on EXPERIMENTAL && NF_CONNTRACK | ||
141 | select TEXTSEARCH | ||
142 | select TEXTSEARCH_KMP | ||
143 | help | ||
144 | If you are running the Amanda backup package <http://www.amanda.org/> | ||
145 | on this machine or machines that will be MASQUERADED through this | ||
146 | machine, then you may want to enable this feature. This allows the | ||
147 | connection tracking and NAT code to open the sub-channels that | ||
148 | Amanda requires for communication of the backup data, messages and | ||
149 | index. | ||
150 | |||
151 | To compile it as a module, choose M here. If unsure, say N. | ||
152 | |||
96 | config NF_CONNTRACK_FTP | 153 | config NF_CONNTRACK_FTP |
97 | tristate "FTP support on new connection tracking (EXPERIMENTAL)" | 154 | tristate "FTP protocol support (EXPERIMENTAL)" |
98 | depends on EXPERIMENTAL && NF_CONNTRACK | 155 | depends on EXPERIMENTAL && NF_CONNTRACK |
99 | help | 156 | help |
100 | Tracking FTP connections is problematic: special helpers are | 157 | Tracking FTP connections is problematic: special helpers are |
@@ -107,6 +164,101 @@ config NF_CONNTRACK_FTP | |||
107 | 164 | ||
108 | To compile it as a module, choose M here. If unsure, say N. | 165 | To compile it as a module, choose M here. If unsure, say N. |
109 | 166 | ||
167 | config NF_CONNTRACK_H323 | ||
168 | tristate "H.323 protocol support (EXPERIMENTAL)" | ||
169 | depends on EXPERIMENTAL && NF_CONNTRACK | ||
170 | help | ||
171 | H.323 is a VoIP signalling protocol from ITU-T. As one of the most | ||
172 | important VoIP protocols, it is widely used by voice hardware and | ||
173 | software including voice gateways, IP phones, Netmeeting, OpenPhone, | ||
174 | Gnomemeeting, etc. | ||
175 | |||
176 | With this module you can support H.323 on a connection tracking/NAT | ||
177 | firewall. | ||
178 | |||
179 | This module supports RAS, Fast Start, H.245 Tunnelling, Call | ||
180 | Forwarding, RTP/RTCP and T.120 based audio, video, fax, chat, | ||
181 | whiteboard, file transfer, etc. For more information, please | ||
182 | visit http://nath323.sourceforge.net/. | ||
183 | |||
184 | To compile it as a module, choose M here. If unsure, say N. | ||
185 | |||
186 | config NF_CONNTRACK_IRC | ||
187 | tristate "IRC protocol support (EXPERIMENTAL)" | ||
188 | depends on EXPERIMENTAL && NF_CONNTRACK | ||
189 | help | ||
190 | There is a commonly-used extension to IRC called | ||
191 | Direct Client-to-Client Protocol (DCC). This enables users to send | ||
192 | files to each other, and also chat to each other without the need | ||
193 | of a server. DCC Sending is used anywhere you send files over IRC, | ||
194 | and DCC Chat is most commonly used by Eggdrop bots. If you are | ||
195 | using NAT, this extension will enable you to send files and initiate | ||
196 | chats. Note that you do NOT need this extension to get files or | ||
197 | have others initiate chats, or do anything else in IRC. | ||
198 | |||
199 | To compile it as a module, choose M here. If unsure, say N. | ||
200 | |||
201 | config NF_CONNTRACK_NETBIOS_NS | ||
202 | tristate "NetBIOS name service protocol support (EXPERIMENTAL)" | ||
203 | depends on EXPERIMENTAL && NF_CONNTRACK | ||
204 | help | ||
205 | NetBIOS name service requests are sent as broadcast messages from an | ||
206 | unprivileged port and responded to with unicast messages to the | ||
207 | same port. This makes them hard to firewall properly because connection | ||
208 | tracking doesn't deal with broadcasts. This helper tracks locally | ||
209 | originating NetBIOS name service requests and the corresponding | ||
210 | responses. It relies on correct IP address configuration, specifically | ||
211 | netmask and broadcast address. When properly configured, the output | ||
212 | of "ip address show" should look similar to this: | ||
213 | |||
214 | $ ip -4 address show eth0 | ||
215 | 4: eth0: <BROADCAST,MULTICAST,UP> mtu 1500 qdisc pfifo_fast qlen 1000 | ||
216 | inet 172.16.2.252/24 brd 172.16.2.255 scope global eth0 | ||
217 | |||
218 | To compile it as a module, choose M here. If unsure, say N. | ||
219 | |||
220 | config NF_CONNTRACK_PPTP | ||
221 | tristate "PPtP protocol support (EXPERIMENTAL)" | ||
222 | depends on EXPERIMENTAL && NF_CONNTRACK | ||
223 | select NF_CT_PROTO_GRE | ||
224 | help | ||
225 | This module adds support for PPTP (Point to Point Tunnelling | ||
226 | Protocol, RFC2637) connection tracking and NAT. | ||
227 | |||
228 | If you are running PPTP sessions over a stateful firewall or NAT | ||
229 | box, you may want to enable this feature. | ||
230 | |||
231 | Please note that not all PPTP modes of operation are supported yet. | ||
232 | Specifically these limitations exist: | ||
233 | - Blindly assumes that control connections are always established | ||
234 | in PNS->PAC direction. This is a violation of RFC2637. | ||
235 | - Only supports a single call within each session | ||
236 | |||
237 | To compile it as a module, choose M here. If unsure, say N. | ||
238 | |||
239 | config NF_CONNTRACK_SIP | ||
240 | tristate "SIP protocol support (EXPERIMENTAL)" | ||
241 | depends on EXPERIMENTAL && NF_CONNTRACK | ||
242 | help | ||
243 | SIP is an application-layer control protocol that can establish, | ||
244 | modify, and terminate multimedia sessions (conferences) such as | ||
245 | Internet telephony calls. With the nf_conntrack_sip and | ||
246 | the nf_nat_sip modules you can support the protocol on a connection | ||
247 | tracking/NATing firewall. | ||
248 | |||
249 | To compile it as a module, choose M here. If unsure, say N. | ||
250 | |||
251 | config NF_CONNTRACK_TFTP | ||
252 | tristate "TFTP protocol support (EXPERIMENTAL)" | ||
253 | depends on EXPERIMENTAL && NF_CONNTRACK | ||
254 | help | ||
255 | TFTP connection tracking helper; this is required depending | ||
256 | on how restrictive your ruleset is. | ||
257 | If you are using a tftp client behind -j SNAT or -j MASQUERADE | ||
258 | you will need this. | ||
259 | |||
260 | To compile it as a module, choose M here. If unsure, say N. | ||
261 | |||
110 | config NF_CT_NETLINK | 262 | config NF_CT_NETLINK |
111 | tristate 'Connection tracking netlink interface (EXPERIMENTAL)' | 263 | tristate 'Connection tracking netlink interface (EXPERIMENTAL)' |
112 | depends on EXPERIMENTAL && NF_CONNTRACK && NETFILTER_NETLINK | 264 | depends on EXPERIMENTAL && NF_CONNTRACK && NETFILTER_NETLINK |
@@ -184,6 +336,17 @@ config NETFILTER_XT_TARGET_NFQUEUE | |||
184 | 336 | ||
185 | To compile it as a module, choose M here. If unsure, say N. | 337 | To compile it as a module, choose M here. If unsure, say N. |
186 | 338 | ||
339 | config NETFILTER_XT_TARGET_NFLOG | ||
340 | tristate '"NFLOG" target support' | ||
341 | depends on NETFILTER_XTABLES | ||
342 | help | ||
343 | This option enables the NFLOG target, which allows you to LOG | ||
344 | messages through the netfilter logging API, which can use | ||
345 | either the old LOG target, the old ULOG target or nfnetlink_log | ||
346 | as backend. | ||
347 | |||
348 | To compile it as a module, choose M here. If unsure, say N. | ||
349 | |||
187 | config NETFILTER_XT_TARGET_NOTRACK | 350 | config NETFILTER_XT_TARGET_NOTRACK |
188 | tristate '"NOTRACK" target support' | 351 | tristate '"NOTRACK" target support' |
189 | depends on NETFILTER_XTABLES | 352 | depends on NETFILTER_XTABLES |
@@ -464,5 +627,19 @@ config NETFILTER_XT_MATCH_TCPMSS | |||
464 | 627 | ||
465 | To compile it as a module, choose M here. If unsure, say N. | 628 | To compile it as a module, choose M here. If unsure, say N. |
466 | 629 | ||
630 | config NETFILTER_XT_MATCH_HASHLIMIT | ||
631 | tristate '"hashlimit" match support' | ||
632 | depends on NETFILTER_XTABLES | ||
633 | help | ||
634 | This option adds a `hashlimit' match. | ||
635 | |||
636 | As opposed to `limit', this match dynamically creates a hash table | ||
637 | of limit buckets, based on your selection of source/destination | ||
638 | addresses and/or ports. | ||
639 | |||
640 | It enables you to express policies like `10kpps for any given | ||
641 | destination address' or `500pps from any given source address' | ||
642 | with a single rule. | ||
643 | |||
467 | endmenu | 644 | endmenu |
468 | 645 | ||
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile index a74be492fd0a..5dc5574f7e99 100644 --- a/net/netfilter/Makefile +++ b/net/netfilter/Makefile | |||
@@ -1,7 +1,10 @@ | |||
1 | netfilter-objs := core.o nf_log.o nf_queue.o nf_sockopt.o | 1 | netfilter-objs := core.o nf_log.o nf_queue.o nf_sockopt.o |
2 | nf_conntrack-objs := nf_conntrack_core.o nf_conntrack_standalone.o nf_conntrack_l3proto_generic.o nf_conntrack_proto_generic.o nf_conntrack_proto_tcp.o nf_conntrack_proto_udp.o | 2 | |
3 | nf_conntrack-y := nf_conntrack_core.o nf_conntrack_standalone.o nf_conntrack_expect.o nf_conntrack_helper.o nf_conntrack_proto.o nf_conntrack_l3proto_generic.o nf_conntrack_proto_generic.o nf_conntrack_proto_tcp.o nf_conntrack_proto_udp.o | ||
4 | nf_conntrack-$(CONFIG_NF_CONNTRACK_EVENTS) += nf_conntrack_ecache.o | ||
3 | 5 | ||
4 | obj-$(CONFIG_NETFILTER) = netfilter.o | 6 | obj-$(CONFIG_NETFILTER) = netfilter.o |
7 | obj-$(CONFIG_SYSCTL) += nf_sysctl.o | ||
5 | 8 | ||
6 | obj-$(CONFIG_NETFILTER_NETLINK) += nfnetlink.o | 9 | obj-$(CONFIG_NETFILTER_NETLINK) += nfnetlink.o |
7 | obj-$(CONFIG_NETFILTER_NETLINK_QUEUE) += nfnetlink_queue.o | 10 | obj-$(CONFIG_NETFILTER_NETLINK_QUEUE) += nfnetlink_queue.o |
@@ -11,13 +14,23 @@ obj-$(CONFIG_NETFILTER_NETLINK_LOG) += nfnetlink_log.o | |||
11 | obj-$(CONFIG_NF_CONNTRACK) += nf_conntrack.o | 14 | obj-$(CONFIG_NF_CONNTRACK) += nf_conntrack.o |
12 | 15 | ||
13 | # SCTP protocol connection tracking | 16 | # SCTP protocol connection tracking |
17 | obj-$(CONFIG_NF_CT_PROTO_GRE) += nf_conntrack_proto_gre.o | ||
14 | obj-$(CONFIG_NF_CT_PROTO_SCTP) += nf_conntrack_proto_sctp.o | 18 | obj-$(CONFIG_NF_CT_PROTO_SCTP) += nf_conntrack_proto_sctp.o |
15 | 19 | ||
16 | # netlink interface for nf_conntrack | 20 | # netlink interface for nf_conntrack |
17 | obj-$(CONFIG_NF_CT_NETLINK) += nf_conntrack_netlink.o | 21 | obj-$(CONFIG_NF_CT_NETLINK) += nf_conntrack_netlink.o |
18 | 22 | ||
19 | # connection tracking helpers | 23 | # connection tracking helpers |
24 | nf_conntrack_h323-objs := nf_conntrack_h323_main.o nf_conntrack_h323_asn1.o | ||
25 | |||
26 | obj-$(CONFIG_NF_CONNTRACK_AMANDA) += nf_conntrack_amanda.o | ||
20 | obj-$(CONFIG_NF_CONNTRACK_FTP) += nf_conntrack_ftp.o | 27 | obj-$(CONFIG_NF_CONNTRACK_FTP) += nf_conntrack_ftp.o |
28 | obj-$(CONFIG_NF_CONNTRACK_H323) += nf_conntrack_h323.o | ||
29 | obj-$(CONFIG_NF_CONNTRACK_IRC) += nf_conntrack_irc.o | ||
30 | obj-$(CONFIG_NF_CONNTRACK_NETBIOS_NS) += nf_conntrack_netbios_ns.o | ||
31 | obj-$(CONFIG_NF_CONNTRACK_PPTP) += nf_conntrack_pptp.o | ||
32 | obj-$(CONFIG_NF_CONNTRACK_SIP) += nf_conntrack_sip.o | ||
33 | obj-$(CONFIG_NF_CONNTRACK_TFTP) += nf_conntrack_tftp.o | ||
21 | 34 | ||
22 | # generic X tables | 35 | # generic X tables |
23 | obj-$(CONFIG_NETFILTER_XTABLES) += x_tables.o xt_tcpudp.o | 36 | obj-$(CONFIG_NETFILTER_XTABLES) += x_tables.o xt_tcpudp.o |
@@ -28,6 +41,7 @@ obj-$(CONFIG_NETFILTER_XT_TARGET_CONNMARK) += xt_CONNMARK.o | |||
28 | obj-$(CONFIG_NETFILTER_XT_TARGET_DSCP) += xt_DSCP.o | 41 | obj-$(CONFIG_NETFILTER_XT_TARGET_DSCP) += xt_DSCP.o |
29 | obj-$(CONFIG_NETFILTER_XT_TARGET_MARK) += xt_MARK.o | 42 | obj-$(CONFIG_NETFILTER_XT_TARGET_MARK) += xt_MARK.o |
30 | obj-$(CONFIG_NETFILTER_XT_TARGET_NFQUEUE) += xt_NFQUEUE.o | 43 | obj-$(CONFIG_NETFILTER_XT_TARGET_NFQUEUE) += xt_NFQUEUE.o |
44 | obj-$(CONFIG_NETFILTER_XT_TARGET_NFLOG) += xt_NFLOG.o | ||
31 | obj-$(CONFIG_NETFILTER_XT_TARGET_NOTRACK) += xt_NOTRACK.o | 45 | obj-$(CONFIG_NETFILTER_XT_TARGET_NOTRACK) += xt_NOTRACK.o |
32 | obj-$(CONFIG_NETFILTER_XT_TARGET_SECMARK) += xt_SECMARK.o | 46 | obj-$(CONFIG_NETFILTER_XT_TARGET_SECMARK) += xt_SECMARK.o |
33 | obj-$(CONFIG_NETFILTER_XT_TARGET_CONNSECMARK) += xt_CONNSECMARK.o | 47 | obj-$(CONFIG_NETFILTER_XT_TARGET_CONNSECMARK) += xt_CONNSECMARK.o |
@@ -56,3 +70,4 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_STATISTIC) += xt_statistic.o | |||
56 | obj-$(CONFIG_NETFILTER_XT_MATCH_STRING) += xt_string.o | 70 | obj-$(CONFIG_NETFILTER_XT_MATCH_STRING) += xt_string.o |
57 | obj-$(CONFIG_NETFILTER_XT_MATCH_TCPMSS) += xt_tcpmss.o | 71 | obj-$(CONFIG_NETFILTER_XT_MATCH_TCPMSS) += xt_tcpmss.o |
58 | obj-$(CONFIG_NETFILTER_XT_MATCH_PHYSDEV) += xt_physdev.o | 72 | obj-$(CONFIG_NETFILTER_XT_MATCH_PHYSDEV) += xt_physdev.o |
73 | obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o | ||
diff --git a/net/netfilter/core.c b/net/netfilter/core.c index d80b935b3a92..291b8c6862f1 100644 --- a/net/netfilter/core.c +++ b/net/netfilter/core.c | |||
@@ -28,7 +28,7 @@ | |||
28 | 28 | ||
29 | static DEFINE_SPINLOCK(afinfo_lock); | 29 | static DEFINE_SPINLOCK(afinfo_lock); |
30 | 30 | ||
31 | struct nf_afinfo *nf_afinfo[NPROTO]; | 31 | struct nf_afinfo *nf_afinfo[NPROTO] __read_mostly; |
32 | EXPORT_SYMBOL(nf_afinfo); | 32 | EXPORT_SYMBOL(nf_afinfo); |
33 | 33 | ||
34 | int nf_register_afinfo(struct nf_afinfo *afinfo) | 34 | int nf_register_afinfo(struct nf_afinfo *afinfo) |
@@ -54,7 +54,7 @@ EXPORT_SYMBOL_GPL(nf_unregister_afinfo); | |||
54 | * of skbuffs queued for userspace, and not deregister a hook unless | 54 | * of skbuffs queued for userspace, and not deregister a hook unless |
55 | * this is zero, but that sucks. Now, we simply check when the | 55 | * this is zero, but that sucks. Now, we simply check when the |
56 | * packets come back: if the hook is gone, the packet is discarded. */ | 56 | * packets come back: if the hook is gone, the packet is discarded. */ |
57 | struct list_head nf_hooks[NPROTO][NF_MAX_HOOKS]; | 57 | struct list_head nf_hooks[NPROTO][NF_MAX_HOOKS] __read_mostly; |
58 | EXPORT_SYMBOL(nf_hooks); | 58 | EXPORT_SYMBOL(nf_hooks); |
59 | static DEFINE_SPINLOCK(nf_hook_lock); | 59 | static DEFINE_SPINLOCK(nf_hook_lock); |
60 | 60 | ||
@@ -222,28 +222,21 @@ copy_skb: | |||
222 | } | 222 | } |
223 | EXPORT_SYMBOL(skb_make_writable); | 223 | EXPORT_SYMBOL(skb_make_writable); |
224 | 224 | ||
225 | u_int16_t nf_csum_update(u_int32_t oldval, u_int32_t newval, u_int32_t csum) | 225 | void nf_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb, |
226 | { | 226 | __be32 from, __be32 to, int pseudohdr) |
227 | u_int32_t diff[] = { oldval, newval }; | ||
228 | |||
229 | return csum_fold(csum_partial((char *)diff, sizeof(diff), ~csum)); | ||
230 | } | ||
231 | EXPORT_SYMBOL(nf_csum_update); | ||
232 | |||
233 | u_int16_t nf_proto_csum_update(struct sk_buff *skb, | ||
234 | u_int32_t oldval, u_int32_t newval, | ||
235 | u_int16_t csum, int pseudohdr) | ||
236 | { | 227 | { |
228 | __be32 diff[] = { ~from, to }; | ||
237 | if (skb->ip_summed != CHECKSUM_PARTIAL) { | 229 | if (skb->ip_summed != CHECKSUM_PARTIAL) { |
238 | csum = nf_csum_update(oldval, newval, csum); | 230 | *sum = csum_fold(csum_partial((char *)diff, sizeof(diff), |
231 | ~csum_unfold(*sum))); | ||
239 | if (skb->ip_summed == CHECKSUM_COMPLETE && pseudohdr) | 232 | if (skb->ip_summed == CHECKSUM_COMPLETE && pseudohdr) |
240 | skb->csum = nf_csum_update(oldval, newval, skb->csum); | 233 | skb->csum = ~csum_partial((char *)diff, sizeof(diff), |
234 | ~skb->csum); | ||
241 | } else if (pseudohdr) | 235 | } else if (pseudohdr) |
242 | csum = ~nf_csum_update(oldval, newval, ~csum); | 236 | *sum = ~csum_fold(csum_partial((char *)diff, sizeof(diff), |
243 | 237 | csum_unfold(*sum))); | |
244 | return csum; | ||
245 | } | 238 | } |
246 | EXPORT_SYMBOL(nf_proto_csum_update); | 239 | EXPORT_SYMBOL(nf_proto_csum_replace4); |
247 | 240 | ||
248 | /* This does not belong here, but locally generated errors need it if connection | 241 | /* This does not belong here, but locally generated errors need it if connection |
249 | tracking in use: without this, connection may not be in hash table, and hence | 242 | tracking in use: without this, connection may not be in hash table, and hence |
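Editor's note: nf_proto_csum_replace4() above folds the old nf_csum_update()/nf_proto_csum_update() pair into one call that rewrites the checksum in place. A sketch of a typical caller, with assumed function and variable names that are not part of this patch; the IP header checksum would still be updated separately:

#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/netfilter.h>

/* Rewrite the IPv4 source address and incrementally repair the TCP
 * checksum; TCP's checksum covers the pseudo-header, so pseudohdr = 1. */
static void rewrite_saddr(struct sk_buff *skb, struct iphdr *iph,
			  struct tcphdr *tcph, __be32 new_addr)
{
	__be32 old_addr = iph->saddr;

	iph->saddr = new_addr;
	nf_proto_csum_replace4(&tcph->check, skb, old_addr, new_addr, 1);
}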
diff --git a/net/netfilter/nf_conntrack_amanda.c b/net/netfilter/nf_conntrack_amanda.c new file mode 100644 index 000000000000..b8869eab7650 --- /dev/null +++ b/net/netfilter/nf_conntrack_amanda.c | |||
@@ -0,0 +1,238 @@ | |||
1 | /* Amanda extension for IP connection tracking | ||
2 | * | ||
3 | * (C) 2002 by Brian J. Murrell <netfilter@interlinx.bc.ca> | ||
4 | * based on HW's ip_conntrack_irc.c as well as other modules | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/module.h> | ||
13 | #include <linux/moduleparam.h> | ||
14 | #include <linux/textsearch.h> | ||
15 | #include <linux/skbuff.h> | ||
16 | #include <linux/in.h> | ||
17 | #include <linux/udp.h> | ||
18 | #include <linux/netfilter.h> | ||
19 | |||
20 | #include <net/netfilter/nf_conntrack.h> | ||
21 | #include <net/netfilter/nf_conntrack_expect.h> | ||
22 | #include <net/netfilter/nf_conntrack_ecache.h> | ||
23 | #include <net/netfilter/nf_conntrack_helper.h> | ||
24 | #include <linux/netfilter/nf_conntrack_amanda.h> | ||
25 | |||
26 | static unsigned int master_timeout __read_mostly = 300; | ||
27 | static char *ts_algo = "kmp"; | ||
28 | |||
29 | MODULE_AUTHOR("Brian J. Murrell <netfilter@interlinx.bc.ca>"); | ||
30 | MODULE_DESCRIPTION("Amanda connection tracking module"); | ||
31 | MODULE_LICENSE("GPL"); | ||
32 | MODULE_ALIAS("ip_conntrack_amanda"); | ||
33 | |||
34 | module_param(master_timeout, uint, 0600); | ||
35 | MODULE_PARM_DESC(master_timeout, "timeout for the master connection"); | ||
36 | module_param(ts_algo, charp, 0400); | ||
37 | MODULE_PARM_DESC(ts_algo, "textsearch algorithm to use (default kmp)"); | ||
38 | |||
39 | unsigned int (*nf_nat_amanda_hook)(struct sk_buff **pskb, | ||
40 | enum ip_conntrack_info ctinfo, | ||
41 | unsigned int matchoff, | ||
42 | unsigned int matchlen, | ||
43 | struct nf_conntrack_expect *exp) | ||
44 | __read_mostly; | ||
45 | EXPORT_SYMBOL_GPL(nf_nat_amanda_hook); | ||
46 | |||
47 | enum amanda_strings { | ||
48 | SEARCH_CONNECT, | ||
49 | SEARCH_NEWLINE, | ||
50 | SEARCH_DATA, | ||
51 | SEARCH_MESG, | ||
52 | SEARCH_INDEX, | ||
53 | }; | ||
54 | |||
55 | static struct { | ||
56 | char *string; | ||
57 | size_t len; | ||
58 | struct ts_config *ts; | ||
59 | } search[] __read_mostly = { | ||
60 | [SEARCH_CONNECT] = { | ||
61 | .string = "CONNECT ", | ||
62 | .len = 8, | ||
63 | }, | ||
64 | [SEARCH_NEWLINE] = { | ||
65 | .string = "\n", | ||
66 | .len = 1, | ||
67 | }, | ||
68 | [SEARCH_DATA] = { | ||
69 | .string = "DATA ", | ||
70 | .len = 5, | ||
71 | }, | ||
72 | [SEARCH_MESG] = { | ||
73 | .string = "MESG ", | ||
74 | .len = 5, | ||
75 | }, | ||
76 | [SEARCH_INDEX] = { | ||
77 | .string = "INDEX ", | ||
78 | .len = 6, | ||
79 | }, | ||
80 | }; | ||
81 | |||
82 | static int amanda_help(struct sk_buff **pskb, | ||
83 | unsigned int protoff, | ||
84 | struct nf_conn *ct, | ||
85 | enum ip_conntrack_info ctinfo) | ||
86 | { | ||
87 | struct ts_state ts; | ||
88 | struct nf_conntrack_expect *exp; | ||
89 | struct nf_conntrack_tuple *tuple; | ||
90 | unsigned int dataoff, start, stop, off, i; | ||
91 | char pbuf[sizeof("65535")], *tmp; | ||
92 | u_int16_t len; | ||
93 | __be16 port; | ||
94 | int family = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num; | ||
95 | int ret = NF_ACCEPT; | ||
96 | typeof(nf_nat_amanda_hook) nf_nat_amanda; | ||
97 | |||
98 | /* Only look at packets from the Amanda server */ | ||
99 | if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) | ||
100 | return NF_ACCEPT; | ||
101 | |||
102 | /* increase the UDP timeout of the master connection as replies from | ||
103 | * Amanda clients to the server can be quite delayed */ | ||
104 | nf_ct_refresh(ct, *pskb, master_timeout * HZ); | ||
105 | |||
106 | /* No data? */ | ||
107 | dataoff = protoff + sizeof(struct udphdr); | ||
108 | if (dataoff >= (*pskb)->len) { | ||
109 | if (net_ratelimit()) | ||
110 | printk("amanda_help: skblen = %u\n", (*pskb)->len); | ||
111 | return NF_ACCEPT; | ||
112 | } | ||
113 | |||
114 | memset(&ts, 0, sizeof(ts)); | ||
115 | start = skb_find_text(*pskb, dataoff, (*pskb)->len, | ||
116 | search[SEARCH_CONNECT].ts, &ts); | ||
117 | if (start == UINT_MAX) | ||
118 | goto out; | ||
119 | start += dataoff + search[SEARCH_CONNECT].len; | ||
120 | |||
121 | memset(&ts, 0, sizeof(ts)); | ||
122 | stop = skb_find_text(*pskb, start, (*pskb)->len, | ||
123 | search[SEARCH_NEWLINE].ts, &ts); | ||
124 | if (stop == UINT_MAX) | ||
125 | goto out; | ||
126 | stop += start; | ||
127 | |||
128 | for (i = SEARCH_DATA; i <= SEARCH_INDEX; i++) { | ||
129 | memset(&ts, 0, sizeof(ts)); | ||
130 | off = skb_find_text(*pskb, start, stop, search[i].ts, &ts); | ||
131 | if (off == UINT_MAX) | ||
132 | continue; | ||
133 | off += start + search[i].len; | ||
134 | |||
135 | len = min_t(unsigned int, sizeof(pbuf) - 1, stop - off); | ||
136 | if (skb_copy_bits(*pskb, off, pbuf, len)) | ||
137 | break; | ||
138 | pbuf[len] = '\0'; | ||
139 | |||
140 | port = htons(simple_strtoul(pbuf, &tmp, 10)); | ||
141 | len = tmp - pbuf; | ||
142 | if (port == 0 || len > 5) | ||
143 | break; | ||
144 | |||
145 | exp = nf_conntrack_expect_alloc(ct); | ||
146 | if (exp == NULL) { | ||
147 | ret = NF_DROP; | ||
148 | goto out; | ||
149 | } | ||
150 | tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple; | ||
151 | nf_conntrack_expect_init(exp, family, | ||
152 | &tuple->src.u3, &tuple->dst.u3, | ||
153 | IPPROTO_TCP, NULL, &port); | ||
154 | |||
155 | nf_nat_amanda = rcu_dereference(nf_nat_amanda_hook); | ||
156 | if (nf_nat_amanda && ct->status & IPS_NAT_MASK) | ||
157 | ret = nf_nat_amanda(pskb, ctinfo, off - dataoff, | ||
158 | len, exp); | ||
159 | else if (nf_conntrack_expect_related(exp) != 0) | ||
160 | ret = NF_DROP; | ||
161 | nf_conntrack_expect_put(exp); | ||
162 | } | ||
163 | |||
164 | out: | ||
165 | return ret; | ||
166 | } | ||
167 | |||
168 | static struct nf_conntrack_helper amanda_helper[2] __read_mostly = { | ||
169 | { | ||
170 | .name = "amanda", | ||
171 | .max_expected = 3, | ||
172 | .timeout = 180, | ||
173 | .me = THIS_MODULE, | ||
174 | .help = amanda_help, | ||
175 | .tuple.src.l3num = AF_INET, | ||
176 | .tuple.src.u.udp.port = __constant_htons(10080), | ||
177 | .tuple.dst.protonum = IPPROTO_UDP, | ||
178 | .mask.src.l3num = 0xFFFF, | ||
179 | .mask.src.u.udp.port = __constant_htons(0xFFFF), | ||
180 | .mask.dst.protonum = 0xFF, | ||
181 | }, | ||
182 | { | ||
183 | .name = "amanda", | ||
184 | .max_expected = 3, | ||
185 | .timeout = 180, | ||
186 | .me = THIS_MODULE, | ||
187 | .help = amanda_help, | ||
188 | .tuple.src.l3num = AF_INET6, | ||
189 | .tuple.src.u.udp.port = __constant_htons(10080), | ||
190 | .tuple.dst.protonum = IPPROTO_UDP, | ||
191 | .mask.src.l3num = 0xFFFF, | ||
192 | .mask.src.u.udp.port = __constant_htons(0xFFFF), | ||
193 | .mask.dst.protonum = 0xFF, | ||
194 | }, | ||
195 | }; | ||
196 | |||
197 | static void __exit nf_conntrack_amanda_fini(void) | ||
198 | { | ||
199 | int i; | ||
200 | |||
201 | nf_conntrack_helper_unregister(&amanda_helper[0]); | ||
202 | nf_conntrack_helper_unregister(&amanda_helper[1]); | ||
203 | for (i = 0; i < ARRAY_SIZE(search); i++) | ||
204 | textsearch_destroy(search[i].ts); | ||
205 | } | ||
206 | |||
207 | static int __init nf_conntrack_amanda_init(void) | ||
208 | { | ||
209 | int ret, i; | ||
210 | |||
211 | ret = -ENOMEM; | ||
212 | for (i = 0; i < ARRAY_SIZE(search); i++) { | ||
213 | search[i].ts = textsearch_prepare(ts_algo, search[i].string, | ||
214 | search[i].len, | ||
215 | GFP_KERNEL, TS_AUTOLOAD); | ||
216 | if (search[i].ts == NULL) | ||
217 | goto err1; | ||
218 | } | ||
219 | ret = nf_conntrack_helper_register(&amanda_helper[0]); | ||
220 | if (ret < 0) | ||
221 | goto err1; | ||
222 | ret = nf_conntrack_helper_register(&amanda_helper[1]); | ||
223 | if (ret < 0) | ||
224 | goto err2; | ||
225 | return 0; | ||
226 | |||
227 | err2: | ||
228 | nf_conntrack_helper_unregister(&amanda_helper[0]); | ||
229 | err1: | ||
230 | for (; i >= 0; i--) { | ||
231 | if (search[i].ts) | ||
232 | textsearch_destroy(search[i].ts); | ||
233 | } | ||
234 | return ret; | ||
235 | } | ||
236 | |||
237 | module_init(nf_conntrack_amanda_init); | ||
238 | module_exit(nf_conntrack_amanda_fini); | ||
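Editor's note: the nf_nat_amanda_hook pointer declared in the file above is the seam between this tracking helper and the separate NAT helper module. A sketch of how the NAT side is expected to attach to it (the helper body and names here are placeholders, not code from this patch): publish the function with rcu_assign_pointer() on load, clear it and wait for readers on unload.

#include <linux/module.h>
#include <linux/skbuff.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <linux/netfilter/nf_conntrack_amanda.h>

static unsigned int help(struct sk_buff **pskb,
			 enum ip_conntrack_info ctinfo,
			 unsigned int matchoff, unsigned int matchlen,
			 struct nf_conntrack_expect *exp)
{
	/* NAT mangling of the port string would go here */
	return NF_ACCEPT;
}

static int __init nf_nat_amanda_init(void)
{
	BUG_ON(rcu_dereference(nf_nat_amanda_hook) != NULL);
	rcu_assign_pointer(nf_nat_amanda_hook, help);
	return 0;
}

static void __exit nf_nat_amanda_fini(void)
{
	rcu_assign_pointer(nf_nat_amanda_hook, NULL);
	synchronize_rcu();
}

module_init(nf_nat_amanda_init);
module_exit(nf_nat_amanda_fini);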
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 836541e509fe..93d97d9f9da8 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c | |||
@@ -47,14 +47,10 @@ | |||
47 | #include <linux/netdevice.h> | 47 | #include <linux/netdevice.h> |
48 | #include <linux/socket.h> | 48 | #include <linux/socket.h> |
49 | 49 | ||
50 | /* This rwlock protects the main hash table, protocol/helper/expected | ||
51 | registrations, conntrack timers*/ | ||
52 | #define ASSERT_READ_LOCK(x) | ||
53 | #define ASSERT_WRITE_LOCK(x) | ||
54 | |||
55 | #include <net/netfilter/nf_conntrack.h> | 50 | #include <net/netfilter/nf_conntrack.h> |
56 | #include <net/netfilter/nf_conntrack_l3proto.h> | 51 | #include <net/netfilter/nf_conntrack_l3proto.h> |
57 | #include <net/netfilter/nf_conntrack_protocol.h> | 52 | #include <net/netfilter/nf_conntrack_l4proto.h> |
53 | #include <net/netfilter/nf_conntrack_expect.h> | ||
58 | #include <net/netfilter/nf_conntrack_helper.h> | 54 | #include <net/netfilter/nf_conntrack_helper.h> |
59 | #include <net/netfilter/nf_conntrack_core.h> | 55 | #include <net/netfilter/nf_conntrack_core.h> |
60 | 56 | ||
@@ -67,92 +63,32 @@ | |||
67 | #endif | 63 | #endif |
68 | 64 | ||
69 | DEFINE_RWLOCK(nf_conntrack_lock); | 65 | DEFINE_RWLOCK(nf_conntrack_lock); |
66 | EXPORT_SYMBOL_GPL(nf_conntrack_lock); | ||
70 | 67 | ||
71 | /* nf_conntrack_standalone needs this */ | 68 | /* nf_conntrack_standalone needs this */ |
72 | atomic_t nf_conntrack_count = ATOMIC_INIT(0); | 69 | atomic_t nf_conntrack_count = ATOMIC_INIT(0); |
70 | EXPORT_SYMBOL_GPL(nf_conntrack_count); | ||
73 | 71 | ||
74 | void (*nf_conntrack_destroyed)(struct nf_conn *conntrack) = NULL; | 72 | void (*nf_conntrack_destroyed)(struct nf_conn *conntrack); |
75 | LIST_HEAD(nf_conntrack_expect_list); | 73 | EXPORT_SYMBOL_GPL(nf_conntrack_destroyed); |
76 | struct nf_conntrack_protocol **nf_ct_protos[PF_MAX] __read_mostly; | ||
77 | struct nf_conntrack_l3proto *nf_ct_l3protos[PF_MAX] __read_mostly; | ||
78 | static LIST_HEAD(helpers); | ||
79 | unsigned int nf_conntrack_htable_size __read_mostly = 0; | ||
80 | int nf_conntrack_max __read_mostly; | ||
81 | struct list_head *nf_conntrack_hash __read_mostly; | ||
82 | static kmem_cache_t *nf_conntrack_expect_cachep __read_mostly; | ||
83 | struct nf_conn nf_conntrack_untracked; | ||
84 | unsigned int nf_ct_log_invalid __read_mostly; | ||
85 | static LIST_HEAD(unconfirmed); | ||
86 | static int nf_conntrack_vmalloc __read_mostly; | ||
87 | |||
88 | static unsigned int nf_conntrack_next_id; | ||
89 | static unsigned int nf_conntrack_expect_next_id; | ||
90 | #ifdef CONFIG_NF_CONNTRACK_EVENTS | ||
91 | ATOMIC_NOTIFIER_HEAD(nf_conntrack_chain); | ||
92 | ATOMIC_NOTIFIER_HEAD(nf_conntrack_expect_chain); | ||
93 | 74 | ||
94 | DEFINE_PER_CPU(struct nf_conntrack_ecache, nf_conntrack_ecache); | 75 | unsigned int nf_conntrack_htable_size __read_mostly; |
76 | EXPORT_SYMBOL_GPL(nf_conntrack_htable_size); | ||
95 | 77 | ||
96 | /* deliver cached events and clear cache entry - must be called with locally | 78 | int nf_conntrack_max __read_mostly; |
97 | * disabled softirqs */ | 79 | EXPORT_SYMBOL_GPL(nf_conntrack_max); |
98 | static inline void | ||
99 | __nf_ct_deliver_cached_events(struct nf_conntrack_ecache *ecache) | ||
100 | { | ||
101 | DEBUGP("ecache: delivering events for %p\n", ecache->ct); | ||
102 | if (nf_ct_is_confirmed(ecache->ct) && !nf_ct_is_dying(ecache->ct) | ||
103 | && ecache->events) | ||
104 | atomic_notifier_call_chain(&nf_conntrack_chain, ecache->events, | ||
105 | ecache->ct); | ||
106 | |||
107 | ecache->events = 0; | ||
108 | nf_ct_put(ecache->ct); | ||
109 | ecache->ct = NULL; | ||
110 | } | ||
111 | 80 | ||
112 | /* Deliver all cached events for a particular conntrack. This is called | 81 | struct list_head *nf_conntrack_hash __read_mostly; |
113 | * by code prior to async packet handling for freeing the skb */ | 82 | EXPORT_SYMBOL_GPL(nf_conntrack_hash); |
114 | void nf_ct_deliver_cached_events(const struct nf_conn *ct) | ||
115 | { | ||
116 | struct nf_conntrack_ecache *ecache; | ||
117 | 83 | ||
118 | local_bh_disable(); | 84 | struct nf_conn nf_conntrack_untracked __read_mostly; |
119 | ecache = &__get_cpu_var(nf_conntrack_ecache); | 85 | EXPORT_SYMBOL_GPL(nf_conntrack_untracked); |
120 | if (ecache->ct == ct) | ||
121 | __nf_ct_deliver_cached_events(ecache); | ||
122 | local_bh_enable(); | ||
123 | } | ||
124 | 86 | ||
125 | /* Deliver cached events for old pending events, if current conntrack != old */ | 87 | unsigned int nf_ct_log_invalid __read_mostly; |
126 | void __nf_ct_event_cache_init(struct nf_conn *ct) | 88 | LIST_HEAD(unconfirmed); |
127 | { | 89 | static int nf_conntrack_vmalloc __read_mostly; |
128 | struct nf_conntrack_ecache *ecache; | ||
129 | |||
130 | /* take care of delivering potentially old events */ | ||
131 | ecache = &__get_cpu_var(nf_conntrack_ecache); | ||
132 | BUG_ON(ecache->ct == ct); | ||
133 | if (ecache->ct) | ||
134 | __nf_ct_deliver_cached_events(ecache); | ||
135 | /* initialize for this conntrack/packet */ | ||
136 | ecache->ct = ct; | ||
137 | nf_conntrack_get(&ct->ct_general); | ||
138 | } | ||
139 | |||
140 | /* flush the event cache - touches other CPU's data and must not be called | ||
141 | * while packets are still passing through the code */ | ||
142 | static void nf_ct_event_cache_flush(void) | ||
143 | { | ||
144 | struct nf_conntrack_ecache *ecache; | ||
145 | int cpu; | ||
146 | 90 | ||
147 | for_each_possible_cpu(cpu) { | 91 | static unsigned int nf_conntrack_next_id; |
148 | ecache = &per_cpu(nf_conntrack_ecache, cpu); | ||
149 | if (ecache->ct) | ||
150 | nf_ct_put(ecache->ct); | ||
151 | } | ||
152 | } | ||
153 | #else | ||
154 | static inline void nf_ct_event_cache_flush(void) {} | ||
155 | #endif /* CONFIG_NF_CONNTRACK_EVENTS */ | ||
156 | 92 | ||
157 | DEFINE_PER_CPU(struct ip_conntrack_stat, nf_conntrack_stat); | 93 | DEFINE_PER_CPU(struct ip_conntrack_stat, nf_conntrack_stat); |
158 | EXPORT_PER_CPU_SYMBOL(nf_conntrack_stat); | 94 | EXPORT_PER_CPU_SYMBOL(nf_conntrack_stat); |
@@ -184,85 +120,6 @@ DEFINE_RWLOCK(nf_ct_cache_lock); | |||
184 | /* This avoids calling kmem_cache_create() with same name simultaneously */ | 120 | /* This avoids calling kmem_cache_create() with same name simultaneously */ |
185 | static DEFINE_MUTEX(nf_ct_cache_mutex); | 121 | static DEFINE_MUTEX(nf_ct_cache_mutex); |
186 | 122 | ||
187 | extern struct nf_conntrack_protocol nf_conntrack_generic_protocol; | ||
188 | struct nf_conntrack_protocol * | ||
189 | __nf_ct_proto_find(u_int16_t l3proto, u_int8_t protocol) | ||
190 | { | ||
191 | if (unlikely(l3proto >= AF_MAX || nf_ct_protos[l3proto] == NULL)) | ||
192 | return &nf_conntrack_generic_protocol; | ||
193 | |||
194 | return nf_ct_protos[l3proto][protocol]; | ||
195 | } | ||
196 | |||
197 | /* this is guaranteed to always return a valid protocol helper, since | ||
198 | * it falls back to generic_protocol */ | ||
199 | struct nf_conntrack_protocol * | ||
200 | nf_ct_proto_find_get(u_int16_t l3proto, u_int8_t protocol) | ||
201 | { | ||
202 | struct nf_conntrack_protocol *p; | ||
203 | |||
204 | preempt_disable(); | ||
205 | p = __nf_ct_proto_find(l3proto, protocol); | ||
206 | if (!try_module_get(p->me)) | ||
207 | p = &nf_conntrack_generic_protocol; | ||
208 | preempt_enable(); | ||
209 | |||
210 | return p; | ||
211 | } | ||
212 | |||
213 | void nf_ct_proto_put(struct nf_conntrack_protocol *p) | ||
214 | { | ||
215 | module_put(p->me); | ||
216 | } | ||
217 | |||
218 | struct nf_conntrack_l3proto * | ||
219 | nf_ct_l3proto_find_get(u_int16_t l3proto) | ||
220 | { | ||
221 | struct nf_conntrack_l3proto *p; | ||
222 | |||
223 | preempt_disable(); | ||
224 | p = __nf_ct_l3proto_find(l3proto); | ||
225 | if (!try_module_get(p->me)) | ||
226 | p = &nf_conntrack_generic_l3proto; | ||
227 | preempt_enable(); | ||
228 | |||
229 | return p; | ||
230 | } | ||
231 | |||
232 | void nf_ct_l3proto_put(struct nf_conntrack_l3proto *p) | ||
233 | { | ||
234 | module_put(p->me); | ||
235 | } | ||
236 | |||
237 | int | ||
238 | nf_ct_l3proto_try_module_get(unsigned short l3proto) | ||
239 | { | ||
240 | int ret; | ||
241 | struct nf_conntrack_l3proto *p; | ||
242 | |||
243 | retry: p = nf_ct_l3proto_find_get(l3proto); | ||
244 | if (p == &nf_conntrack_generic_l3proto) { | ||
245 | ret = request_module("nf_conntrack-%d", l3proto); | ||
246 | if (!ret) | ||
247 | goto retry; | ||
248 | |||
249 | return -EPROTOTYPE; | ||
250 | } | ||
251 | |||
252 | return 0; | ||
253 | } | ||
254 | |||
255 | void nf_ct_l3proto_module_put(unsigned short l3proto) | ||
256 | { | ||
257 | struct nf_conntrack_l3proto *p; | ||
258 | |||
259 | preempt_disable(); | ||
260 | p = __nf_ct_l3proto_find(l3proto); | ||
261 | preempt_enable(); | ||
262 | |||
263 | module_put(p->me); | ||
264 | } | ||
265 | |||
266 | static int nf_conntrack_hash_rnd_initted; | 123 | static int nf_conntrack_hash_rnd_initted; |
267 | static unsigned int nf_conntrack_hash_rnd; | 124 | static unsigned int nf_conntrack_hash_rnd; |
268 | 125 | ||
@@ -363,6 +220,7 @@ out_up_mutex: | |||
363 | mutex_unlock(&nf_ct_cache_mutex); | 220 | mutex_unlock(&nf_ct_cache_mutex); |
364 | return ret; | 221 | return ret; |
365 | } | 222 | } |
223 | EXPORT_SYMBOL_GPL(nf_conntrack_register_cache); | ||
366 | 224 | ||
367 | /* FIXME: In the current, only nf_conntrack_cleanup() can call this function. */ | 225 | /* FIXME: In the current, only nf_conntrack_cleanup() can call this function. */ |
368 | void nf_conntrack_unregister_cache(u_int32_t features) | 226 | void nf_conntrack_unregister_cache(u_int32_t features) |
@@ -397,6 +255,7 @@ void nf_conntrack_unregister_cache(u_int32_t features) | |||
397 | 255 | ||
398 | mutex_unlock(&nf_ct_cache_mutex); | 256 | mutex_unlock(&nf_ct_cache_mutex); |
399 | } | 257 | } |
258 | EXPORT_SYMBOL_GPL(nf_conntrack_unregister_cache); | ||
400 | 259 | ||
401 | int | 260 | int |
402 | nf_ct_get_tuple(const struct sk_buff *skb, | 261 | nf_ct_get_tuple(const struct sk_buff *skb, |
@@ -406,7 +265,7 @@ nf_ct_get_tuple(const struct sk_buff *skb, | |||
406 | u_int8_t protonum, | 265 | u_int8_t protonum, |
407 | struct nf_conntrack_tuple *tuple, | 266 | struct nf_conntrack_tuple *tuple, |
408 | const struct nf_conntrack_l3proto *l3proto, | 267 | const struct nf_conntrack_l3proto *l3proto, |
409 | const struct nf_conntrack_protocol *protocol) | 268 | const struct nf_conntrack_l4proto *l4proto) |
410 | { | 269 | { |
411 | NF_CT_TUPLE_U_BLANK(tuple); | 270 | NF_CT_TUPLE_U_BLANK(tuple); |
412 | 271 | ||
@@ -417,14 +276,15 @@ nf_ct_get_tuple(const struct sk_buff *skb, | |||
417 | tuple->dst.protonum = protonum; | 276 | tuple->dst.protonum = protonum; |
418 | tuple->dst.dir = IP_CT_DIR_ORIGINAL; | 277 | tuple->dst.dir = IP_CT_DIR_ORIGINAL; |
419 | 278 | ||
420 | return protocol->pkt_to_tuple(skb, dataoff, tuple); | 279 | return l4proto->pkt_to_tuple(skb, dataoff, tuple); |
421 | } | 280 | } |
281 | EXPORT_SYMBOL_GPL(nf_ct_get_tuple); | ||
422 | 282 | ||
423 | int | 283 | int |
424 | nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse, | 284 | nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse, |
425 | const struct nf_conntrack_tuple *orig, | 285 | const struct nf_conntrack_tuple *orig, |
426 | const struct nf_conntrack_l3proto *l3proto, | 286 | const struct nf_conntrack_l3proto *l3proto, |
427 | const struct nf_conntrack_protocol *protocol) | 287 | const struct nf_conntrack_l4proto *l4proto) |
428 | { | 288 | { |
429 | NF_CT_TUPLE_U_BLANK(inverse); | 289 | NF_CT_TUPLE_U_BLANK(inverse); |
430 | 290 | ||
@@ -435,111 +295,14 @@ nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse, | |||
435 | inverse->dst.dir = !orig->dst.dir; | 295 | inverse->dst.dir = !orig->dst.dir; |
436 | 296 | ||
437 | inverse->dst.protonum = orig->dst.protonum; | 297 | inverse->dst.protonum = orig->dst.protonum; |
438 | return protocol->invert_tuple(inverse, orig); | 298 | return l4proto->invert_tuple(inverse, orig); |
439 | } | ||
440 | |||
441 | /* nf_conntrack_expect helper functions */ | ||
442 | void nf_ct_unlink_expect(struct nf_conntrack_expect *exp) | ||
443 | { | ||
444 | struct nf_conn_help *master_help = nfct_help(exp->master); | ||
445 | |||
446 | NF_CT_ASSERT(master_help); | ||
447 | ASSERT_WRITE_LOCK(&nf_conntrack_lock); | ||
448 | NF_CT_ASSERT(!timer_pending(&exp->timeout)); | ||
449 | |||
450 | list_del(&exp->list); | ||
451 | NF_CT_STAT_INC(expect_delete); | ||
452 | master_help->expecting--; | ||
453 | nf_conntrack_expect_put(exp); | ||
454 | } | ||
455 | |||
456 | static void expectation_timed_out(unsigned long ul_expect) | ||
457 | { | ||
458 | struct nf_conntrack_expect *exp = (void *)ul_expect; | ||
459 | |||
460 | write_lock_bh(&nf_conntrack_lock); | ||
461 | nf_ct_unlink_expect(exp); | ||
462 | write_unlock_bh(&nf_conntrack_lock); | ||
463 | nf_conntrack_expect_put(exp); | ||
464 | } | ||
465 | |||
466 | struct nf_conntrack_expect * | ||
467 | __nf_conntrack_expect_find(const struct nf_conntrack_tuple *tuple) | ||
468 | { | ||
469 | struct nf_conntrack_expect *i; | ||
470 | |||
471 | list_for_each_entry(i, &nf_conntrack_expect_list, list) { | ||
472 | if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask)) { | ||
473 | atomic_inc(&i->use); | ||
474 | return i; | ||
475 | } | ||
476 | } | ||
477 | return NULL; | ||
478 | } | ||
479 | |||
480 | /* Just find a expectation corresponding to a tuple. */ | ||
481 | struct nf_conntrack_expect * | ||
482 | nf_conntrack_expect_find(const struct nf_conntrack_tuple *tuple) | ||
483 | { | ||
484 | struct nf_conntrack_expect *i; | ||
485 | |||
486 | read_lock_bh(&nf_conntrack_lock); | ||
487 | i = __nf_conntrack_expect_find(tuple); | ||
488 | read_unlock_bh(&nf_conntrack_lock); | ||
489 | |||
490 | return i; | ||
491 | } | ||
492 | |||
493 | /* If an expectation for this connection is found, it gets delete from | ||
494 | * global list then returned. */ | ||
495 | static struct nf_conntrack_expect * | ||
496 | find_expectation(const struct nf_conntrack_tuple *tuple) | ||
497 | { | ||
498 | struct nf_conntrack_expect *i; | ||
499 | |||
500 | list_for_each_entry(i, &nf_conntrack_expect_list, list) { | ||
501 | /* If master is not in hash table yet (ie. packet hasn't left | ||
502 | this machine yet), how can other end know about expected? | ||
503 | Hence these are not the droids you are looking for (if | ||
504 | master ct never got confirmed, we'd hold a reference to it | ||
505 | and weird things would happen to future packets). */ | ||
506 | if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) | ||
507 | && nf_ct_is_confirmed(i->master)) { | ||
508 | if (i->flags & NF_CT_EXPECT_PERMANENT) { | ||
509 | atomic_inc(&i->use); | ||
510 | return i; | ||
511 | } else if (del_timer(&i->timeout)) { | ||
512 | nf_ct_unlink_expect(i); | ||
513 | return i; | ||
514 | } | ||
515 | } | ||
516 | } | ||
517 | return NULL; | ||
518 | } | ||
519 | |||
520 | /* delete all expectations for this conntrack */ | ||
521 | void nf_ct_remove_expectations(struct nf_conn *ct) | ||
522 | { | ||
523 | struct nf_conntrack_expect *i, *tmp; | ||
524 | struct nf_conn_help *help = nfct_help(ct); | ||
525 | |||
526 | /* Optimization: most connection never expect any others. */ | ||
527 | if (!help || help->expecting == 0) | ||
528 | return; | ||
529 | |||
530 | list_for_each_entry_safe(i, tmp, &nf_conntrack_expect_list, list) { | ||
531 | if (i->master == ct && del_timer(&i->timeout)) { | ||
532 | nf_ct_unlink_expect(i); | ||
533 | nf_conntrack_expect_put(i); | ||
534 | } | ||
535 | } | ||
536 | } | 299 | } |
300 | EXPORT_SYMBOL_GPL(nf_ct_invert_tuple); | ||
537 | 301 | ||
538 | static void | 302 | static void |
539 | clean_from_lists(struct nf_conn *ct) | 303 | clean_from_lists(struct nf_conn *ct) |
540 | { | 304 | { |
541 | DEBUGP("clean_from_lists(%p)\n", ct); | 305 | DEBUGP("clean_from_lists(%p)\n", ct); |
542 | ASSERT_WRITE_LOCK(&nf_conntrack_lock); | ||
543 | list_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list); | 306 | list_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list); |
544 | list_del(&ct->tuplehash[IP_CT_DIR_REPLY].list); | 307 | list_del(&ct->tuplehash[IP_CT_DIR_REPLY].list); |
545 | 308 | ||
@@ -551,8 +314,9 @@ static void | |||
551 | destroy_conntrack(struct nf_conntrack *nfct) | 314 | destroy_conntrack(struct nf_conntrack *nfct) |
552 | { | 315 | { |
553 | struct nf_conn *ct = (struct nf_conn *)nfct; | 316 | struct nf_conn *ct = (struct nf_conn *)nfct; |
317 | struct nf_conn_help *help = nfct_help(ct); | ||
554 | struct nf_conntrack_l3proto *l3proto; | 318 | struct nf_conntrack_l3proto *l3proto; |
555 | struct nf_conntrack_protocol *proto; | 319 | struct nf_conntrack_l4proto *l4proto; |
556 | 320 | ||
557 | DEBUGP("destroy_conntrack(%p)\n", ct); | 321 | DEBUGP("destroy_conntrack(%p)\n", ct); |
558 | NF_CT_ASSERT(atomic_read(&nfct->use) == 0); | 322 | NF_CT_ASSERT(atomic_read(&nfct->use) == 0); |
@@ -561,6 +325,9 @@ destroy_conntrack(struct nf_conntrack *nfct) | |||
561 | nf_conntrack_event(IPCT_DESTROY, ct); | 325 | nf_conntrack_event(IPCT_DESTROY, ct); |
562 | set_bit(IPS_DYING_BIT, &ct->status); | 326 | set_bit(IPS_DYING_BIT, &ct->status); |
563 | 327 | ||
328 | if (help && help->helper && help->helper->destroy) | ||
329 | help->helper->destroy(ct); | ||
330 | |||
564 | /* To make sure we don't get any weird locking issues here: | 331 | /* To make sure we don't get any weird locking issues here: |
565 | * destroy_conntrack() MUST NOT be called with a write lock | 332 | * destroy_conntrack() MUST NOT be called with a write lock |
566 | * to nf_conntrack_lock!!! -HW */ | 333 | * to nf_conntrack_lock!!! -HW */ |
@@ -568,9 +335,9 @@ destroy_conntrack(struct nf_conntrack *nfct) | |||
568 | if (l3proto && l3proto->destroy) | 335 | if (l3proto && l3proto->destroy) |
569 | l3proto->destroy(ct); | 336 | l3proto->destroy(ct); |
570 | 337 | ||
571 | proto = __nf_ct_proto_find(ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.l3num, ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.protonum); | 338 | l4proto = __nf_ct_l4proto_find(ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.l3num, ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.protonum); |
572 | if (proto && proto->destroy) | 339 | if (l4proto && l4proto->destroy) |
573 | proto->destroy(ct); | 340 | l4proto->destroy(ct); |
574 | 341 | ||
575 | if (nf_conntrack_destroyed) | 342 | if (nf_conntrack_destroyed) |
576 | nf_conntrack_destroyed(ct); | 343 | nf_conntrack_destroyed(ct); |
@@ -618,7 +385,6 @@ __nf_conntrack_find(const struct nf_conntrack_tuple *tuple, | |||
618 | struct nf_conntrack_tuple_hash *h; | 385 | struct nf_conntrack_tuple_hash *h; |
619 | unsigned int hash = hash_conntrack(tuple); | 386 | unsigned int hash = hash_conntrack(tuple); |
620 | 387 | ||
621 | ASSERT_READ_LOCK(&nf_conntrack_lock); | ||
622 | list_for_each_entry(h, &nf_conntrack_hash[hash], list) { | 388 | list_for_each_entry(h, &nf_conntrack_hash[hash], list) { |
623 | if (nf_ct_tuplehash_to_ctrack(h) != ignored_conntrack && | 389 | if (nf_ct_tuplehash_to_ctrack(h) != ignored_conntrack && |
624 | nf_ct_tuple_equal(tuple, &h->tuple)) { | 390 | nf_ct_tuple_equal(tuple, &h->tuple)) { |
@@ -630,6 +396,7 @@ __nf_conntrack_find(const struct nf_conntrack_tuple *tuple, | |||
630 | 396 | ||
631 | return NULL; | 397 | return NULL; |
632 | } | 398 | } |
399 | EXPORT_SYMBOL_GPL(__nf_conntrack_find); | ||
633 | 400 | ||
634 | /* Find a connection corresponding to a tuple. */ | 401 | /* Find a connection corresponding to a tuple. */ |
635 | struct nf_conntrack_tuple_hash * | 402 | struct nf_conntrack_tuple_hash * |
@@ -646,6 +413,7 @@ nf_conntrack_find_get(const struct nf_conntrack_tuple *tuple, | |||
646 | 413 | ||
647 | return h; | 414 | return h; |
648 | } | 415 | } |
416 | EXPORT_SYMBOL_GPL(nf_conntrack_find_get); | ||
649 | 417 | ||
650 | static void __nf_conntrack_hash_insert(struct nf_conn *ct, | 418 | static void __nf_conntrack_hash_insert(struct nf_conn *ct, |
651 | unsigned int hash, | 419 | unsigned int hash, |
@@ -669,6 +437,7 @@ void nf_conntrack_hash_insert(struct nf_conn *ct) | |||
669 | __nf_conntrack_hash_insert(ct, hash, repl_hash); | 437 | __nf_conntrack_hash_insert(ct, hash, repl_hash); |
670 | write_unlock_bh(&nf_conntrack_lock); | 438 | write_unlock_bh(&nf_conntrack_lock); |
671 | } | 439 | } |
440 | EXPORT_SYMBOL_GPL(nf_conntrack_hash_insert); | ||
672 | 441 | ||
673 | /* Confirm a connection given skb; places it in hash table */ | 442 | /* Confirm a connection given skb; places it in hash table */ |
674 | int | 443 | int |
@@ -746,6 +515,7 @@ out: | |||
746 | write_unlock_bh(&nf_conntrack_lock); | 515 | write_unlock_bh(&nf_conntrack_lock); |
747 | return NF_DROP; | 516 | return NF_DROP; |
748 | } | 517 | } |
518 | EXPORT_SYMBOL_GPL(__nf_conntrack_confirm); | ||
749 | 519 | ||
750 | /* Returns true if a connection corresponds to the tuple (required | 520 |
751 | for NAT). */ | 521 | for NAT). */ |
@@ -761,6 +531,7 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple, | |||
761 | 531 | ||
762 | return h != NULL; | 532 | return h != NULL; |
763 | } | 533 | } |
534 | EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken); | ||
764 | 535 | ||
765 | /* There's a small race here where we may free a just-assured | 536 | /* There's a small race here where we may free a just-assured |
766 | connection. Too bad: we're in trouble anyway. */ | 537 | connection. Too bad: we're in trouble anyway. */ |
@@ -794,53 +565,13 @@ static int early_drop(struct list_head *chain) | |||
794 | return dropped; | 565 | return dropped; |
795 | } | 566 | } |
796 | 567 | ||
797 | static struct nf_conntrack_helper * | ||
798 | __nf_ct_helper_find(const struct nf_conntrack_tuple *tuple) | ||
799 | { | ||
800 | struct nf_conntrack_helper *h; | ||
801 | |||
802 | list_for_each_entry(h, &helpers, list) { | ||
803 | if (nf_ct_tuple_mask_cmp(tuple, &h->tuple, &h->mask)) | ||
804 | return h; | ||
805 | } | ||
806 | return NULL; | ||
807 | } | ||
808 | |||
809 | struct nf_conntrack_helper * | ||
810 | nf_ct_helper_find_get( const struct nf_conntrack_tuple *tuple) | ||
811 | { | ||
812 | struct nf_conntrack_helper *helper; | ||
813 | |||
814 | /* need nf_conntrack_lock to assure that helper exists until | ||
815 | * try_module_get() is called */ | ||
816 | read_lock_bh(&nf_conntrack_lock); | ||
817 | |||
818 | helper = __nf_ct_helper_find(tuple); | ||
819 | if (helper) { | ||
820 | /* need to increase module usage count to assure helper will | ||
821 | * not go away while the caller is e.g. busy putting a | ||
822 | * conntrack in the hash that uses the helper */ | ||
823 | if (!try_module_get(helper->me)) | ||
824 | helper = NULL; | ||
825 | } | ||
826 | |||
827 | read_unlock_bh(&nf_conntrack_lock); | ||
828 | |||
829 | return helper; | ||
830 | } | ||
831 | |||
832 | void nf_ct_helper_put(struct nf_conntrack_helper *helper) | ||
833 | { | ||
834 | module_put(helper->me); | ||
835 | } | ||
836 | |||
837 | static struct nf_conn * | 568 | static struct nf_conn * |
838 | __nf_conntrack_alloc(const struct nf_conntrack_tuple *orig, | 569 | __nf_conntrack_alloc(const struct nf_conntrack_tuple *orig, |
839 | const struct nf_conntrack_tuple *repl, | 570 | const struct nf_conntrack_tuple *repl, |
840 | const struct nf_conntrack_l3proto *l3proto) | 571 | const struct nf_conntrack_l3proto *l3proto, |
572 | u_int32_t features) | ||
841 | { | 573 | { |
842 | struct nf_conn *conntrack = NULL; | 574 | struct nf_conn *conntrack = NULL; |
843 | u_int32_t features = 0; | ||
844 | struct nf_conntrack_helper *helper; | 575 | struct nf_conntrack_helper *helper; |
845 | 576 | ||
846 | if (unlikely(!nf_conntrack_hash_rnd_initted)) { | 577 | if (unlikely(!nf_conntrack_hash_rnd_initted)) { |
@@ -866,12 +597,13 @@ __nf_conntrack_alloc(const struct nf_conntrack_tuple *orig, | |||
866 | } | 597 | } |
867 | 598 | ||
868 | /* find features needed by this conntrack. */ | 599 | /* find features needed by this conntrack. */ |
869 | features = l3proto->get_features(orig); | 600 | features |= l3proto->get_features(orig); |
870 | 601 | ||
871 | /* FIXME: protect helper list per RCU */ | 602 | /* FIXME: protect helper list per RCU */ |
872 | read_lock_bh(&nf_conntrack_lock); | 603 | read_lock_bh(&nf_conntrack_lock); |
873 | helper = __nf_ct_helper_find(repl); | 604 | helper = __nf_ct_helper_find(repl); |
874 | if (helper) | 605 | /* NAT might want to assign a helper later */ |
606 | if (helper || features & NF_CT_F_NAT) | ||
875 | features |= NF_CT_F_HELP; | 607 | features |= NF_CT_F_HELP; |
876 | read_unlock_bh(&nf_conntrack_lock); | 608 | read_unlock_bh(&nf_conntrack_lock); |
877 | 609 | ||
@@ -893,12 +625,6 @@ __nf_conntrack_alloc(const struct nf_conntrack_tuple *orig, | |||
893 | 625 | ||
894 | memset(conntrack, 0, nf_ct_cache[features].size); | 626 | memset(conntrack, 0, nf_ct_cache[features].size); |
895 | conntrack->features = features; | 627 | conntrack->features = features; |
896 | if (helper) { | ||
897 | struct nf_conn_help *help = nfct_help(conntrack); | ||
898 | NF_CT_ASSERT(help); | ||
899 | help->helper = helper; | ||
900 | } | ||
901 | |||
902 | atomic_set(&conntrack->ct_general.use, 1); | 628 | atomic_set(&conntrack->ct_general.use, 1); |
903 | conntrack->ct_general.destroy = destroy_conntrack; | 629 | conntrack->ct_general.destroy = destroy_conntrack; |
904 | conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig; | 630 | conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig; |
@@ -922,8 +648,9 @@ struct nf_conn *nf_conntrack_alloc(const struct nf_conntrack_tuple *orig, | |||
922 | struct nf_conntrack_l3proto *l3proto; | 648 | struct nf_conntrack_l3proto *l3proto; |
923 | 649 | ||
924 | l3proto = __nf_ct_l3proto_find(orig->src.l3num); | 650 | l3proto = __nf_ct_l3proto_find(orig->src.l3num); |
925 | return __nf_conntrack_alloc(orig, repl, l3proto); | 651 | return __nf_conntrack_alloc(orig, repl, l3proto, 0); |
926 | } | 652 | } |
653 | EXPORT_SYMBOL_GPL(nf_conntrack_alloc); | ||
927 | 654 | ||
928 | void nf_conntrack_free(struct nf_conn *conntrack) | 655 | void nf_conntrack_free(struct nf_conn *conntrack) |
929 | { | 656 | { |
@@ -934,32 +661,40 @@ void nf_conntrack_free(struct nf_conn *conntrack) | |||
934 | kmem_cache_free(nf_ct_cache[features].cachep, conntrack); | 661 | kmem_cache_free(nf_ct_cache[features].cachep, conntrack); |
935 | atomic_dec(&nf_conntrack_count); | 662 | atomic_dec(&nf_conntrack_count); |
936 | } | 663 | } |
664 | EXPORT_SYMBOL_GPL(nf_conntrack_free); | ||
937 | 665 | ||
938 | /* Allocate a new conntrack: we return -ENOMEM if classification | 666 | /* Allocate a new conntrack: we return -ENOMEM if classification |
939 | failed due to stress. Otherwise it really is unclassifiable. */ | 667 | failed due to stress. Otherwise it really is unclassifiable. */ |
940 | static struct nf_conntrack_tuple_hash * | 668 | static struct nf_conntrack_tuple_hash * |
941 | init_conntrack(const struct nf_conntrack_tuple *tuple, | 669 | init_conntrack(const struct nf_conntrack_tuple *tuple, |
942 | struct nf_conntrack_l3proto *l3proto, | 670 | struct nf_conntrack_l3proto *l3proto, |
943 | struct nf_conntrack_protocol *protocol, | 671 | struct nf_conntrack_l4proto *l4proto, |
944 | struct sk_buff *skb, | 672 | struct sk_buff *skb, |
945 | unsigned int dataoff) | 673 | unsigned int dataoff) |
946 | { | 674 | { |
947 | struct nf_conn *conntrack; | 675 | struct nf_conn *conntrack; |
948 | struct nf_conntrack_tuple repl_tuple; | 676 | struct nf_conntrack_tuple repl_tuple; |
949 | struct nf_conntrack_expect *exp; | 677 | struct nf_conntrack_expect *exp; |
678 | u_int32_t features = 0; | ||
950 | 679 | ||
951 | if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, protocol)) { | 680 | if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) { |
952 | DEBUGP("Can't invert tuple.\n"); | 681 | DEBUGP("Can't invert tuple.\n"); |
953 | return NULL; | 682 | return NULL; |
954 | } | 683 | } |
955 | 684 | ||
956 | conntrack = __nf_conntrack_alloc(tuple, &repl_tuple, l3proto); | 685 | read_lock_bh(&nf_conntrack_lock); |
686 | exp = __nf_conntrack_expect_find(tuple); | ||
687 | if (exp && exp->helper) | ||
688 | features = NF_CT_F_HELP; | ||
689 | read_unlock_bh(&nf_conntrack_lock); | ||
690 | |||
691 | conntrack = __nf_conntrack_alloc(tuple, &repl_tuple, l3proto, features); | ||
957 | if (conntrack == NULL || IS_ERR(conntrack)) { | 692 | if (conntrack == NULL || IS_ERR(conntrack)) { |
958 | DEBUGP("Can't allocate conntrack.\n"); | 693 | DEBUGP("Can't allocate conntrack.\n"); |
959 | return (struct nf_conntrack_tuple_hash *)conntrack; | 694 | return (struct nf_conntrack_tuple_hash *)conntrack; |
960 | } | 695 | } |
961 | 696 | ||
962 | if (!protocol->new(conntrack, skb, dataoff)) { | 697 | if (!l4proto->new(conntrack, skb, dataoff)) { |
963 | nf_conntrack_free(conntrack); | 698 | nf_conntrack_free(conntrack); |
964 | DEBUGP("init conntrack: can't track with proto module\n"); | 699 | DEBUGP("init conntrack: can't track with proto module\n"); |
965 | return NULL; | 700 | return NULL; |
@@ -974,6 +709,8 @@ init_conntrack(const struct nf_conntrack_tuple *tuple, | |||
974 | /* Welcome, Mr. Bond. We've been expecting you... */ | 709 | /* Welcome, Mr. Bond. We've been expecting you... */ |
975 | __set_bit(IPS_EXPECTED_BIT, &conntrack->status); | 710 | __set_bit(IPS_EXPECTED_BIT, &conntrack->status); |
976 | conntrack->master = exp->master; | 711 | conntrack->master = exp->master; |
712 | if (exp->helper) | ||
713 | nfct_help(conntrack)->helper = exp->helper; | ||
977 | #ifdef CONFIG_NF_CONNTRACK_MARK | 714 | #ifdef CONFIG_NF_CONNTRACK_MARK |
978 | conntrack->mark = exp->master->mark; | 715 | conntrack->mark = exp->master->mark; |
979 | #endif | 716 | #endif |
@@ -982,8 +719,13 @@ init_conntrack(const struct nf_conntrack_tuple *tuple, | |||
982 | #endif | 719 | #endif |
983 | nf_conntrack_get(&conntrack->master->ct_general); | 720 | nf_conntrack_get(&conntrack->master->ct_general); |
984 | NF_CT_STAT_INC(expect_new); | 721 | NF_CT_STAT_INC(expect_new); |
985 | } else | 722 | } else { |
723 | struct nf_conn_help *help = nfct_help(conntrack); | ||
724 | |||
725 | if (help) | ||
726 | help->helper = __nf_ct_helper_find(&repl_tuple); | ||
986 | NF_CT_STAT_INC(new); | 727 | NF_CT_STAT_INC(new); |
728 | } | ||
987 | 729 | ||
988 | /* Overload tuple linked list to put us in unconfirmed list. */ | 730 | /* Overload tuple linked list to put us in unconfirmed list. */ |
989 | list_add(&conntrack->tuplehash[IP_CT_DIR_ORIGINAL].list, &unconfirmed); | 731 | list_add(&conntrack->tuplehash[IP_CT_DIR_ORIGINAL].list, &unconfirmed); |
@@ -1006,7 +748,7 @@ resolve_normal_ct(struct sk_buff *skb, | |||
1006 | u_int16_t l3num, | 748 | u_int16_t l3num, |
1007 | u_int8_t protonum, | 749 | u_int8_t protonum, |
1008 | struct nf_conntrack_l3proto *l3proto, | 750 | struct nf_conntrack_l3proto *l3proto, |
1009 | struct nf_conntrack_protocol *proto, | 751 | struct nf_conntrack_l4proto *l4proto, |
1010 | int *set_reply, | 752 | int *set_reply, |
1011 | enum ip_conntrack_info *ctinfo) | 753 | enum ip_conntrack_info *ctinfo) |
1012 | { | 754 | { |
@@ -1016,7 +758,7 @@ resolve_normal_ct(struct sk_buff *skb, | |||
1016 | 758 | ||
1017 | if (!nf_ct_get_tuple(skb, (unsigned int)(skb->nh.raw - skb->data), | 759 | if (!nf_ct_get_tuple(skb, (unsigned int)(skb->nh.raw - skb->data), |
1018 | dataoff, l3num, protonum, &tuple, l3proto, | 760 | dataoff, l3num, protonum, &tuple, l3proto, |
1019 | proto)) { | 761 | l4proto)) { |
1020 | DEBUGP("resolve_normal_ct: Can't get tuple\n"); | 762 | DEBUGP("resolve_normal_ct: Can't get tuple\n"); |
1021 | return NULL; | 763 | return NULL; |
1022 | } | 764 | } |
@@ -1024,7 +766,7 @@ resolve_normal_ct(struct sk_buff *skb, | |||
1024 | /* look for tuple match */ | 766 | /* look for tuple match */ |
1025 | h = nf_conntrack_find_get(&tuple, NULL); | 767 | h = nf_conntrack_find_get(&tuple, NULL); |
1026 | if (!h) { | 768 | if (!h) { |
1027 | h = init_conntrack(&tuple, l3proto, proto, skb, dataoff); | 769 | h = init_conntrack(&tuple, l3proto, l4proto, skb, dataoff); |
1028 | if (!h) | 770 | if (!h) |
1029 | return NULL; | 771 | return NULL; |
1030 | if (IS_ERR(h)) | 772 | if (IS_ERR(h)) |
@@ -1062,7 +804,7 @@ nf_conntrack_in(int pf, unsigned int hooknum, struct sk_buff **pskb) | |||
1062 | struct nf_conn *ct; | 804 | struct nf_conn *ct; |
1063 | enum ip_conntrack_info ctinfo; | 805 | enum ip_conntrack_info ctinfo; |
1064 | struct nf_conntrack_l3proto *l3proto; | 806 | struct nf_conntrack_l3proto *l3proto; |
1065 | struct nf_conntrack_protocol *proto; | 807 | struct nf_conntrack_l4proto *l4proto; |
1066 | unsigned int dataoff; | 808 | unsigned int dataoff; |
1067 | u_int8_t protonum; | 809 | u_int8_t protonum; |
1068 | int set_reply = 0; | 810 | int set_reply = 0; |
@@ -1080,19 +822,19 @@ nf_conntrack_in(int pf, unsigned int hooknum, struct sk_buff **pskb) | |||
1080 | return -ret; | 822 | return -ret; |
1081 | } | 823 | } |
1082 | 824 | ||
1083 | proto = __nf_ct_proto_find((u_int16_t)pf, protonum); | 825 | l4proto = __nf_ct_l4proto_find((u_int16_t)pf, protonum); |
1084 | 826 | ||
1085 | /* It may be a special packet, error, unclean... | 827 |
1086 | * inverse of the return code tells the netfilter | 828 |
1087 | * core what to do with the packet. */ | 829 | * core what to do with the packet. */ |
1088 | if (proto->error != NULL && | 830 | if (l4proto->error != NULL && |
1089 | (ret = proto->error(*pskb, dataoff, &ctinfo, pf, hooknum)) <= 0) { | 831 | (ret = l4proto->error(*pskb, dataoff, &ctinfo, pf, hooknum)) <= 0) { |
1090 | NF_CT_STAT_INC(error); | 832 | NF_CT_STAT_INC(error); |
1091 | NF_CT_STAT_INC(invalid); | 833 | NF_CT_STAT_INC(invalid); |
1092 | return -ret; | 834 | return -ret; |
1093 | } | 835 | } |
1094 | 836 | ||
1095 | ct = resolve_normal_ct(*pskb, dataoff, pf, protonum, l3proto, proto, | 837 | ct = resolve_normal_ct(*pskb, dataoff, pf, protonum, l3proto, l4proto, |
1096 | &set_reply, &ctinfo); | 838 | &set_reply, &ctinfo); |
1097 | if (!ct) { | 839 | if (!ct) { |
1098 | /* Not valid part of a connection */ | 840 | /* Not valid part of a connection */ |
@@ -1108,7 +850,7 @@ nf_conntrack_in(int pf, unsigned int hooknum, struct sk_buff **pskb) | |||
1108 | 850 | ||
1109 | NF_CT_ASSERT((*pskb)->nfct); | 851 | NF_CT_ASSERT((*pskb)->nfct); |
1110 | 852 | ||
1111 | ret = proto->packet(ct, *pskb, dataoff, ctinfo, pf, hooknum); | 853 | ret = l4proto->packet(ct, *pskb, dataoff, ctinfo, pf, hooknum); |
1112 | if (ret < 0) { | 854 | if (ret < 0) { |
1113 | /* Invalid: inverse of the return code tells | 855 | /* Invalid: inverse of the return code tells |
1114 | * the netfilter core what to do */ | 856 | * the netfilter core what to do */ |
@@ -1124,255 +866,38 @@ nf_conntrack_in(int pf, unsigned int hooknum, struct sk_buff **pskb) | |||
1124 | 866 | ||
1125 | return ret; | 867 | return ret; |
1126 | } | 868 | } |
869 | EXPORT_SYMBOL_GPL(nf_conntrack_in); | ||
1127 | 870 | ||
1128 | int nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse, | 871 | int nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse, |
1129 | const struct nf_conntrack_tuple *orig) | 872 | const struct nf_conntrack_tuple *orig) |
1130 | { | 873 | { |
1131 | return nf_ct_invert_tuple(inverse, orig, | 874 | return nf_ct_invert_tuple(inverse, orig, |
1132 | __nf_ct_l3proto_find(orig->src.l3num), | 875 | __nf_ct_l3proto_find(orig->src.l3num), |
1133 | __nf_ct_proto_find(orig->src.l3num, | 876 | __nf_ct_l4proto_find(orig->src.l3num, |
1134 | orig->dst.protonum)); | 877 | orig->dst.protonum)); |
1135 | } | 878 | } |
879 | EXPORT_SYMBOL_GPL(nf_ct_invert_tuplepr); | ||
1136 | 880 | ||
1137 | /* Would two expected things clash? */ | 881 | /* Alter reply tuple (maybe alter helper). This is for NAT, and is |
1138 | static inline int expect_clash(const struct nf_conntrack_expect *a, | 882 | implicitly racy: see __nf_conntrack_confirm */ |
1139 | const struct nf_conntrack_expect *b) | 883 | void nf_conntrack_alter_reply(struct nf_conn *ct, |
1140 | { | 884 | const struct nf_conntrack_tuple *newreply) |
1141 | /* Part covered by intersection of masks must be unequal, | ||
1142 | otherwise they clash */ | ||
1143 | struct nf_conntrack_tuple intersect_mask; | ||
1144 | int count; | ||
1145 | |||
1146 | intersect_mask.src.l3num = a->mask.src.l3num & b->mask.src.l3num; | ||
1147 | intersect_mask.src.u.all = a->mask.src.u.all & b->mask.src.u.all; | ||
1148 | intersect_mask.dst.u.all = a->mask.dst.u.all & b->mask.dst.u.all; | ||
1149 | intersect_mask.dst.protonum = a->mask.dst.protonum | ||
1150 | & b->mask.dst.protonum; | ||
1151 | |||
1152 | for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++){ | ||
1153 | intersect_mask.src.u3.all[count] = | ||
1154 | a->mask.src.u3.all[count] & b->mask.src.u3.all[count]; | ||
1155 | } | ||
1156 | |||
1157 | for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++){ | ||
1158 | intersect_mask.dst.u3.all[count] = | ||
1159 | a->mask.dst.u3.all[count] & b->mask.dst.u3.all[count]; | ||
1160 | } | ||
1161 | |||
1162 | return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask); | ||
1163 | } | ||
1164 | |||
1165 | static inline int expect_matches(const struct nf_conntrack_expect *a, | ||
1166 | const struct nf_conntrack_expect *b) | ||
1167 | { | ||
1168 | return a->master == b->master | ||
1169 | && nf_ct_tuple_equal(&a->tuple, &b->tuple) | ||
1170 | && nf_ct_tuple_equal(&a->mask, &b->mask); | ||
1171 | } | ||
1172 | |||
1173 | /* Generally a bad idea to call this: could have matched already. */ | ||
1174 | void nf_conntrack_unexpect_related(struct nf_conntrack_expect *exp) | ||
1175 | { | ||
1176 | struct nf_conntrack_expect *i; | ||
1177 | |||
1178 | write_lock_bh(&nf_conntrack_lock); | ||
1179 | /* choose the oldest expectation to evict */ | ||
1180 | list_for_each_entry_reverse(i, &nf_conntrack_expect_list, list) { | ||
1181 | if (expect_matches(i, exp) && del_timer(&i->timeout)) { | ||
1182 | nf_ct_unlink_expect(i); | ||
1183 | write_unlock_bh(&nf_conntrack_lock); | ||
1184 | nf_conntrack_expect_put(i); | ||
1185 | return; | ||
1186 | } | ||
1187 | } | ||
1188 | write_unlock_bh(&nf_conntrack_lock); | ||
1189 | } | ||
1190 | |||
1191 | /* We don't increase the master conntrack refcount for non-fulfilled | ||
1192 | * conntracks. During the conntrack destruction, the expectations are | ||
1193 | * always killed before the conntrack itself */ | ||
1194 | struct nf_conntrack_expect *nf_conntrack_expect_alloc(struct nf_conn *me) | ||
1195 | { | ||
1196 | struct nf_conntrack_expect *new; | ||
1197 | |||
1198 | new = kmem_cache_alloc(nf_conntrack_expect_cachep, GFP_ATOMIC); | ||
1199 | if (!new) { | ||
1200 | DEBUGP("expect_related: OOM allocating expect\n"); | ||
1201 | return NULL; | ||
1202 | } | ||
1203 | new->master = me; | ||
1204 | atomic_set(&new->use, 1); | ||
1205 | return new; | ||
1206 | } | ||
1207 | |||
1208 | void nf_conntrack_expect_put(struct nf_conntrack_expect *exp) | ||
1209 | { | ||
1210 | if (atomic_dec_and_test(&exp->use)) | ||
1211 | kmem_cache_free(nf_conntrack_expect_cachep, exp); | ||
1212 | } | ||
1213 | |||
1214 | static void nf_conntrack_expect_insert(struct nf_conntrack_expect *exp) | ||
1215 | { | ||
1216 | struct nf_conn_help *master_help = nfct_help(exp->master); | ||
1217 | |||
1218 | atomic_inc(&exp->use); | ||
1219 | master_help->expecting++; | ||
1220 | list_add(&exp->list, &nf_conntrack_expect_list); | ||
1221 | |||
1222 | init_timer(&exp->timeout); | ||
1223 | exp->timeout.data = (unsigned long)exp; | ||
1224 | exp->timeout.function = expectation_timed_out; | ||
1225 | exp->timeout.expires = jiffies + master_help->helper->timeout * HZ; | ||
1226 | add_timer(&exp->timeout); | ||
1227 | |||
1228 | exp->id = ++nf_conntrack_expect_next_id; | ||
1229 | atomic_inc(&exp->use); | ||
1230 | NF_CT_STAT_INC(expect_create); | ||
1231 | } | ||
1232 | |||
1233 | /* Race with expectations being used means we could have none to find; OK. */ | ||
1234 | static void evict_oldest_expect(struct nf_conn *master) | ||
1235 | { | ||
1236 | struct nf_conntrack_expect *i; | ||
1237 | |||
1238 | list_for_each_entry_reverse(i, &nf_conntrack_expect_list, list) { | ||
1239 | if (i->master == master) { | ||
1240 | if (del_timer(&i->timeout)) { | ||
1241 | nf_ct_unlink_expect(i); | ||
1242 | nf_conntrack_expect_put(i); | ||
1243 | } | ||
1244 | break; | ||
1245 | } | ||
1246 | } | ||
1247 | } | ||
1248 | |||
1249 | static inline int refresh_timer(struct nf_conntrack_expect *i) | ||
1250 | { | ||
1251 | struct nf_conn_help *master_help = nfct_help(i->master); | ||
1252 | |||
1253 | if (!del_timer(&i->timeout)) | ||
1254 | return 0; | ||
1255 | |||
1256 | i->timeout.expires = jiffies + master_help->helper->timeout*HZ; | ||
1257 | add_timer(&i->timeout); | ||
1258 | return 1; | ||
1259 | } | ||
1260 | |||
1261 | int nf_conntrack_expect_related(struct nf_conntrack_expect *expect) | ||
1262 | { | 885 | { |
1263 | struct nf_conntrack_expect *i; | ||
1264 | struct nf_conn *master = expect->master; | ||
1265 | struct nf_conn_help *master_help = nfct_help(master); | ||
1266 | int ret; | ||
1267 | |||
1268 | NF_CT_ASSERT(master_help); | ||
1269 | |||
1270 | DEBUGP("nf_conntrack_expect_related %p\n", related_to); | ||
1271 | DEBUGP("tuple: "); NF_CT_DUMP_TUPLE(&expect->tuple); | ||
1272 | DEBUGP("mask: "); NF_CT_DUMP_TUPLE(&expect->mask); | ||
1273 | |||
1274 | write_lock_bh(&nf_conntrack_lock); | ||
1275 | list_for_each_entry(i, &nf_conntrack_expect_list, list) { | ||
1276 | if (expect_matches(i, expect)) { | ||
1277 | /* Refresh timer: if it's dying, ignore.. */ | ||
1278 | if (refresh_timer(i)) { | ||
1279 | ret = 0; | ||
1280 | goto out; | ||
1281 | } | ||
1282 | } else if (expect_clash(i, expect)) { | ||
1283 | ret = -EBUSY; | ||
1284 | goto out; | ||
1285 | } | ||
1286 | } | ||
1287 | /* Will be over limit? */ | ||
1288 | if (master_help->helper->max_expected && | ||
1289 | master_help->expecting >= master_help->helper->max_expected) | ||
1290 | evict_oldest_expect(master); | ||
1291 | |||
1292 | nf_conntrack_expect_insert(expect); | ||
1293 | nf_conntrack_expect_event(IPEXP_NEW, expect); | ||
1294 | ret = 0; | ||
1295 | out: | ||
1296 | write_unlock_bh(&nf_conntrack_lock); | ||
1297 | return ret; | ||
1298 | } | ||
1299 | |||
1300 | int nf_conntrack_helper_register(struct nf_conntrack_helper *me) | ||
1301 | { | ||
1302 | int ret; | ||
1303 | BUG_ON(me->timeout == 0); | ||
1304 | |||
1305 | ret = nf_conntrack_register_cache(NF_CT_F_HELP, "nf_conntrack:help", | ||
1306 | sizeof(struct nf_conn) | ||
1307 | + sizeof(struct nf_conn_help) | ||
1308 | + __alignof__(struct nf_conn_help)); | ||
1309 | if (ret < 0) { | ||
1310 | printk(KERN_ERR "nf_conntrack_helper_reigster: Unable to create slab cache for conntracks\n"); | ||
1311 | return ret; | ||
1312 | } | ||
1313 | write_lock_bh(&nf_conntrack_lock); | ||
1314 | list_add(&me->list, &helpers); | ||
1315 | write_unlock_bh(&nf_conntrack_lock); | ||
1316 | |||
1317 | return 0; | ||
1318 | } | ||
1319 | |||
1320 | struct nf_conntrack_helper * | ||
1321 | __nf_conntrack_helper_find_byname(const char *name) | ||
1322 | { | ||
1323 | struct nf_conntrack_helper *h; | ||
1324 | |||
1325 | list_for_each_entry(h, &helpers, list) { | ||
1326 | if (!strcmp(h->name, name)) | ||
1327 | return h; | ||
1328 | } | ||
1329 | |||
1330 | return NULL; | ||
1331 | } | ||
1332 | |||
1333 | static inline void unhelp(struct nf_conntrack_tuple_hash *i, | ||
1334 | const struct nf_conntrack_helper *me) | ||
1335 | { | ||
1336 | struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(i); | ||
1337 | struct nf_conn_help *help = nfct_help(ct); | 886 | struct nf_conn_help *help = nfct_help(ct); |
1338 | 887 | ||
1339 | if (help && help->helper == me) { | ||
1340 | nf_conntrack_event(IPCT_HELPER, ct); | ||
1341 | help->helper = NULL; | ||
1342 | } | ||
1343 | } | ||
1344 | |||
1345 | void nf_conntrack_helper_unregister(struct nf_conntrack_helper *me) | ||
1346 | { | ||
1347 | unsigned int i; | ||
1348 | struct nf_conntrack_tuple_hash *h; | ||
1349 | struct nf_conntrack_expect *exp, *tmp; | ||
1350 | |||
1351 | /* Need write lock here, to delete helper. */ | ||
1352 | write_lock_bh(&nf_conntrack_lock); | 888 | write_lock_bh(&nf_conntrack_lock); |
1353 | list_del(&me->list); | 889 | /* Should be unconfirmed, so not in hash table yet */ |
1354 | 890 | NF_CT_ASSERT(!nf_ct_is_confirmed(ct)); | |
1355 | /* Get rid of expectations */ | ||
1356 | list_for_each_entry_safe(exp, tmp, &nf_conntrack_expect_list, list) { | ||
1357 | struct nf_conn_help *help = nfct_help(exp->master); | ||
1358 | if (help->helper == me && del_timer(&exp->timeout)) { | ||
1359 | nf_ct_unlink_expect(exp); | ||
1360 | nf_conntrack_expect_put(exp); | ||
1361 | } | ||
1362 | } | ||
1363 | 891 | ||
1364 | /* Get rid of expecteds, set helpers to NULL. */ | 892 | DEBUGP("Altering reply tuple of %p to ", ct); |
1365 | list_for_each_entry(h, &unconfirmed, list) | 893 | NF_CT_DUMP_TUPLE(newreply); |
1366 | unhelp(h, me); | ||
1367 | for (i = 0; i < nf_conntrack_htable_size; i++) { | ||
1368 | list_for_each_entry(h, &nf_conntrack_hash[i], list) | ||
1369 | unhelp(h, me); | ||
1370 | } | ||
1371 | write_unlock_bh(&nf_conntrack_lock); | ||
1372 | 894 | ||
1373 | /* Someone could still be looking at the helper in a bh. */ | ||
1374 | synchronize_net(); | 896 | if (!ct->master && help && help->expecting == 0) |
897 | help->helper = __nf_ct_helper_find(newreply); | ||
898 | write_unlock_bh(&nf_conntrack_lock); | ||
1375 | } | 899 | } |
900 | EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply); | ||
1376 | 901 | ||
1377 | /* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */ | 902 | /* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */ |
1378 | void __nf_ct_refresh_acct(struct nf_conn *ct, | 903 | void __nf_ct_refresh_acct(struct nf_conn *ct, |
@@ -1399,9 +924,14 @@ void __nf_ct_refresh_acct(struct nf_conn *ct, | |||
1399 | ct->timeout.expires = extra_jiffies; | 924 | ct->timeout.expires = extra_jiffies; |
1400 | event = IPCT_REFRESH; | 925 | event = IPCT_REFRESH; |
1401 | } else { | 926 | } else { |
1402 | /* Need del_timer for race avoidance (may already be dying). */ | 927 | unsigned long newtime = jiffies + extra_jiffies; |
1403 | if (del_timer(&ct->timeout)) { | 928 | |
1404 | ct->timeout.expires = jiffies + extra_jiffies; | 929 | /* Only update the timeout if the new timeout is at least |
930 | HZ jiffies from the old timeout. Need del_timer for race | ||
931 | avoidance (may already be dying). */ | ||
932 | if (newtime - ct->timeout.expires >= HZ | ||
933 | && del_timer(&ct->timeout)) { | ||
934 | ct->timeout.expires = newtime; | ||
1405 | add_timer(&ct->timeout); | 935 | add_timer(&ct->timeout); |
1406 | event = IPCT_REFRESH; | 936 | event = IPCT_REFRESH; |
1407 | } | 937 | } |
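
The hunk above changes __nf_ct_refresh_acct() so the timer is only rearmed when the new expiry lies at least HZ jiffies beyond the current one, saving a del_timer()/add_timer() pair for every packet on a busy connection. A rough userspace sketch of that rate-limiting idea, using a plain tick counter and made-up names instead of the kernel's jiffies and timer API:

    #include <stdio.h>

    #define HZ 100                        /* ticks per second, as on many kernels */

    struct conn_timer {
        unsigned long expires;            /* absolute expiry time in ticks */
    };

    /* Rearm only when the expiry would move forward by >= HZ ticks, so a
     * stream of packets on one connection touches the timer at most about
     * once per second instead of once per packet. */
    static int maybe_refresh(struct conn_timer *t, unsigned long now,
                             unsigned long extra)
    {
        unsigned long newtime = now + extra;

        if (newtime - t->expires < HZ)
            return 0;                     /* not worth rearming yet */
        t->expires = newtime;
        return 1;
    }

    int main(void)
    {
        struct conn_timer t = { .expires = 500 };
        unsigned long now;

        for (now = 450; now <= 460; now++) {   /* a burst of packets */
            int refreshed = maybe_refresh(&t, now, 300);
            printf("t=%lu refreshed=%d expires=%lu\n", now, refreshed, t.expires);
        }
        return 0;
    }

Only the first packet of the burst rearms the timer; the rest fall inside the HZ window and leave it alone, which is exactly the churn this hunk avoids.
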
@@ -1412,9 +942,10 @@ void __nf_ct_refresh_acct(struct nf_conn *ct, | |||
1412 | ct->counters[CTINFO2DIR(ctinfo)].packets++; | 942 | ct->counters[CTINFO2DIR(ctinfo)].packets++; |
1413 | ct->counters[CTINFO2DIR(ctinfo)].bytes += | 943 | ct->counters[CTINFO2DIR(ctinfo)].bytes += |
1414 | skb->len - (unsigned int)(skb->nh.raw - skb->data); | 944 | skb->len - (unsigned int)(skb->nh.raw - skb->data); |
1415 | if ((ct->counters[CTINFO2DIR(ctinfo)].packets & 0x80000000) | 945 | |
1416 | || (ct->counters[CTINFO2DIR(ctinfo)].bytes & 0x80000000)) | 946 | if ((ct->counters[CTINFO2DIR(ctinfo)].packets & 0x80000000) |
1417 | event |= IPCT_COUNTER_FILLING; | 947 | || (ct->counters[CTINFO2DIR(ctinfo)].bytes & 0x80000000)) |
948 | event |= IPCT_COUNTER_FILLING; | ||
1418 | } | 949 | } |
1419 | #endif | 950 | #endif |
1420 | 951 | ||
@@ -1424,6 +955,7 @@ void __nf_ct_refresh_acct(struct nf_conn *ct, | |||
1424 | if (event) | 955 | if (event) |
1425 | nf_conntrack_event_cache(event, skb); | 956 | nf_conntrack_event_cache(event, skb); |
1426 | } | 957 | } |
958 | EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct); | ||
1427 | 959 | ||
1428 | #if defined(CONFIG_NF_CT_NETLINK) || \ | 960 | #if defined(CONFIG_NF_CT_NETLINK) || \ |
1429 | defined(CONFIG_NF_CT_NETLINK_MODULE) | 961 | defined(CONFIG_NF_CT_NETLINK_MODULE) |
@@ -1448,6 +980,7 @@ int nf_ct_port_tuple_to_nfattr(struct sk_buff *skb, | |||
1448 | nfattr_failure: | 980 | nfattr_failure: |
1449 | return -1; | 981 | return -1; |
1450 | } | 982 | } |
983 | EXPORT_SYMBOL_GPL(nf_ct_port_tuple_to_nfattr); | ||
1451 | 984 | ||
1452 | static const size_t cta_min_proto[CTA_PROTO_MAX] = { | 985 | static const size_t cta_min_proto[CTA_PROTO_MAX] = { |
1453 | [CTA_PROTO_SRC_PORT-1] = sizeof(u_int16_t), | 986 | [CTA_PROTO_SRC_PORT-1] = sizeof(u_int16_t), |
@@ -1463,13 +996,12 @@ int nf_ct_port_nfattr_to_tuple(struct nfattr *tb[], | |||
1463 | if (nfattr_bad_size(tb, CTA_PROTO_MAX, cta_min_proto)) | 996 | if (nfattr_bad_size(tb, CTA_PROTO_MAX, cta_min_proto)) |
1464 | return -EINVAL; | 997 | return -EINVAL; |
1465 | 998 | ||
1466 | t->src.u.tcp.port = | 999 | t->src.u.tcp.port = *(__be16 *)NFA_DATA(tb[CTA_PROTO_SRC_PORT-1]); |
1467 | *(u_int16_t *)NFA_DATA(tb[CTA_PROTO_SRC_PORT-1]); | 1000 | t->dst.u.tcp.port = *(__be16 *)NFA_DATA(tb[CTA_PROTO_DST_PORT-1]); |
1468 | t->dst.u.tcp.port = | ||
1469 | *(u_int16_t *)NFA_DATA(tb[CTA_PROTO_DST_PORT-1]); | ||
1470 | 1001 | ||
1471 | return 0; | 1002 | return 0; |
1472 | } | 1003 | } |
1004 | EXPORT_SYMBOL_GPL(nf_ct_port_nfattr_to_tuple); | ||
1473 | #endif | 1005 | #endif |
1474 | 1006 | ||
1475 | /* Used by ipt_REJECT and ip6t_REJECT. */ | 1007 | /* Used by ipt_REJECT and ip6t_REJECT. */ |
@@ -1490,6 +1022,7 @@ void __nf_conntrack_attach(struct sk_buff *nskb, struct sk_buff *skb) | |||
1490 | nskb->nfctinfo = ctinfo; | 1022 | nskb->nfctinfo = ctinfo; |
1491 | nf_conntrack_get(nskb->nfct); | 1023 | nf_conntrack_get(nskb->nfct); |
1492 | } | 1024 | } |
1025 | EXPORT_SYMBOL_GPL(__nf_conntrack_attach); | ||
1493 | 1026 | ||
1494 | static inline int | 1027 | static inline int |
1495 | do_iter(const struct nf_conntrack_tuple_hash *i, | 1028 | do_iter(const struct nf_conntrack_tuple_hash *i, |
@@ -1543,6 +1076,7 @@ nf_ct_iterate_cleanup(int (*iter)(struct nf_conn *i, void *data), void *data) | |||
1543 | nf_ct_put(ct); | 1076 | nf_ct_put(ct); |
1544 | } | 1077 | } |
1545 | } | 1078 | } |
1079 | EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup); | ||
1546 | 1080 | ||
1547 | static int kill_all(struct nf_conn *i, void *data) | 1081 | static int kill_all(struct nf_conn *i, void *data) |
1548 | { | 1082 | { |
@@ -1562,6 +1096,7 @@ void nf_conntrack_flush() | |||
1562 | { | 1096 | { |
1563 | nf_ct_iterate_cleanup(kill_all, NULL); | 1097 | nf_ct_iterate_cleanup(kill_all, NULL); |
1564 | } | 1098 | } |
1099 | EXPORT_SYMBOL_GPL(nf_conntrack_flush); | ||
1565 | 1100 | ||
1566 | /* Mishearing the voices in his head, our hero wonders how he's | 1101 | /* Mishearing the voices in his head, our hero wonders how he's |
1567 | supposed to kill the mall. */ | 1102 | supposed to kill the mall. */ |
@@ -1599,6 +1134,8 @@ void nf_conntrack_cleanup(void) | |||
1599 | free_conntrack_hash(nf_conntrack_hash, nf_conntrack_vmalloc, | 1134 | free_conntrack_hash(nf_conntrack_hash, nf_conntrack_vmalloc, |
1600 | nf_conntrack_htable_size); | 1135 | nf_conntrack_htable_size); |
1601 | 1136 | ||
1137 | nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_generic); | ||
1138 | |||
1602 | /* free l3proto protocol tables */ | 1139 | /* free l3proto protocol tables */ |
1603 | for (i = 0; i < PF_MAX; i++) | 1140 | for (i = 0; i < PF_MAX; i++) |
1604 | if (nf_ct_protos[i]) { | 1141 | if (nf_ct_protos[i]) { |
@@ -1724,10 +1261,14 @@ int __init nf_conntrack_init(void) | |||
1724 | goto err_free_conntrack_slab; | 1261 | goto err_free_conntrack_slab; |
1725 | } | 1262 | } |
1726 | 1263 | ||
1264 | ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_generic); | ||
1265 | if (ret < 0) | ||
1266 | goto out_free_expect_slab; | ||
1267 | |||
1727 | /* Don't NEED lock here, but good form anyway. */ | 1268 | /* Don't NEED lock here, but good form anyway. */ |
1728 | write_lock_bh(&nf_conntrack_lock); | 1269 | write_lock_bh(&nf_conntrack_lock); |
1729 | for (i = 0; i < PF_MAX; i++) | 1270 | for (i = 0; i < AF_MAX; i++) |
1730 | nf_ct_l3protos[i] = &nf_conntrack_generic_l3proto; | 1271 | nf_ct_l3protos[i] = &nf_conntrack_l3proto_generic; |
1731 | write_unlock_bh(&nf_conntrack_lock); | 1272 | write_unlock_bh(&nf_conntrack_lock); |
1732 | 1273 | ||
1733 | /* For use by REJECT target */ | 1274 | /* For use by REJECT target */ |
@@ -1741,6 +1282,8 @@ int __init nf_conntrack_init(void) | |||
1741 | 1282 | ||
1742 | return ret; | 1283 | return ret; |
1743 | 1284 | ||
1285 | out_free_expect_slab: | ||
1286 | kmem_cache_destroy(nf_conntrack_expect_cachep); | ||
1744 | err_free_conntrack_slab: | 1287 | err_free_conntrack_slab: |
1745 | nf_conntrack_unregister_cache(NF_CT_F_BASIC); | 1288 | nf_conntrack_unregister_cache(NF_CT_F_BASIC); |
1746 | err_free_hash: | 1289 | err_free_hash: |
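
With the init changes above, the lookup tables always hold something usable: nf_conntrack_l4proto_generic is registered during nf_conntrack_init() and every address-family slot initially points at nf_conntrack_l3proto_generic, so a lookup such as __nf_ct_l4proto_find() can fall back to a generic tracker instead of returning nothing. A simplified userspace sketch of that table-with-generic-fallback dispatch; the structure and function names are illustrative, not the kernel's:

    #include <stdio.h>

    #define PROTO_MAX 256

    struct l4proto_ops {
        const char *name;
        int (*packet)(int len);           /* stand-in for the per-protocol handler */
    };

    static int generic_packet(int len) { (void)len; return 1; }  /* accept anything */
    static int tcp_packet(int len)     { return len >= 20; }     /* toy sanity check */

    static const struct l4proto_ops generic_ops = { "generic", generic_packet };
    static const struct l4proto_ops *l4protos[PROTO_MAX];

    /* Lookup never comes back empty: unregistered protocol numbers fall
     * back to the generic handler registered at init time. */
    static const struct l4proto_ops *l4proto_find(unsigned int protonum)
    {
        if (protonum < PROTO_MAX && l4protos[protonum])
            return l4protos[protonum];
        return &generic_ops;
    }

    int main(void)
    {
        static const struct l4proto_ops tcp_ops = { "tcp", tcp_packet };

        l4protos[6] = &tcp_ops;                        /* register IPPROTO_TCP */
        printf("%s -> %d\n", l4proto_find(6)->name, l4proto_find(6)->packet(40));
        printf("%s -> %d\n", l4proto_find(132)->name, l4proto_find(132)->packet(4));
        return 0;
    }
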
diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c new file mode 100644 index 000000000000..1a223e0c0856 --- /dev/null +++ b/net/netfilter/nf_conntrack_ecache.c | |||
@@ -0,0 +1,93 @@ | |||
1 | /* Event cache for netfilter. */ | ||
2 | |||
3 | /* (C) 1999-2001 Paul `Rusty' Russell | ||
4 | * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org> | ||
5 | * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | */ | ||
11 | |||
12 | #include <linux/types.h> | ||
13 | #include <linux/netfilter.h> | ||
14 | #include <linux/skbuff.h> | ||
15 | #include <linux/vmalloc.h> | ||
16 | #include <linux/stddef.h> | ||
17 | #include <linux/err.h> | ||
18 | #include <linux/percpu.h> | ||
19 | #include <linux/notifier.h> | ||
20 | #include <linux/kernel.h> | ||
21 | #include <linux/netdevice.h> | ||
22 | |||
23 | #include <net/netfilter/nf_conntrack.h> | ||
24 | #include <net/netfilter/nf_conntrack_core.h> | ||
25 | |||
26 | ATOMIC_NOTIFIER_HEAD(nf_conntrack_chain); | ||
27 | EXPORT_SYMBOL_GPL(nf_conntrack_chain); | ||
28 | |||
29 | ATOMIC_NOTIFIER_HEAD(nf_conntrack_expect_chain); | ||
30 | EXPORT_SYMBOL_GPL(nf_conntrack_expect_chain); | ||
31 | |||
32 | DEFINE_PER_CPU(struct nf_conntrack_ecache, nf_conntrack_ecache); | ||
33 | EXPORT_PER_CPU_SYMBOL_GPL(nf_conntrack_ecache); | ||
34 | |||
35 | /* deliver cached events and clear cache entry - must be called with locally | ||
36 | * disabled softirqs */ | ||
37 | static inline void | ||
38 | __nf_ct_deliver_cached_events(struct nf_conntrack_ecache *ecache) | ||
39 | { | ||
40 | if (nf_ct_is_confirmed(ecache->ct) && !nf_ct_is_dying(ecache->ct) | ||
41 | && ecache->events) | ||
42 | atomic_notifier_call_chain(&nf_conntrack_chain, ecache->events, | ||
43 | ecache->ct); | ||
44 | |||
45 | ecache->events = 0; | ||
46 | nf_ct_put(ecache->ct); | ||
47 | ecache->ct = NULL; | ||
48 | } | ||
49 | |||
50 | /* Deliver all cached events for a particular conntrack. This is called | ||
51 | * by code prior to async packet handling for freeing the skb */ | ||
52 | void nf_ct_deliver_cached_events(const struct nf_conn *ct) | ||
53 | { | ||
54 | struct nf_conntrack_ecache *ecache; | ||
55 | |||
56 | local_bh_disable(); | ||
57 | ecache = &__get_cpu_var(nf_conntrack_ecache); | ||
58 | if (ecache->ct == ct) | ||
59 | __nf_ct_deliver_cached_events(ecache); | ||
60 | local_bh_enable(); | ||
61 | } | ||
62 | EXPORT_SYMBOL_GPL(nf_ct_deliver_cached_events); | ||
63 | |||
64 | /* Deliver cached events for old pending events, if current conntrack != old */ | ||
65 | void __nf_ct_event_cache_init(struct nf_conn *ct) | ||
66 | { | ||
67 | struct nf_conntrack_ecache *ecache; | ||
68 | |||
69 | /* take care of delivering potentially old events */ | ||
70 | ecache = &__get_cpu_var(nf_conntrack_ecache); | ||
71 | BUG_ON(ecache->ct == ct); | ||
72 | if (ecache->ct) | ||
73 | __nf_ct_deliver_cached_events(ecache); | ||
74 | /* initialize for this conntrack/packet */ | ||
75 | ecache->ct = ct; | ||
76 | nf_conntrack_get(&ct->ct_general); | ||
77 | } | ||
78 | EXPORT_SYMBOL_GPL(__nf_ct_event_cache_init); | ||
79 | |||
80 | /* flush the event cache - touches other CPU's data and must not be called | ||
81 | * while packets are still passing through the code */ | ||
82 | void nf_ct_event_cache_flush(void) | ||
83 | { | ||
84 | struct nf_conntrack_ecache *ecache; | ||
85 | int cpu; | ||
86 | |||
87 | for_each_possible_cpu(cpu) { | ||
88 | ecache = &per_cpu(nf_conntrack_ecache, cpu); | ||
89 | if (ecache->ct) | ||
90 | nf_ct_put(ecache->ct); | ||
91 | } | ||
92 | } | ||
93 | |||
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c new file mode 100644 index 000000000000..588d37937046 --- /dev/null +++ b/net/netfilter/nf_conntrack_expect.c | |||
@@ -0,0 +1,442 @@ | |||
1 | /* Expectation handling for nf_conntrack. */ | ||
2 | |||
3 | /* (C) 1999-2001 Paul `Rusty' Russell | ||
4 | * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org> | ||
5 | * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | */ | ||
11 | |||
12 | #include <linux/types.h> | ||
13 | #include <linux/netfilter.h> | ||
14 | #include <linux/skbuff.h> | ||
15 | #include <linux/proc_fs.h> | ||
16 | #include <linux/seq_file.h> | ||
17 | #include <linux/stddef.h> | ||
18 | #include <linux/slab.h> | ||
19 | #include <linux/err.h> | ||
20 | #include <linux/percpu.h> | ||
21 | #include <linux/kernel.h> | ||
22 | |||
23 | #include <net/netfilter/nf_conntrack.h> | ||
24 | #include <net/netfilter/nf_conntrack_core.h> | ||
25 | #include <net/netfilter/nf_conntrack_expect.h> | ||
26 | #include <net/netfilter/nf_conntrack_helper.h> | ||
27 | #include <net/netfilter/nf_conntrack_tuple.h> | ||
28 | |||
29 | LIST_HEAD(nf_conntrack_expect_list); | ||
30 | EXPORT_SYMBOL_GPL(nf_conntrack_expect_list); | ||
31 | |||
32 | kmem_cache_t *nf_conntrack_expect_cachep __read_mostly; | ||
33 | static unsigned int nf_conntrack_expect_next_id; | ||
34 | |||
35 | /* nf_conntrack_expect helper functions */ | ||
36 | void nf_ct_unlink_expect(struct nf_conntrack_expect *exp) | ||
37 | { | ||
38 | struct nf_conn_help *master_help = nfct_help(exp->master); | ||
39 | |||
40 | NF_CT_ASSERT(master_help); | ||
41 | NF_CT_ASSERT(!timer_pending(&exp->timeout)); | ||
42 | |||
43 | list_del(&exp->list); | ||
44 | NF_CT_STAT_INC(expect_delete); | ||
45 | master_help->expecting--; | ||
46 | nf_conntrack_expect_put(exp); | ||
47 | } | ||
48 | EXPORT_SYMBOL_GPL(nf_ct_unlink_expect); | ||
49 | |||
50 | static void expectation_timed_out(unsigned long ul_expect) | ||
51 | { | ||
52 | struct nf_conntrack_expect *exp = (void *)ul_expect; | ||
53 | |||
54 | write_lock_bh(&nf_conntrack_lock); | ||
55 | nf_ct_unlink_expect(exp); | ||
56 | write_unlock_bh(&nf_conntrack_lock); | ||
57 | nf_conntrack_expect_put(exp); | ||
58 | } | ||
59 | |||
60 | struct nf_conntrack_expect * | ||
61 | __nf_conntrack_expect_find(const struct nf_conntrack_tuple *tuple) | ||
62 | { | ||
63 | struct nf_conntrack_expect *i; | ||
64 | |||
65 | list_for_each_entry(i, &nf_conntrack_expect_list, list) { | ||
66 | if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask)) | ||
67 | return i; | ||
68 | } | ||
69 | return NULL; | ||
70 | } | ||
71 | EXPORT_SYMBOL_GPL(__nf_conntrack_expect_find); | ||
72 | |||
73 | /* Just find an expectation corresponding to a tuple. */ | ||
74 | struct nf_conntrack_expect * | ||
75 | nf_conntrack_expect_find_get(const struct nf_conntrack_tuple *tuple) | ||
76 | { | ||
77 | struct nf_conntrack_expect *i; | ||
78 | |||
79 | read_lock_bh(&nf_conntrack_lock); | ||
80 | i = __nf_conntrack_expect_find(tuple); | ||
81 | if (i) | ||
82 | atomic_inc(&i->use); | ||
83 | read_unlock_bh(&nf_conntrack_lock); | ||
84 | |||
85 | return i; | ||
86 | } | ||
87 | EXPORT_SYMBOL_GPL(nf_conntrack_expect_find_get); | ||
88 | |||
89 | /* If an expectation for this connection is found, it gets deleted from | ||
90 | * the global list, then returned. */ | ||
91 | struct nf_conntrack_expect * | ||
92 | find_expectation(const struct nf_conntrack_tuple *tuple) | ||
93 | { | ||
94 | struct nf_conntrack_expect *i; | ||
95 | |||
96 | list_for_each_entry(i, &nf_conntrack_expect_list, list) { | ||
97 | /* If master is not in hash table yet (ie. packet hasn't left | ||
98 | this machine yet), how can other end know about expected? | ||
99 | Hence these are not the droids you are looking for (if | ||
100 | master ct never got confirmed, we'd hold a reference to it | ||
101 | and weird things would happen to future packets). */ | ||
102 | if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) | ||
103 | && nf_ct_is_confirmed(i->master)) { | ||
104 | if (i->flags & NF_CT_EXPECT_PERMANENT) { | ||
105 | atomic_inc(&i->use); | ||
106 | return i; | ||
107 | } else if (del_timer(&i->timeout)) { | ||
108 | nf_ct_unlink_expect(i); | ||
109 | return i; | ||
110 | } | ||
111 | } | ||
112 | } | ||
113 | return NULL; | ||
114 | } | ||
115 | |||
116 | /* delete all expectations for this conntrack */ | ||
117 | void nf_ct_remove_expectations(struct nf_conn *ct) | ||
118 | { | ||
119 | struct nf_conntrack_expect *i, *tmp; | ||
120 | struct nf_conn_help *help = nfct_help(ct); | ||
121 | |||
122 | /* Optimization: most connections never expect any others. */ | ||
123 | if (!help || help->expecting == 0) | ||
124 | return; | ||
125 | |||
126 | list_for_each_entry_safe(i, tmp, &nf_conntrack_expect_list, list) { | ||
127 | if (i->master == ct && del_timer(&i->timeout)) { | ||
128 | nf_ct_unlink_expect(i); | ||
129 | nf_conntrack_expect_put(i); | ||
130 | } | ||
131 | } | ||
132 | } | ||
133 | EXPORT_SYMBOL_GPL(nf_ct_remove_expectations); | ||
134 | |||
135 | /* Would two expected things clash? */ | ||
136 | static inline int expect_clash(const struct nf_conntrack_expect *a, | ||
137 | const struct nf_conntrack_expect *b) | ||
138 | { | ||
139 | /* Part covered by intersection of masks must be unequal, | ||
140 | otherwise they clash */ | ||
141 | struct nf_conntrack_tuple intersect_mask; | ||
142 | int count; | ||
143 | |||
144 | intersect_mask.src.l3num = a->mask.src.l3num & b->mask.src.l3num; | ||
145 | intersect_mask.src.u.all = a->mask.src.u.all & b->mask.src.u.all; | ||
146 | intersect_mask.dst.u.all = a->mask.dst.u.all & b->mask.dst.u.all; | ||
147 | intersect_mask.dst.protonum = a->mask.dst.protonum | ||
148 | & b->mask.dst.protonum; | ||
149 | |||
150 | for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++){ | ||
151 | intersect_mask.src.u3.all[count] = | ||
152 | a->mask.src.u3.all[count] & b->mask.src.u3.all[count]; | ||
153 | } | ||
154 | |||
155 | for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++){ | ||
156 | intersect_mask.dst.u3.all[count] = | ||
157 | a->mask.dst.u3.all[count] & b->mask.dst.u3.all[count]; | ||
158 | } | ||
159 | |||
160 | return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask); | ||
161 | } | ||
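
expect_clash() above treats two pending expectations as clashing when their tuples agree on every bit that both masks pin down, meaning a single future packet could satisfy either of them. A cut-down userspace sketch with a one-word toy tuple shows the same intersection test; real tuples also carry addresses, ports and a protocol number:

    #include <stdio.h>
    #include <stdint.h>

    struct toy_expect {
        uint32_t tuple;                   /* e.g. an expected destination port */
        uint32_t mask;                    /* which bits of the tuple are fixed */
    };

    /* Clash if, restricted to the bits BOTH expectations pin down, the
     * tuples are equal - a fully wildcarded expectation therefore clashes
     * with everything, since the intersection is empty. */
    static int expect_clash(const struct toy_expect *a, const struct toy_expect *b)
    {
        uint32_t intersect = a->mask & b->mask;

        return (a->tuple & intersect) == (b->tuple & intersect);
    }

    int main(void)
    {
        struct toy_expect ftp_data = { .tuple = 0x5000, .mask = 0xffff };
        struct toy_expect any_port = { .tuple = 0x0000, .mask = 0x0000 };
        struct toy_expect other    = { .tuple = 0x5001, .mask = 0xffff };

        printf("ftp_data vs any_port: %d\n", expect_clash(&ftp_data, &any_port));
        printf("ftp_data vs other:    %d\n", expect_clash(&ftp_data, &other));
        return 0;
    }
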
162 | |||
163 | static inline int expect_matches(const struct nf_conntrack_expect *a, | ||
164 | const struct nf_conntrack_expect *b) | ||
165 | { | ||
166 | return a->master == b->master | ||
167 | && nf_ct_tuple_equal(&a->tuple, &b->tuple) | ||
168 | && nf_ct_tuple_equal(&a->mask, &b->mask); | ||
169 | } | ||
170 | |||
171 | /* Generally a bad idea to call this: could have matched already. */ | ||
172 | void nf_conntrack_unexpect_related(struct nf_conntrack_expect *exp) | ||
173 | { | ||
174 | struct nf_conntrack_expect *i; | ||
175 | |||
176 | write_lock_bh(&nf_conntrack_lock); | ||
177 | /* choose the oldest expectation to evict */ | ||
178 | list_for_each_entry_reverse(i, &nf_conntrack_expect_list, list) { | ||
179 | if (expect_matches(i, exp) && del_timer(&i->timeout)) { | ||
180 | nf_ct_unlink_expect(i); | ||
181 | write_unlock_bh(&nf_conntrack_lock); | ||
182 | nf_conntrack_expect_put(i); | ||
183 | return; | ||
184 | } | ||
185 | } | ||
186 | write_unlock_bh(&nf_conntrack_lock); | ||
187 | } | ||
188 | EXPORT_SYMBOL_GPL(nf_conntrack_unexpect_related); | ||
189 | |||
190 | /* We don't increase the master conntrack refcount for non-fulfilled | ||
191 | * conntracks. During the conntrack destruction, the expectations are | ||
192 | * always killed before the conntrack itself */ | ||
193 | struct nf_conntrack_expect *nf_conntrack_expect_alloc(struct nf_conn *me) | ||
194 | { | ||
195 | struct nf_conntrack_expect *new; | ||
196 | |||
197 | new = kmem_cache_alloc(nf_conntrack_expect_cachep, GFP_ATOMIC); | ||
198 | if (!new) | ||
199 | return NULL; | ||
200 | |||
201 | new->master = me; | ||
202 | atomic_set(&new->use, 1); | ||
203 | return new; | ||
204 | } | ||
205 | EXPORT_SYMBOL_GPL(nf_conntrack_expect_alloc); | ||
206 | |||
207 | void nf_conntrack_expect_init(struct nf_conntrack_expect *exp, int family, | ||
208 | union nf_conntrack_address *saddr, | ||
209 | union nf_conntrack_address *daddr, | ||
210 | u_int8_t proto, __be16 *src, __be16 *dst) | ||
211 | { | ||
212 | int len; | ||
213 | |||
214 | if (family == AF_INET) | ||
215 | len = 4; | ||
216 | else | ||
217 | len = 16; | ||
218 | |||
219 | exp->flags = 0; | ||
220 | exp->expectfn = NULL; | ||
221 | exp->helper = NULL; | ||
222 | exp->tuple.src.l3num = family; | ||
223 | exp->tuple.dst.protonum = proto; | ||
224 | exp->mask.src.l3num = 0xFFFF; | ||
225 | exp->mask.dst.protonum = 0xFF; | ||
226 | |||
227 | if (saddr) { | ||
228 | memcpy(&exp->tuple.src.u3, saddr, len); | ||
229 | if (sizeof(exp->tuple.src.u3) > len) | ||
230 | /* address needs to be cleared for nf_ct_tuple_equal */ | ||
231 | memset((void *)&exp->tuple.src.u3 + len, 0x00, | ||
232 | sizeof(exp->tuple.src.u3) - len); | ||
233 | memset(&exp->mask.src.u3, 0xFF, len); | ||
234 | if (sizeof(exp->mask.src.u3) > len) | ||
235 | memset((void *)&exp->mask.src.u3 + len, 0x00, | ||
236 | sizeof(exp->mask.src.u3) - len); | ||
237 | } else { | ||
238 | memset(&exp->tuple.src.u3, 0x00, sizeof(exp->tuple.src.u3)); | ||
239 | memset(&exp->mask.src.u3, 0x00, sizeof(exp->mask.src.u3)); | ||
240 | } | ||
241 | |||
242 | if (daddr) { | ||
243 | memcpy(&exp->tuple.dst.u3, daddr, len); | ||
244 | if (sizeof(exp->tuple.dst.u3) > len) | ||
245 | /* address needs to be cleared for nf_ct_tuple_equal */ | ||
246 | memset((void *)&exp->tuple.dst.u3 + len, 0x00, | ||
247 | sizeof(exp->tuple.dst.u3) - len); | ||
248 | memset(&exp->mask.dst.u3, 0xFF, len); | ||
249 | if (sizeof(exp->mask.dst.u3) > len) | ||
250 | memset((void *)&exp->mask.dst.u3 + len, 0x00, | ||
251 | sizeof(exp->mask.dst.u3) - len); | ||
252 | } else { | ||
253 | memset(&exp->tuple.dst.u3, 0x00, sizeof(exp->tuple.dst.u3)); | ||
254 | memset(&exp->mask.dst.u3, 0x00, sizeof(exp->mask.dst.u3)); | ||
255 | } | ||
256 | |||
257 | if (src) { | ||
258 | exp->tuple.src.u.all = (__force u16)*src; | ||
259 | exp->mask.src.u.all = 0xFFFF; | ||
260 | } else { | ||
261 | exp->tuple.src.u.all = 0; | ||
262 | exp->mask.src.u.all = 0; | ||
263 | } | ||
264 | |||
265 | if (dst) { | ||
266 | exp->tuple.dst.u.all = (__force u16)*dst; | ||
267 | exp->mask.dst.u.all = 0xFFFF; | ||
268 | } else { | ||
269 | exp->tuple.dst.u.all = 0; | ||
270 | exp->mask.dst.u.all = 0; | ||
271 | } | ||
272 | } | ||
273 | EXPORT_SYMBOL_GPL(nf_conntrack_expect_init); | ||
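
nf_conntrack_expect_init() above copies only the address-family-specific number of bytes (4 for AF_INET, otherwise 16), zeroes the remainder so nf_ct_tuple_equal() still compares cleanly, and builds the mask as all-ones over just those bytes. A minimal userspace sketch of that copy-and-mask step, with toy types standing in for the conntrack tuple:

    #include <stdio.h>
    #include <string.h>

    union addr { unsigned char all[16]; };    /* large enough for IPv4 or IPv6 */

    struct toy_tuple  { union addr src; };
    struct toy_expect { struct toy_tuple tuple, mask; };

    /* Copy 'len' significant bytes, clear the tail so whole-union compares
     * work, and set the mask to all-ones over the significant bytes only. */
    static void fill_src(struct toy_expect *exp, const void *saddr, size_t len)
    {
        memset(&exp->tuple.src, 0, sizeof(exp->tuple.src));
        memset(&exp->mask.src, 0, sizeof(exp->mask.src));
        if (saddr) {
            memcpy(exp->tuple.src.all, saddr, len);
            memset(exp->mask.src.all, 0xff, len);
        }
    }

    int main(void)
    {
        struct toy_expect exp;
        unsigned char v4[4] = { 192, 168, 0, 1 };

        fill_src(&exp, v4, sizeof(v4));       /* AF_INET-style: 4 significant bytes */
        printf("mask byte 0: %02x, mask byte 4: %02x\n",
               exp.mask.src.all[0], exp.mask.src.all[4]);
        return 0;
    }
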
274 | |||
275 | void nf_conntrack_expect_put(struct nf_conntrack_expect *exp) | ||
276 | { | ||
277 | if (atomic_dec_and_test(&exp->use)) | ||
278 | kmem_cache_free(nf_conntrack_expect_cachep, exp); | ||
279 | } | ||
280 | EXPORT_SYMBOL_GPL(nf_conntrack_expect_put); | ||
281 | |||
282 | static void nf_conntrack_expect_insert(struct nf_conntrack_expect *exp) | ||
283 | { | ||
284 | struct nf_conn_help *master_help = nfct_help(exp->master); | ||
285 | |||
286 | atomic_inc(&exp->use); | ||
287 | master_help->expecting++; | ||
288 | list_add(&exp->list, &nf_conntrack_expect_list); | ||
289 | |||
290 | init_timer(&exp->timeout); | ||
291 | exp->timeout.data = (unsigned long)exp; | ||
292 | exp->timeout.function = expectation_timed_out; | ||
293 | exp->timeout.expires = jiffies + master_help->helper->timeout * HZ; | ||
294 | add_timer(&exp->timeout); | ||
295 | |||
296 | exp->id = ++nf_conntrack_expect_next_id; | ||
297 | atomic_inc(&exp->use); | ||
298 | NF_CT_STAT_INC(expect_create); | ||
299 | } | ||
300 | |||
301 | /* Race with expectations being used means we could have none to find; OK. */ | ||
302 | static void evict_oldest_expect(struct nf_conn *master) | ||
303 | { | ||
304 | struct nf_conntrack_expect *i; | ||
305 | |||
306 | list_for_each_entry_reverse(i, &nf_conntrack_expect_list, list) { | ||
307 | if (i->master == master) { | ||
308 | if (del_timer(&i->timeout)) { | ||
309 | nf_ct_unlink_expect(i); | ||
310 | nf_conntrack_expect_put(i); | ||
311 | } | ||
312 | break; | ||
313 | } | ||
314 | } | ||
315 | } | ||
316 | |||
317 | static inline int refresh_timer(struct nf_conntrack_expect *i) | ||
318 | { | ||
319 | struct nf_conn_help *master_help = nfct_help(i->master); | ||
320 | |||
321 | if (!del_timer(&i->timeout)) | ||
322 | return 0; | ||
323 | |||
324 | i->timeout.expires = jiffies + master_help->helper->timeout*HZ; | ||
325 | add_timer(&i->timeout); | ||
326 | return 1; | ||
327 | } | ||
328 | |||
329 | int nf_conntrack_expect_related(struct nf_conntrack_expect *expect) | ||
330 | { | ||
331 | struct nf_conntrack_expect *i; | ||
332 | struct nf_conn *master = expect->master; | ||
333 | struct nf_conn_help *master_help = nfct_help(master); | ||
334 | int ret; | ||
335 | |||
336 | NF_CT_ASSERT(master_help); | ||
337 | |||
338 | write_lock_bh(&nf_conntrack_lock); | ||
339 | list_for_each_entry(i, &nf_conntrack_expect_list, list) { | ||
340 | if (expect_matches(i, expect)) { | ||
341 | /* Refresh timer: if it's dying, ignore.. */ | ||
342 | if (refresh_timer(i)) { | ||
343 | ret = 0; | ||
344 | goto out; | ||
345 | } | ||
346 | } else if (expect_clash(i, expect)) { | ||
347 | ret = -EBUSY; | ||
348 | goto out; | ||
349 | } | ||
350 | } | ||
351 | /* Will be over limit? */ | ||
352 | if (master_help->helper->max_expected && | ||
353 | master_help->expecting >= master_help->helper->max_expected) | ||
354 | evict_oldest_expect(master); | ||
355 | |||
356 | nf_conntrack_expect_insert(expect); | ||
357 | nf_conntrack_expect_event(IPEXP_NEW, expect); | ||
358 | ret = 0; | ||
359 | out: | ||
360 | write_unlock_bh(&nf_conntrack_lock); | ||
361 | return ret; | ||
362 | } | ||
363 | EXPORT_SYMBOL_GPL(nf_conntrack_expect_related); | ||
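
nf_conntrack_expect_related() above refreshes the timer when an identical expectation is already queued, rejects a clashing-but-different one with -EBUSY, and evicts the master's oldest expectation once the helper's max_expected limit is hit. The toy userspace sketch below keeps only the refresh-or-evict policy (the clash check is left out) to show how the bounded per-master list behaves:

    #include <stdio.h>

    #define MAX_EXPECTED 3                /* per-master limit, as a helper might set */

    static int expects[MAX_EXPECTED];     /* pending expectations, oldest first */
    static int expecting;

    /* An already-pending expectation is only "refreshed"; inserting beyond
     * the limit first drops the oldest entry to make room. */
    static void expect_related(int tuple)
    {
        int i;

        for (i = 0; i < expecting; i++)
            if (expects[i] == tuple)
                return;                   /* already expected: just refresh */

        if (expecting == MAX_EXPECTED) {  /* over the limit: evict the oldest */
            for (i = 1; i < expecting; i++)
                expects[i - 1] = expects[i];
            expecting--;
        }
        expects[expecting++] = tuple;
    }

    int main(void)
    {
        int ports[] = { 6001, 6002, 6003, 6002, 6004 };
        int i, j;

        for (i = 0; i < 5; i++) {
            expect_related(ports[i]);
            printf("after %d:", ports[i]);
            for (j = 0; j < expecting; j++)
                printf(" %d", expects[j]);
            printf("\n");
        }
        return 0;
    }

Requesting 6002 a second time changes nothing, and adding 6004 while full pushes out 6001, mirroring what evict_oldest_expect() does for a master at its limit.
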
364 | |||
365 | #ifdef CONFIG_PROC_FS | ||
366 | static void *exp_seq_start(struct seq_file *s, loff_t *pos) | ||
367 | { | ||
368 | struct list_head *e = &nf_conntrack_expect_list; | ||
369 | loff_t i; | ||
370 | |||
371 | /* strange seq_file api calls stop even if we fail, | ||
372 | * thus we need to grab lock since stop unlocks */ | ||
373 | read_lock_bh(&nf_conntrack_lock); | ||
374 | |||
375 | if (list_empty(e)) | ||
376 | return NULL; | ||
377 | |||
378 | for (i = 0; i <= *pos; i++) { | ||
379 | e = e->next; | ||
380 | if (e == &nf_conntrack_expect_list) | ||
381 | return NULL; | ||
382 | } | ||
383 | return e; | ||
384 | } | ||
385 | |||
386 | static void *exp_seq_next(struct seq_file *s, void *v, loff_t *pos) | ||
387 | { | ||
388 | struct list_head *e = v; | ||
389 | |||
390 | ++*pos; | ||
391 | e = e->next; | ||
392 | |||
393 | if (e == &nf_conntrack_expect_list) | ||
394 | return NULL; | ||
395 | |||
396 | return e; | ||
397 | } | ||
398 | |||
399 | static void exp_seq_stop(struct seq_file *s, void *v) | ||
400 | { | ||
401 | read_unlock_bh(&nf_conntrack_lock); | ||
402 | } | ||
403 | |||
404 | static int exp_seq_show(struct seq_file *s, void *v) | ||
405 | { | ||
406 | struct nf_conntrack_expect *expect = v; | ||
407 | |||
408 | if (expect->timeout.function) | ||
409 | seq_printf(s, "%ld ", timer_pending(&expect->timeout) | ||
410 | ? (long)(expect->timeout.expires - jiffies)/HZ : 0); | ||
411 | else | ||
412 | seq_printf(s, "- "); | ||
413 | seq_printf(s, "l3proto = %u proto=%u ", | ||
414 | expect->tuple.src.l3num, | ||
415 | expect->tuple.dst.protonum); | ||
416 | print_tuple(s, &expect->tuple, | ||
417 | __nf_ct_l3proto_find(expect->tuple.src.l3num), | ||
418 | __nf_ct_l4proto_find(expect->tuple.src.l3num, | ||
419 | expect->tuple.dst.protonum)); | ||
420 | return seq_putc(s, '\n'); | ||
421 | } | ||
422 | |||
423 | static struct seq_operations exp_seq_ops = { | ||
424 | .start = exp_seq_start, | ||
425 | .next = exp_seq_next, | ||
426 | .stop = exp_seq_stop, | ||
427 | .show = exp_seq_show | ||
428 | }; | ||
429 | |||
430 | static int exp_open(struct inode *inode, struct file *file) | ||
431 | { | ||
432 | return seq_open(file, &exp_seq_ops); | ||
433 | } | ||
434 | |||
435 | struct file_operations exp_file_ops = { | ||
436 | .owner = THIS_MODULE, | ||
437 | .open = exp_open, | ||
438 | .read = seq_read, | ||
439 | .llseek = seq_lseek, | ||
440 | .release = seq_release | ||
441 | }; | ||
442 | #endif /* CONFIG_PROC_FS */ | ||
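The /proc iterator above follows the open-coded seq_file pattern: ->start() takes the lock and walks from the list head to the element at *pos (returning NULL when the offset runs past the end), ->next() bumps *pos and steps one node on, and ->stop() drops the lock even when ->start() returned NULL. The userspace sketch below reproduces just the positional walk over a circular list; the node type and the hand-built links stand in for <linux/list.h>, and locking is omitted.

/* Userspace sketch of the seq_file start/next walk used by exp_seq_*().
 * The circular list below mimics the <linux/list.h> layout. */
#include <stdio.h>

struct node {
	struct node *next;
	const char *name;
};

/* head sentinel plus three entries, linked circularly in main() */
static struct node head, a = { 0, "expect-a" }, b = { 0, "expect-b" },
		   c = { 0, "expect-c" };

/* ->start(): step from the head to the element at *pos, NULL if past end */
static struct node *seq_start(long *pos)
{
	struct node *e = &head;
	long i;

	if (head.next == &head)		/* list_empty() */
		return NULL;
	for (i = 0; i <= *pos; i++) {
		e = e->next;
		if (e == &head)
			return NULL;
	}
	return e;
}

/* ->next(): advance the position and return the following element */
static struct node *seq_next(struct node *e, long *pos)
{
	++*pos;
	e = e->next;
	return e == &head ? NULL : e;
}

int main(void)
{
	struct node *e;
	long pos = 1;			/* start at the second entry */

	head.next = &a; a.next = &b; b.next = &c; c.next = &head;

	for (e = seq_start(&pos); e; e = seq_next(e, &pos))
		printf("%ld: %s\n", pos, e->name);
	return 0;
}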
diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c index 0c17a5bd112b..92a947168761 100644 --- a/net/netfilter/nf_conntrack_ftp.c +++ b/net/netfilter/nf_conntrack_ftp.c | |||
@@ -26,12 +26,15 @@ | |||
26 | #include <net/tcp.h> | 26 | #include <net/tcp.h> |
27 | 27 | ||
28 | #include <net/netfilter/nf_conntrack.h> | 28 | #include <net/netfilter/nf_conntrack.h> |
29 | #include <net/netfilter/nf_conntrack_expect.h> | ||
30 | #include <net/netfilter/nf_conntrack_ecache.h> | ||
29 | #include <net/netfilter/nf_conntrack_helper.h> | 31 | #include <net/netfilter/nf_conntrack_helper.h> |
30 | #include <linux/netfilter/nf_conntrack_ftp.h> | 32 | #include <linux/netfilter/nf_conntrack_ftp.h> |
31 | 33 | ||
32 | MODULE_LICENSE("GPL"); | 34 | MODULE_LICENSE("GPL"); |
33 | MODULE_AUTHOR("Rusty Russell <rusty@rustcorp.com.au>"); | 35 | MODULE_AUTHOR("Rusty Russell <rusty@rustcorp.com.au>"); |
34 | MODULE_DESCRIPTION("ftp connection tracking helper"); | 36 | MODULE_DESCRIPTION("ftp connection tracking helper"); |
37 | MODULE_ALIAS("ip_conntrack_ftp"); | ||
35 | 38 | ||
36 | /* This is slow, but it's simple. --RR */ | 39 | /* This is slow, but it's simple. --RR */ |
37 | static char *ftp_buffer; | 40 | static char *ftp_buffer; |
@@ -48,7 +51,7 @@ module_param(loose, bool, 0600); | |||
48 | 51 | ||
49 | unsigned int (*nf_nat_ftp_hook)(struct sk_buff **pskb, | 52 | unsigned int (*nf_nat_ftp_hook)(struct sk_buff **pskb, |
50 | enum ip_conntrack_info ctinfo, | 53 | enum ip_conntrack_info ctinfo, |
51 | enum ip_ct_ftp_type type, | 54 | enum nf_ct_ftp_type type, |
52 | unsigned int matchoff, | 55 | unsigned int matchoff, |
53 | unsigned int matchlen, | 56 | unsigned int matchlen, |
54 | struct nf_conntrack_expect *exp, | 57 | struct nf_conntrack_expect *exp, |
@@ -71,7 +74,7 @@ static struct ftp_search { | |||
71 | size_t plen; | 74 | size_t plen; |
72 | char skip; | 75 | char skip; |
73 | char term; | 76 | char term; |
74 | enum ip_ct_ftp_type ftptype; | 77 | enum nf_ct_ftp_type ftptype; |
75 | int (*getnum)(const char *, size_t, struct nf_conntrack_man *, char); | 78 | int (*getnum)(const char *, size_t, struct nf_conntrack_man *, char); |
76 | } search[IP_CT_DIR_MAX][2] = { | 79 | } search[IP_CT_DIR_MAX][2] = { |
77 | [IP_CT_DIR_ORIGINAL] = { | 80 | [IP_CT_DIR_ORIGINAL] = { |
@@ -80,7 +83,7 @@ static struct ftp_search { | |||
80 | .plen = sizeof("PORT") - 1, | 83 | .plen = sizeof("PORT") - 1, |
81 | .skip = ' ', | 84 | .skip = ' ', |
82 | .term = '\r', | 85 | .term = '\r', |
83 | .ftptype = IP_CT_FTP_PORT, | 86 | .ftptype = NF_CT_FTP_PORT, |
84 | .getnum = try_rfc959, | 87 | .getnum = try_rfc959, |
85 | }, | 88 | }, |
86 | { | 89 | { |
@@ -88,7 +91,7 @@ static struct ftp_search { | |||
88 | .plen = sizeof("EPRT") - 1, | 91 | .plen = sizeof("EPRT") - 1, |
89 | .skip = ' ', | 92 | .skip = ' ', |
90 | .term = '\r', | 93 | .term = '\r', |
91 | .ftptype = IP_CT_FTP_EPRT, | 94 | .ftptype = NF_CT_FTP_EPRT, |
92 | .getnum = try_eprt, | 95 | .getnum = try_eprt, |
93 | }, | 96 | }, |
94 | }, | 97 | }, |
@@ -98,7 +101,7 @@ static struct ftp_search { | |||
98 | .plen = sizeof("227 ") - 1, | 101 | .plen = sizeof("227 ") - 1, |
99 | .skip = '(', | 102 | .skip = '(', |
100 | .term = ')', | 103 | .term = ')', |
101 | .ftptype = IP_CT_FTP_PASV, | 104 | .ftptype = NF_CT_FTP_PASV, |
102 | .getnum = try_rfc959, | 105 | .getnum = try_rfc959, |
103 | }, | 106 | }, |
104 | { | 107 | { |
@@ -106,7 +109,7 @@ static struct ftp_search { | |||
106 | .plen = sizeof("229 ") - 1, | 109 | .plen = sizeof("229 ") - 1, |
107 | .skip = '(', | 110 | .skip = '(', |
108 | .term = ')', | 111 | .term = ')', |
109 | .ftptype = IP_CT_FTP_EPSV, | 112 | .ftptype = NF_CT_FTP_EPSV, |
110 | .getnum = try_epsv_response, | 113 | .getnum = try_epsv_response, |
111 | }, | 114 | }, |
112 | }, | 115 | }, |
@@ -171,7 +174,7 @@ static int try_rfc959(const char *data, size_t dlen, | |||
171 | 174 | ||
172 | /* Grab port: number up to delimiter */ | 175 | /* Grab port: number up to delimiter */ |
173 | static int get_port(const char *data, int start, size_t dlen, char delim, | 176 | static int get_port(const char *data, int start, size_t dlen, char delim, |
174 | u_int16_t *port) | 177 | __be16 *port) |
175 | { | 178 | { |
176 | u_int16_t tmp_port = 0; | 179 | u_int16_t tmp_port = 0; |
177 | int i; | 180 | int i; |
@@ -317,7 +320,7 @@ static int find_pattern(const char *data, size_t dlen, | |||
317 | } | 320 | } |
318 | 321 | ||
319 | /* Look up to see if we're just after a \n. */ | 322 | /* Look up to see if we're just after a \n. */ |
320 | static int find_nl_seq(u32 seq, const struct ip_ct_ftp_master *info, int dir) | 323 | static int find_nl_seq(u32 seq, const struct nf_ct_ftp_master *info, int dir) |
321 | { | 324 | { |
322 | unsigned int i; | 325 | unsigned int i; |
323 | 326 | ||
@@ -328,7 +331,7 @@ static int find_nl_seq(u32 seq, const struct ip_ct_ftp_master *info, int dir) | |||
328 | } | 331 | } |
329 | 332 | ||
330 | /* We don't update if it's older than what we have. */ | 333 | /* We don't update if it's older than what we have. */ |
331 | static void update_nl_seq(u32 nl_seq, struct ip_ct_ftp_master *info, int dir, | 334 | static void update_nl_seq(u32 nl_seq, struct nf_ct_ftp_master *info, int dir, |
332 | struct sk_buff *skb) | 335 | struct sk_buff *skb) |
333 | { | 336 | { |
334 | unsigned int i, oldest = NUM_SEQ_TO_REMEMBER; | 337 | unsigned int i, oldest = NUM_SEQ_TO_REMEMBER; |
@@ -364,12 +367,12 @@ static int help(struct sk_buff **pskb, | |||
364 | u32 seq; | 367 | u32 seq; |
365 | int dir = CTINFO2DIR(ctinfo); | 368 | int dir = CTINFO2DIR(ctinfo); |
366 | unsigned int matchlen, matchoff; | 369 | unsigned int matchlen, matchoff; |
367 | struct ip_ct_ftp_master *ct_ftp_info = &nfct_help(ct)->help.ct_ftp_info; | 370 | struct nf_ct_ftp_master *ct_ftp_info = &nfct_help(ct)->help.ct_ftp_info; |
368 | struct nf_conntrack_expect *exp; | 371 | struct nf_conntrack_expect *exp; |
369 | struct nf_conntrack_man cmd = {}; | 372 | struct nf_conntrack_man cmd = {}; |
370 | |||
371 | unsigned int i; | 373 | unsigned int i; |
372 | int found = 0, ends_in_nl; | 374 | int found = 0, ends_in_nl; |
375 | typeof(nf_nat_ftp_hook) nf_nat_ftp; | ||
373 | 376 | ||
374 | /* Until there's been traffic both ways, don't look in packets. */ | 377 | /* Until there's been traffic both ways, don't look in packets. */ |
375 | if (ctinfo != IP_CT_ESTABLISHED | 378 | if (ctinfo != IP_CT_ESTABLISHED |
@@ -500,12 +503,12 @@ static int help(struct sk_buff **pskb, | |||
500 | .u = { .tcp = { 0 }}, | 503 | .u = { .tcp = { 0 }}, |
501 | }, | 504 | }, |
502 | .dst = { .protonum = 0xFF, | 505 | .dst = { .protonum = 0xFF, |
503 | .u = { .tcp = { 0xFFFF }}, | 506 | .u = { .tcp = { __constant_htons(0xFFFF) }}, |
504 | }, | 507 | }, |
505 | }; | 508 | }; |
506 | if (cmd.l3num == PF_INET) { | 509 | if (cmd.l3num == PF_INET) { |
507 | exp->mask.src.u3.ip = 0xFFFFFFFF; | 510 | exp->mask.src.u3.ip = htonl(0xFFFFFFFF); |
508 | exp->mask.dst.u3.ip = 0xFFFFFFFF; | 511 | exp->mask.dst.u3.ip = htonl(0xFFFFFFFF); |
509 | } else { | 512 | } else { |
510 | memset(exp->mask.src.u3.ip6, 0xFF, | 513 | memset(exp->mask.src.u3.ip6, 0xFF, |
511 | sizeof(exp->mask.src.u3.ip6)); | 514 | sizeof(exp->mask.src.u3.ip6)); |
@@ -514,13 +517,15 @@ static int help(struct sk_buff **pskb, | |||
514 | } | 517 | } |
515 | 518 | ||
516 | exp->expectfn = NULL; | 519 | exp->expectfn = NULL; |
520 | exp->helper = NULL; | ||
517 | exp->flags = 0; | 521 | exp->flags = 0; |
518 | 522 | ||
519 | /* Now, NAT might want to mangle the packet, and register the | 523 | /* Now, NAT might want to mangle the packet, and register the |
520 | * (possibly changed) expectation itself. */ | 524 | * (possibly changed) expectation itself. */ |
521 | if (nf_nat_ftp_hook) | 525 | nf_nat_ftp = rcu_dereference(nf_nat_ftp_hook); |
522 | ret = nf_nat_ftp_hook(pskb, ctinfo, search[dir][i].ftptype, | 526 | if (nf_nat_ftp && ct->status & IPS_NAT_MASK) |
523 | matchoff, matchlen, exp, &seq); | 527 | ret = nf_nat_ftp(pskb, ctinfo, search[dir][i].ftptype, |
528 | matchoff, matchlen, exp, &seq); | ||
524 | else { | 529 | else { |
525 | /* Can't expect this? Best to drop packet now. */ | 530 | /* Can't expect this? Best to drop packet now. */ |
526 | if (nf_conntrack_expect_related(exp) != 0) | 531 | if (nf_conntrack_expect_related(exp) != 0) |
@@ -584,7 +589,8 @@ static int __init nf_conntrack_ftp_init(void) | |||
584 | for (j = 0; j < 2; j++) { | 589 | for (j = 0; j < 2; j++) { |
585 | ftp[i][j].tuple.src.u.tcp.port = htons(ports[i]); | 590 | ftp[i][j].tuple.src.u.tcp.port = htons(ports[i]); |
586 | ftp[i][j].tuple.dst.protonum = IPPROTO_TCP; | 591 | ftp[i][j].tuple.dst.protonum = IPPROTO_TCP; |
587 | ftp[i][j].mask.src.u.tcp.port = 0xFFFF; | 592 | ftp[i][j].mask.src.l3num = 0xFFFF; |
593 | ftp[i][j].mask.src.u.tcp.port = htons(0xFFFF); | ||
588 | ftp[i][j].mask.dst.protonum = 0xFF; | 594 | ftp[i][j].mask.dst.protonum = 0xFF; |
589 | ftp[i][j].max_expected = 1; | 595 | ftp[i][j].max_expected = 1; |
590 | ftp[i][j].timeout = 5 * 60; /* 5 Minutes */ | 596 | ftp[i][j].timeout = 5 * 60; /* 5 Minutes */ |
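The init loop above fills in, for each configured port and each address-family entry, the tuple a packet must match for the helper to be attached, plus the mask that says which tuple bits take part in the comparison: all 16 bits of the source port (now stored in network byte order via htons(0xFFFF)), all 8 bits of the protocol number, and with this change the l3num as well. A reduced sketch of such a masked tuple comparison, with a cut-down stand-in for struct nf_conntrack_tuple:

/* Masked tuple matching as used when picking a conntrack helper.
 * The struct is a reduced stand-in for struct nf_conntrack_tuple. */
#include <stdio.h>
#include <stdint.h>
#include <sys/socket.h>
#include <arpa/inet.h>

struct tiny_tuple {
	uint16_t l3num;		/* AF_INET / AF_INET6 */
	uint16_t src_port;	/* network byte order */
	uint8_t  protonum;	/* IPPROTO_TCP, ... */
};

/* A field takes part in the comparison only where the mask bits are set. */
static int masked_match(const struct tiny_tuple *t,
			const struct tiny_tuple *helper,
			const struct tiny_tuple *mask)
{
	return (t->l3num & mask->l3num) == (helper->l3num & mask->l3num) &&
	       (t->src_port & mask->src_port) ==
	       (helper->src_port & mask->src_port) &&
	       (t->protonum & mask->protonum) ==
	       (helper->protonum & mask->protonum);
}

int main(void)
{
	struct tiny_tuple helper = { AF_INET, htons(21), 6 /* TCP */ };
	struct tiny_tuple mask   = { 0xFFFF, htons(0xFFFF), 0xFF };
	struct tiny_tuple pkt    = { AF_INET, htons(21), 6 };
	struct tiny_tuple other  = { AF_INET, htons(2121), 6 };

	printf("ftp control: %d\n", masked_match(&pkt, &helper, &mask));
	printf("other port:  %d\n", masked_match(&other, &helper, &mask));
	return 0;
}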
diff --git a/net/ipv4/netfilter/ip_conntrack_helper_h323_asn1.c b/net/netfilter/nf_conntrack_h323_asn1.c index 26dfecadb335..f6fad713d484 100644 --- a/net/ipv4/netfilter/ip_conntrack_helper_h323_asn1.c +++ b/net/netfilter/nf_conntrack_h323_asn1.c | |||
@@ -15,7 +15,7 @@ | |||
15 | #else | 15 | #else |
16 | #include <stdio.h> | 16 | #include <stdio.h> |
17 | #endif | 17 | #endif |
18 | #include <linux/netfilter_ipv4/ip_conntrack_helper_h323_asn1.h> | 18 | #include <linux/netfilter/nf_conntrack_h323_asn1.h> |
19 | 19 | ||
20 | /* Trace Flag */ | 20 | /* Trace Flag */ |
21 | #ifndef H323_TRACE | 21 | #ifndef H323_TRACE |
@@ -144,7 +144,7 @@ static decoder_t Decoders[] = { | |||
144 | /**************************************************************************** | 144 | /**************************************************************************** |
145 | * H.323 Types | 145 | * H.323 Types |
146 | ****************************************************************************/ | 146 | ****************************************************************************/ |
147 | #include "ip_conntrack_helper_h323_types.c" | 147 | #include "nf_conntrack_h323_types.c" |
148 | 148 | ||
149 | /**************************************************************************** | 149 | /**************************************************************************** |
150 | * Functions | 150 | * Functions |
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c new file mode 100644 index 000000000000..6d8568959f82 --- /dev/null +++ b/net/netfilter/nf_conntrack_h323_main.c | |||
@@ -0,0 +1,1856 @@ | |||
1 | /* | ||
2 | * H.323 connection tracking helper | ||
3 | * | ||
4 | * Copyright (c) 2006 Jing Min Zhao <zhaojingmin@users.sourceforge.net> | ||
5 | * | ||
6 | * This source code is licensed under General Public License version 2. | ||
7 | * | ||
8 | * Based on the 'brute force' H.323 connection tracking module by | ||
9 | * Jozsef Kadlecsik <kadlec@blackhole.kfki.hu> | ||
10 | * | ||
11 | * For more information, please see http://nath323.sourceforge.net/ | ||
12 | */ | ||
13 | |||
14 | #include <linux/module.h> | ||
15 | #include <linux/moduleparam.h> | ||
16 | #include <linux/ctype.h> | ||
17 | #include <linux/inet.h> | ||
18 | #include <linux/in.h> | ||
19 | #include <linux/ip.h> | ||
20 | #include <linux/udp.h> | ||
21 | #include <linux/tcp.h> | ||
22 | #include <linux/skbuff.h> | ||
23 | #include <net/route.h> | ||
24 | #include <net/ip6_route.h> | ||
25 | |||
26 | #include <net/netfilter/nf_conntrack.h> | ||
27 | #include <net/netfilter/nf_conntrack_core.h> | ||
28 | #include <net/netfilter/nf_conntrack_tuple.h> | ||
29 | #include <net/netfilter/nf_conntrack_expect.h> | ||
30 | #include <net/netfilter/nf_conntrack_ecache.h> | ||
31 | #include <net/netfilter/nf_conntrack_helper.h> | ||
32 | #include <linux/netfilter/nf_conntrack_h323.h> | ||
33 | |||
34 | #if 0 | ||
35 | #define DEBUGP printk | ||
36 | #else | ||
37 | #define DEBUGP(format, args...) | ||
38 | #endif | ||
39 | |||
40 | /* Parameters */ | ||
41 | static unsigned int default_rrq_ttl __read_mostly = 300; | ||
42 | module_param(default_rrq_ttl, uint, 0600); | ||
43 | MODULE_PARM_DESC(default_rrq_ttl, "use this TTL if it's missing in RRQ"); | ||
44 | |||
45 | static int gkrouted_only __read_mostly = 1; | ||
46 | module_param(gkrouted_only, int, 0600); | ||
47 | MODULE_PARM_DESC(gkrouted_only, "only accept calls from gatekeeper"); | ||
48 | |||
49 | static int callforward_filter __read_mostly = 1; | ||
50 | module_param(callforward_filter, bool, 0600); | ||
51 | MODULE_PARM_DESC(callforward_filter, "only create call forwarding expectations " | ||
52 | "if both endpoints are on different sides " | ||
53 | "(determined by routing information)"); | ||
54 | |||
55 | /* Hooks for NAT */ | ||
56 | int (*set_h245_addr_hook) (struct sk_buff **pskb, | ||
57 | unsigned char **data, int dataoff, | ||
58 | H245_TransportAddress *taddr, | ||
59 | union nf_conntrack_address *addr, __be16 port) | ||
60 | __read_mostly; | ||
61 | int (*set_h225_addr_hook) (struct sk_buff **pskb, | ||
62 | unsigned char **data, int dataoff, | ||
63 | TransportAddress *taddr, | ||
64 | union nf_conntrack_address *addr, __be16 port) | ||
65 | __read_mostly; | ||
66 | int (*set_sig_addr_hook) (struct sk_buff **pskb, | ||
67 | struct nf_conn *ct, | ||
68 | enum ip_conntrack_info ctinfo, | ||
69 | unsigned char **data, | ||
70 | TransportAddress *taddr, int count) __read_mostly; | ||
71 | int (*set_ras_addr_hook) (struct sk_buff **pskb, | ||
72 | struct nf_conn *ct, | ||
73 | enum ip_conntrack_info ctinfo, | ||
74 | unsigned char **data, | ||
75 | TransportAddress *taddr, int count) __read_mostly; | ||
76 | int (*nat_rtp_rtcp_hook) (struct sk_buff **pskb, | ||
77 | struct nf_conn *ct, | ||
78 | enum ip_conntrack_info ctinfo, | ||
79 | unsigned char **data, int dataoff, | ||
80 | H245_TransportAddress *taddr, | ||
81 | __be16 port, __be16 rtp_port, | ||
82 | struct nf_conntrack_expect *rtp_exp, | ||
83 | struct nf_conntrack_expect *rtcp_exp) __read_mostly; | ||
84 | int (*nat_t120_hook) (struct sk_buff **pskb, | ||
85 | struct nf_conn *ct, | ||
86 | enum ip_conntrack_info ctinfo, | ||
87 | unsigned char **data, int dataoff, | ||
88 | H245_TransportAddress *taddr, __be16 port, | ||
89 | struct nf_conntrack_expect *exp) __read_mostly; | ||
90 | int (*nat_h245_hook) (struct sk_buff **pskb, | ||
91 | struct nf_conn *ct, | ||
92 | enum ip_conntrack_info ctinfo, | ||
93 | unsigned char **data, int dataoff, | ||
94 | TransportAddress *taddr, __be16 port, | ||
95 | struct nf_conntrack_expect *exp) __read_mostly; | ||
96 | int (*nat_callforwarding_hook) (struct sk_buff **pskb, | ||
97 | struct nf_conn *ct, | ||
98 | enum ip_conntrack_info ctinfo, | ||
99 | unsigned char **data, int dataoff, | ||
100 | TransportAddress *taddr, __be16 port, | ||
101 | struct nf_conntrack_expect *exp) __read_mostly; | ||
102 | int (*nat_q931_hook) (struct sk_buff **pskb, | ||
103 | struct nf_conn *ct, | ||
104 | enum ip_conntrack_info ctinfo, | ||
105 | unsigned char **data, TransportAddress *taddr, int idx, | ||
106 | __be16 port, struct nf_conntrack_expect *exp) | ||
107 | __read_mostly; | ||
108 | |||
109 | static DEFINE_SPINLOCK(nf_h323_lock); | ||
110 | static char *h323_buffer; | ||
111 | |||
112 | static struct nf_conntrack_helper nf_conntrack_helper_h245; | ||
113 | static struct nf_conntrack_helper nf_conntrack_helper_q931[]; | ||
114 | static struct nf_conntrack_helper nf_conntrack_helper_ras[]; | ||
115 | |||
116 | /****************************************************************************/ | ||
117 | static int get_tpkt_data(struct sk_buff **pskb, unsigned int protoff, | ||
118 | struct nf_conn *ct, enum ip_conntrack_info ctinfo, | ||
119 | unsigned char **data, int *datalen, int *dataoff) | ||
120 | { | ||
121 | struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info; | ||
122 | int dir = CTINFO2DIR(ctinfo); | ||
123 | struct tcphdr _tcph, *th; | ||
124 | int tcpdatalen; | ||
125 | int tcpdataoff; | ||
126 | unsigned char *tpkt; | ||
127 | int tpktlen; | ||
128 | int tpktoff; | ||
129 | |||
130 | /* Get TCP header */ | ||
131 | th = skb_header_pointer(*pskb, protoff, sizeof(_tcph), &_tcph); | ||
132 | if (th == NULL) | ||
133 | return 0; | ||
134 | |||
135 | /* Get TCP data offset */ | ||
136 | tcpdataoff = protoff + th->doff * 4; | ||
137 | |||
138 | /* Get TCP data length */ | ||
139 | tcpdatalen = (*pskb)->len - tcpdataoff; | ||
140 | if (tcpdatalen <= 0) /* No TCP data */ | ||
141 | goto clear_out; | ||
142 | |||
143 | if (*data == NULL) { /* first TPKT */ | ||
144 | /* Get first TPKT pointer */ | ||
145 | tpkt = skb_header_pointer(*pskb, tcpdataoff, tcpdatalen, | ||
146 | h323_buffer); | ||
147 | BUG_ON(tpkt == NULL); | ||
148 | |||
149 | /* Validate TPKT identifier */ | ||
150 | if (tcpdatalen < 4 || tpkt[0] != 0x03 || tpkt[1] != 0) { | ||
151 | /* Netmeeting sends TPKT header and data separately */ | ||
152 | if (info->tpkt_len[dir] > 0) { | ||
153 | DEBUGP("nf_ct_h323: previous packet " | ||
154 | "indicated separate TPKT data of %hu " | ||
155 | "bytes\n", info->tpkt_len[dir]); | ||
156 | if (info->tpkt_len[dir] <= tcpdatalen) { | ||
157 | /* Yes, there was a TPKT header | ||
158 | * received */ | ||
159 | *data = tpkt; | ||
160 | *datalen = info->tpkt_len[dir]; | ||
161 | *dataoff = 0; | ||
162 | goto out; | ||
163 | } | ||
164 | |||
165 | /* Fragmented TPKT */ | ||
166 | if (net_ratelimit()) | ||
167 | printk("nf_ct_h323: " | ||
168 | "fragmented TPKT\n"); | ||
169 | goto clear_out; | ||
170 | } | ||
171 | |||
172 | /* It is not even a TPKT */ | ||
173 | return 0; | ||
174 | } | ||
175 | tpktoff = 0; | ||
176 | } else { /* Next TPKT */ | ||
177 | tpktoff = *dataoff + *datalen; | ||
178 | tcpdatalen -= tpktoff; | ||
179 | if (tcpdatalen <= 4) /* No more TPKT */ | ||
180 | goto clear_out; | ||
181 | tpkt = *data + *datalen; | ||
182 | |||
183 | /* Validate TPKT identifier */ | ||
184 | if (tpkt[0] != 0x03 || tpkt[1] != 0) | ||
185 | goto clear_out; | ||
186 | } | ||
187 | |||
188 | /* Validate TPKT length */ | ||
189 | tpktlen = tpkt[2] * 256 + tpkt[3]; | ||
190 | if (tpktlen < 4) | ||
191 | goto clear_out; | ||
192 | if (tpktlen > tcpdatalen) { | ||
193 | if (tcpdatalen == 4) { /* Separate TPKT header */ | ||
194 | /* Netmeeting sends TPKT header and data separately */ | ||
195 | DEBUGP("nf_ct_h323: separate TPKT header indicates " | ||
196 | "there will be TPKT data of %hu bytes\n", | ||
197 | tpktlen - 4); | ||
198 | info->tpkt_len[dir] = tpktlen - 4; | ||
199 | return 0; | ||
200 | } | ||
201 | |||
202 | if (net_ratelimit()) | ||
203 | printk("nf_ct_h323: incomplete TPKT (fragmented?)\n"); | ||
204 | goto clear_out; | ||
205 | } | ||
206 | |||
207 | /* This is the encapsulated data */ | ||
208 | *data = tpkt + 4; | ||
209 | *datalen = tpktlen - 4; | ||
210 | *dataoff = tpktoff + 4; | ||
211 | |||
212 | out: | ||
213 | /* Clear TPKT length */ | ||
214 | info->tpkt_len[dir] = 0; | ||
215 | return 1; | ||
216 | |||
217 | clear_out: | ||
218 | info->tpkt_len[dir] = 0; | ||
219 | return 0; | ||
220 | } | ||
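get_tpkt_data() walks one or more TPKT records (RFC 1006) inside the TCP payload: each record starts with a version byte 0x03, a reserved 0x00 and a 16-bit big-endian length that covers the whole record including its 4-byte header, followed by the encapsulated Q.931 or H.245 data; the function additionally copes with NetMeeting sending the header and the data in separate segments. A self-contained sketch of the basic framing walk (the fragment-reassembly cases are left out):

/* Minimal TPKT (RFC 1006) framing walk, mirroring the checks in
 * get_tpkt_data(): 0x03 0x00 <len_hi> <len_lo>, length includes the header. */
#include <stdio.h>
#include <stddef.h>

static int tpkt_next(const unsigned char *buf, size_t len, size_t *off,
		     size_t *payload_off, size_t *payload_len)
{
	size_t tpktlen;

	if (len - *off < 4)
		return 0;			/* no room for a header */
	if (buf[*off] != 0x03 || buf[*off + 1] != 0x00)
		return 0;			/* not a TPKT */
	tpktlen = buf[*off + 2] * 256 + buf[*off + 3];
	if (tpktlen < 4 || tpktlen > len - *off)
		return 0;			/* bogus or truncated */
	*payload_off = *off + 4;
	*payload_len = tpktlen - 4;
	*off += tpktlen;
	return 1;
}

int main(void)
{
	/* two TPKT records: 5 bytes of payload, then 2 bytes of payload */
	const unsigned char pkt[] = {
		0x03, 0x00, 0x00, 0x09,  'h', 'e', 'l', 'l', 'o',
		0x03, 0x00, 0x00, 0x06,  'h', 'i',
	};
	size_t off = 0, poff, plen;

	while (tpkt_next(pkt, sizeof(pkt), &off, &poff, &plen))
		printf("TPKT payload at %zu, %zu bytes\n", poff, plen);
	return 0;
}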
221 | |||
222 | /****************************************************************************/ | ||
223 | static int get_h245_addr(struct nf_conn *ct, unsigned char *data, | ||
224 | H245_TransportAddress *taddr, | ||
225 | union nf_conntrack_address *addr, __be16 *port) | ||
226 | { | ||
227 | unsigned char *p; | ||
228 | int family = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num; | ||
229 | int len; | ||
230 | |||
231 | if (taddr->choice != eH245_TransportAddress_unicastAddress) | ||
232 | return 0; | ||
233 | |||
234 | switch (taddr->unicastAddress.choice) { | ||
235 | case eUnicastAddress_iPAddress: | ||
236 | if (family != AF_INET) | ||
237 | return 0; | ||
238 | p = data + taddr->unicastAddress.iPAddress.network; | ||
239 | len = 4; | ||
240 | break; | ||
241 | case eUnicastAddress_iP6Address: | ||
242 | if (family != AF_INET6) | ||
243 | return 0; | ||
244 | p = data + taddr->unicastAddress.iP6Address.network; | ||
245 | len = 16; | ||
246 | break; | ||
247 | default: | ||
248 | return 0; | ||
249 | } | ||
250 | |||
251 | memcpy(addr, p, len); | ||
252 | memset((void *)addr + len, 0, sizeof(*addr) - len); | ||
253 | memcpy(port, p + len, sizeof(__be16)); | ||
254 | |||
255 | return 1; | ||
256 | } | ||
257 | |||
258 | /****************************************************************************/ | ||
259 | static int expect_rtp_rtcp(struct sk_buff **pskb, struct nf_conn *ct, | ||
260 | enum ip_conntrack_info ctinfo, | ||
261 | unsigned char **data, int dataoff, | ||
262 | H245_TransportAddress *taddr) | ||
263 | { | ||
264 | int dir = CTINFO2DIR(ctinfo); | ||
265 | int ret = 0; | ||
266 | __be16 port; | ||
267 | __be16 rtp_port, rtcp_port; | ||
268 | union nf_conntrack_address addr; | ||
269 | struct nf_conntrack_expect *rtp_exp; | ||
270 | struct nf_conntrack_expect *rtcp_exp; | ||
271 | typeof(nat_rtp_rtcp_hook) nat_rtp_rtcp; | ||
272 | |||
273 | /* Read RTP or RTCP address */ | ||
274 | if (!get_h245_addr(ct, *data, taddr, &addr, &port) || | ||
275 | memcmp(&addr, &ct->tuplehash[dir].tuple.src.u3, sizeof(addr)) || | ||
276 | port == 0) | ||
277 | return 0; | ||
278 | |||
279 | /* RTP port is even */ | ||
280 | port &= htons(~1); | ||
281 | rtp_port = port; | ||
282 | rtcp_port = htons(ntohs(port) + 1); | ||
283 | |||
284 | /* Create expect for RTP */ | ||
285 | if ((rtp_exp = nf_conntrack_expect_alloc(ct)) == NULL) | ||
286 | return -1; | ||
287 | nf_conntrack_expect_init(rtp_exp, ct->tuplehash[!dir].tuple.src.l3num, | ||
288 | &ct->tuplehash[!dir].tuple.src.u3, | ||
289 | &ct->tuplehash[!dir].tuple.dst.u3, | ||
290 | IPPROTO_UDP, NULL, &rtp_port); | ||
291 | |||
292 | /* Create expect for RTCP */ | ||
293 | if ((rtcp_exp = nf_conntrack_expect_alloc(ct)) == NULL) { | ||
294 | nf_conntrack_expect_put(rtp_exp); | ||
295 | return -1; | ||
296 | } | ||
297 | nf_conntrack_expect_init(rtcp_exp, ct->tuplehash[!dir].tuple.src.l3num, | ||
298 | &ct->tuplehash[!dir].tuple.src.u3, | ||
299 | &ct->tuplehash[!dir].tuple.dst.u3, | ||
300 | IPPROTO_UDP, NULL, &rtcp_port); | ||
301 | |||
302 | if (memcmp(&ct->tuplehash[dir].tuple.src.u3, | ||
303 | &ct->tuplehash[!dir].tuple.dst.u3, | ||
304 | sizeof(ct->tuplehash[dir].tuple.src.u3)) && | ||
305 | (nat_rtp_rtcp = rcu_dereference(nat_rtp_rtcp_hook)) && | ||
306 | ct->status & IPS_NAT_MASK) { | ||
307 | /* NAT needed */ | ||
308 | ret = nat_rtp_rtcp(pskb, ct, ctinfo, data, dataoff, | ||
309 | taddr, port, rtp_port, rtp_exp, rtcp_exp); | ||
310 | } else { /* Conntrack only */ | ||
311 | if (nf_conntrack_expect_related(rtp_exp) == 0) { | ||
312 | if (nf_conntrack_expect_related(rtcp_exp) == 0) { | ||
313 | DEBUGP("nf_ct_h323: expect RTP "); | ||
314 | NF_CT_DUMP_TUPLE(&rtp_exp->tuple); | ||
315 | DEBUGP("nf_ct_h323: expect RTCP "); | ||
316 | NF_CT_DUMP_TUPLE(&rtcp_exp->tuple); | ||
317 | } else { | ||
318 | nf_conntrack_unexpect_related(rtp_exp); | ||
319 | ret = -1; | ||
320 | } | ||
321 | } else | ||
322 | ret = -1; | ||
323 | } | ||
324 | |||
325 | nf_conntrack_expect_put(rtp_exp); | ||
326 | nf_conntrack_expect_put(rtcp_exp); | ||
327 | |||
328 | return ret; | ||
329 | } | ||
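expect_rtp_rtcp() derives both expectations from the single media address announced in the H.245 message: by convention RTP uses an even port and RTCP the next odd one, so the code masks the announced port down to an even value with port &= htons(~1), operating directly on the network-byte-order __be16, and adds one for RTCP. A short userspace check of that byte-order-sensitive arithmetic:

/* The even/odd RTP/RTCP port pairing done in expect_rtp_rtcp(), performed
 * directly on network-byte-order (big-endian) 16-bit values. */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

int main(void)
{
	uint16_t announced = htons(5001);	/* odd port announced, as __be16 */
	uint16_t rtp_port, rtcp_port;

	/* Round down to the even RTP port; the ~1 mask must be byte-swapped
	 * too, so that the low bit of the *port*, not of the raw word on a
	 * little-endian host, is cleared. */
	rtp_port = announced & htons((uint16_t)~1);
	rtcp_port = htons(ntohs(rtp_port) + 1);

	printf("announced %u -> rtp %u, rtcp %u\n",
	       ntohs(announced), ntohs(rtp_port), ntohs(rtcp_port));
	return 0;
}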
330 | |||
331 | /****************************************************************************/ | ||
332 | static int expect_t120(struct sk_buff **pskb, | ||
333 | struct nf_conn *ct, | ||
334 | enum ip_conntrack_info ctinfo, | ||
335 | unsigned char **data, int dataoff, | ||
336 | H245_TransportAddress *taddr) | ||
337 | { | ||
338 | int dir = CTINFO2DIR(ctinfo); | ||
339 | int ret = 0; | ||
340 | __be16 port; | ||
341 | union nf_conntrack_address addr; | ||
342 | struct nf_conntrack_expect *exp; | ||
343 | typeof(nat_t120_hook) nat_t120; | ||
344 | |||
345 | /* Read T.120 address */ | ||
346 | if (!get_h245_addr(ct, *data, taddr, &addr, &port) || | ||
347 | memcmp(&addr, &ct->tuplehash[dir].tuple.src.u3, sizeof(addr)) || | ||
348 | port == 0) | ||
349 | return 0; | ||
350 | |||
351 | /* Create expect for T.120 connections */ | ||
352 | if ((exp = nf_conntrack_expect_alloc(ct)) == NULL) | ||
353 | return -1; | ||
354 | nf_conntrack_expect_init(exp, ct->tuplehash[!dir].tuple.src.l3num, | ||
355 | &ct->tuplehash[!dir].tuple.src.u3, | ||
356 | &ct->tuplehash[!dir].tuple.dst.u3, | ||
357 | IPPROTO_TCP, NULL, &port); | ||
358 | exp->flags = NF_CT_EXPECT_PERMANENT; /* Accept multiple channels */ | ||
359 | |||
360 | if (memcmp(&ct->tuplehash[dir].tuple.src.u3, | ||
361 | &ct->tuplehash[!dir].tuple.dst.u3, | ||
362 | sizeof(ct->tuplehash[dir].tuple.src.u3)) && | ||
363 | (nat_t120 = rcu_dereference(nat_t120_hook)) && | ||
364 | ct->status & IPS_NAT_MASK) { | ||
365 | /* NAT needed */ | ||
366 | ret = nat_t120(pskb, ct, ctinfo, data, dataoff, taddr, | ||
367 | port, exp); | ||
368 | } else { /* Conntrack only */ | ||
369 | if (nf_conntrack_expect_related(exp) == 0) { | ||
370 | DEBUGP("nf_ct_h323: expect T.120 "); | ||
371 | NF_CT_DUMP_TUPLE(&exp->tuple); | ||
372 | } else | ||
373 | ret = -1; | ||
374 | } | ||
375 | |||
376 | nf_conntrack_expect_put(exp); | ||
377 | |||
378 | return ret; | ||
379 | } | ||
380 | |||
381 | /****************************************************************************/ | ||
382 | static int process_h245_channel(struct sk_buff **pskb, | ||
383 | struct nf_conn *ct, | ||
384 | enum ip_conntrack_info ctinfo, | ||
385 | unsigned char **data, int dataoff, | ||
386 | H2250LogicalChannelParameters *channel) | ||
387 | { | ||
388 | int ret; | ||
389 | |||
390 | if (channel->options & eH2250LogicalChannelParameters_mediaChannel) { | ||
391 | /* RTP */ | ||
392 | ret = expect_rtp_rtcp(pskb, ct, ctinfo, data, dataoff, | ||
393 | &channel->mediaChannel); | ||
394 | if (ret < 0) | ||
395 | return -1; | ||
396 | } | ||
397 | |||
398 | if (channel-> | ||
399 | options & eH2250LogicalChannelParameters_mediaControlChannel) { | ||
400 | /* RTCP */ | ||
401 | ret = expect_rtp_rtcp(pskb, ct, ctinfo, data, dataoff, | ||
402 | &channel->mediaControlChannel); | ||
403 | if (ret < 0) | ||
404 | return -1; | ||
405 | } | ||
406 | |||
407 | return 0; | ||
408 | } | ||
409 | |||
410 | /****************************************************************************/ | ||
411 | static int process_olc(struct sk_buff **pskb, struct nf_conn *ct, | ||
412 | enum ip_conntrack_info ctinfo, | ||
413 | unsigned char **data, int dataoff, | ||
414 | OpenLogicalChannel *olc) | ||
415 | { | ||
416 | int ret; | ||
417 | |||
418 | DEBUGP("nf_ct_h323: OpenLogicalChannel\n"); | ||
419 | |||
420 | if (olc->forwardLogicalChannelParameters.multiplexParameters.choice == | ||
421 | eOpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters_h2250LogicalChannelParameters) | ||
422 | { | ||
423 | ret = process_h245_channel(pskb, ct, ctinfo, data, dataoff, | ||
424 | &olc-> | ||
425 | forwardLogicalChannelParameters. | ||
426 | multiplexParameters. | ||
427 | h2250LogicalChannelParameters); | ||
428 | if (ret < 0) | ||
429 | return -1; | ||
430 | } | ||
431 | |||
432 | if ((olc->options & | ||
433 | eOpenLogicalChannel_reverseLogicalChannelParameters) && | ||
434 | (olc->reverseLogicalChannelParameters.options & | ||
435 | eOpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters) | ||
436 | && (olc->reverseLogicalChannelParameters.multiplexParameters. | ||
437 | choice == | ||
438 | eOpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters_h2250LogicalChannelParameters)) | ||
439 | { | ||
440 | ret = | ||
441 | process_h245_channel(pskb, ct, ctinfo, data, dataoff, | ||
442 | &olc-> | ||
443 | reverseLogicalChannelParameters. | ||
444 | multiplexParameters. | ||
445 | h2250LogicalChannelParameters); | ||
446 | if (ret < 0) | ||
447 | return -1; | ||
448 | } | ||
449 | |||
450 | if ((olc->options & eOpenLogicalChannel_separateStack) && | ||
451 | olc->forwardLogicalChannelParameters.dataType.choice == | ||
452 | eDataType_data && | ||
453 | olc->forwardLogicalChannelParameters.dataType.data.application. | ||
454 | choice == eDataApplicationCapability_application_t120 && | ||
455 | olc->forwardLogicalChannelParameters.dataType.data.application. | ||
456 | t120.choice == eDataProtocolCapability_separateLANStack && | ||
457 | olc->separateStack.networkAddress.choice == | ||
458 | eNetworkAccessParameters_networkAddress_localAreaAddress) { | ||
459 | ret = expect_t120(pskb, ct, ctinfo, data, dataoff, | ||
460 | &olc->separateStack.networkAddress. | ||
461 | localAreaAddress); | ||
462 | if (ret < 0) | ||
463 | return -1; | ||
464 | } | ||
465 | |||
466 | return 0; | ||
467 | } | ||
468 | |||
469 | /****************************************************************************/ | ||
470 | static int process_olca(struct sk_buff **pskb, struct nf_conn *ct, | ||
471 | enum ip_conntrack_info ctinfo, | ||
472 | unsigned char **data, int dataoff, | ||
473 | OpenLogicalChannelAck *olca) | ||
474 | { | ||
475 | H2250LogicalChannelAckParameters *ack; | ||
476 | int ret; | ||
477 | |||
478 | DEBUGP("nf_ct_h323: OpenLogicalChannelAck\n"); | ||
479 | |||
480 | if ((olca->options & | ||
481 | eOpenLogicalChannelAck_reverseLogicalChannelParameters) && | ||
482 | (olca->reverseLogicalChannelParameters.options & | ||
483 | eOpenLogicalChannelAck_reverseLogicalChannelParameters_multiplexParameters) | ||
484 | && (olca->reverseLogicalChannelParameters.multiplexParameters. | ||
485 | choice == | ||
486 | eOpenLogicalChannelAck_reverseLogicalChannelParameters_multiplexParameters_h2250LogicalChannelParameters)) | ||
487 | { | ||
488 | ret = process_h245_channel(pskb, ct, ctinfo, data, dataoff, | ||
489 | &olca-> | ||
490 | reverseLogicalChannelParameters. | ||
491 | multiplexParameters. | ||
492 | h2250LogicalChannelParameters); | ||
493 | if (ret < 0) | ||
494 | return -1; | ||
495 | } | ||
496 | |||
497 | if ((olca->options & | ||
498 | eOpenLogicalChannelAck_forwardMultiplexAckParameters) && | ||
499 | (olca->forwardMultiplexAckParameters.choice == | ||
500 | eOpenLogicalChannelAck_forwardMultiplexAckParameters_h2250LogicalChannelAckParameters)) | ||
501 | { | ||
502 | ack = &olca->forwardMultiplexAckParameters. | ||
503 | h2250LogicalChannelAckParameters; | ||
504 | if (ack->options & | ||
505 | eH2250LogicalChannelAckParameters_mediaChannel) { | ||
506 | /* RTP */ | ||
507 | ret = expect_rtp_rtcp(pskb, ct, ctinfo, data, dataoff, | ||
508 | &ack->mediaChannel); | ||
509 | if (ret < 0) | ||
510 | return -1; | ||
511 | } | ||
512 | |||
513 | if (ack->options & | ||
514 | eH2250LogicalChannelAckParameters_mediaControlChannel) { | ||
515 | /* RTCP */ | ||
516 | ret = expect_rtp_rtcp(pskb, ct, ctinfo, data, dataoff, | ||
517 | &ack->mediaControlChannel); | ||
518 | if (ret < 0) | ||
519 | return -1; | ||
520 | } | ||
521 | } | ||
522 | |||
523 | return 0; | ||
524 | } | ||
525 | |||
526 | /****************************************************************************/ | ||
527 | static int process_h245(struct sk_buff **pskb, struct nf_conn *ct, | ||
528 | enum ip_conntrack_info ctinfo, | ||
529 | unsigned char **data, int dataoff, | ||
530 | MultimediaSystemControlMessage *mscm) | ||
531 | { | ||
532 | switch (mscm->choice) { | ||
533 | case eMultimediaSystemControlMessage_request: | ||
534 | if (mscm->request.choice == | ||
535 | eRequestMessage_openLogicalChannel) { | ||
536 | return process_olc(pskb, ct, ctinfo, data, dataoff, | ||
537 | &mscm->request.openLogicalChannel); | ||
538 | } | ||
539 | DEBUGP("nf_ct_h323: H.245 Request %d\n", | ||
540 | mscm->request.choice); | ||
541 | break; | ||
542 | case eMultimediaSystemControlMessage_response: | ||
543 | if (mscm->response.choice == | ||
544 | eResponseMessage_openLogicalChannelAck) { | ||
545 | return process_olca(pskb, ct, ctinfo, data, dataoff, | ||
546 | &mscm->response. | ||
547 | openLogicalChannelAck); | ||
548 | } | ||
549 | DEBUGP("nf_ct_h323: H.245 Response %d\n", | ||
550 | mscm->response.choice); | ||
551 | break; | ||
552 | default: | ||
553 | DEBUGP("nf_ct_h323: H.245 signal %d\n", mscm->choice); | ||
554 | break; | ||
555 | } | ||
556 | |||
557 | return 0; | ||
558 | } | ||
559 | |||
560 | /****************************************************************************/ | ||
561 | static int h245_help(struct sk_buff **pskb, unsigned int protoff, | ||
562 | struct nf_conn *ct, enum ip_conntrack_info ctinfo) | ||
563 | { | ||
564 | static MultimediaSystemControlMessage mscm; | ||
565 | unsigned char *data = NULL; | ||
566 | int datalen; | ||
567 | int dataoff; | ||
568 | int ret; | ||
569 | |||
570 | /* Until there's been traffic both ways, don't look in packets. */ | ||
571 | if (ctinfo != IP_CT_ESTABLISHED && | ||
572 | ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY) { | ||
573 | return NF_ACCEPT; | ||
574 | } | ||
575 | DEBUGP("nf_ct_h245: skblen = %u\n", (*pskb)->len); | ||
576 | |||
577 | spin_lock_bh(&nf_h323_lock); | ||
578 | |||
579 | /* Process each TPKT */ | ||
580 | while (get_tpkt_data(pskb, protoff, ct, ctinfo, | ||
581 | &data, &datalen, &dataoff)) { | ||
582 | DEBUGP("nf_ct_h245: TPKT len=%d ", datalen); | ||
583 | NF_CT_DUMP_TUPLE(&ct->tuplehash[CTINFO2DIR(ctinfo)].tuple); | ||
584 | |||
585 | /* Decode H.245 signal */ | ||
586 | ret = DecodeMultimediaSystemControlMessage(data, datalen, | ||
587 | &mscm); | ||
588 | if (ret < 0) { | ||
589 | if (net_ratelimit()) | ||
590 | printk("nf_ct_h245: decoding error: %s\n", | ||
591 | ret == H323_ERROR_BOUND ? | ||
592 | "out of bound" : "out of range"); | ||
593 | /* We don't drop on decoding errors */ | ||
594 | break; | ||
595 | } | ||
596 | |||
597 | /* Process H.245 signal */ | ||
598 | if (process_h245(pskb, ct, ctinfo, &data, dataoff, &mscm) < 0) | ||
599 | goto drop; | ||
600 | } | ||
601 | |||
602 | spin_unlock_bh(&nf_h323_lock); | ||
603 | return NF_ACCEPT; | ||
604 | |||
605 | drop: | ||
606 | spin_unlock_bh(&nf_h323_lock); | ||
607 | if (net_ratelimit()) | ||
608 | printk("nf_ct_h245: packet dropped\n"); | ||
609 | return NF_DROP; | ||
610 | } | ||
611 | |||
612 | /****************************************************************************/ | ||
613 | static struct nf_conntrack_helper nf_conntrack_helper_h245 __read_mostly = { | ||
614 | .name = "H.245", | ||
615 | .me = THIS_MODULE, | ||
616 | .max_expected = H323_RTP_CHANNEL_MAX * 4 + 2 /* T.120 */, | ||
617 | .timeout = 240, | ||
618 | .tuple.dst.protonum = IPPROTO_UDP, | ||
619 | .mask.src.u.udp.port = __constant_htons(0xFFFF), | ||
620 | .mask.dst.protonum = 0xFF, | ||
621 | .help = h245_help | ||
622 | }; | ||
623 | |||
624 | /****************************************************************************/ | ||
625 | int get_h225_addr(struct nf_conn *ct, unsigned char *data, | ||
626 | TransportAddress *taddr, | ||
627 | union nf_conntrack_address *addr, __be16 *port) | ||
628 | { | ||
629 | unsigned char *p; | ||
630 | int family = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num; | ||
631 | int len; | ||
632 | |||
633 | switch (taddr->choice) { | ||
634 | case eTransportAddress_ipAddress: | ||
635 | if (family != AF_INET) | ||
636 | return 0; | ||
637 | p = data + taddr->ipAddress.ip; | ||
638 | len = 4; | ||
639 | break; | ||
640 | case eTransportAddress_ip6Address: | ||
641 | if (family != AF_INET6) | ||
642 | return 0; | ||
643 | p = data + taddr->ip6Address.ip6; | ||
644 | len = 16; | ||
645 | break; | ||
646 | default: | ||
647 | return 0; | ||
648 | } | ||
649 | |||
650 | memcpy(addr, p, len); | ||
651 | memset((void *)addr + len, 0, sizeof(*addr) - len); | ||
652 | memcpy(port, p + len, sizeof(__be16)); | ||
653 | |||
654 | return 1; | ||
655 | } | ||
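get_h225_addr(), like get_h245_addr() earlier, does no ASN.1 parsing of its own: the decoder has already recorded, in the TransportAddress structure, the offset of the raw address bytes within the decoded message, so the function only copies 4 or 16 address bytes plus a 2-byte big-endian port from data + offset. A reduced sketch of that copy for the IPv4 case; the fake_taddr layout and the offsets are invented for the example:

/* Sketch of the "offset into the decoded message" address copy done by
 * get_h225_addr()/get_h245_addr().  The message and offsets are made up. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <arpa/inet.h>

struct fake_taddr {		/* stand-in for TransportAddress */
	int is_ipv4;
	unsigned int ip_off;	/* offset of the 4 address bytes in the message */
};

static int get_addr(const unsigned char *data, const struct fake_taddr *taddr,
		    struct in_addr *addr, uint16_t *port_be)
{
	if (!taddr->is_ipv4)
		return 0;
	memcpy(addr, data + taddr->ip_off, 4);		/* address bytes */
	memcpy(port_be, data + taddr->ip_off + 4, 2);	/* big-endian port */
	return 1;
}

int main(void)
{
	/* a decoded message where bytes 6..11 hold 192.0.2.1:1720 */
	const unsigned char msg[] = {
		0xde, 0xad, 0xbe, 0xef, 0x00, 0x00,
		192, 0, 2, 1,  0x06, 0xb8,	/* 0x06b8 = 1720 */
	};
	struct fake_taddr taddr = { 1, 6 };
	struct in_addr addr;
	uint16_t port;

	if (get_addr(msg, &taddr, &addr, &port))
		printf("%s:%u\n", inet_ntoa(addr), ntohs(port));
	return 0;
}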
656 | |||
657 | /****************************************************************************/ | ||
658 | static int expect_h245(struct sk_buff **pskb, struct nf_conn *ct, | ||
659 | enum ip_conntrack_info ctinfo, | ||
660 | unsigned char **data, int dataoff, | ||
661 | TransportAddress *taddr) | ||
662 | { | ||
663 | int dir = CTINFO2DIR(ctinfo); | ||
664 | int ret = 0; | ||
665 | __be16 port; | ||
666 | union nf_conntrack_address addr; | ||
667 | struct nf_conntrack_expect *exp; | ||
668 | typeof(nat_h245_hook) nat_h245; | ||
669 | |||
670 | /* Read h245Address */ | ||
671 | if (!get_h225_addr(ct, *data, taddr, &addr, &port) || | ||
672 | memcmp(&addr, &ct->tuplehash[dir].tuple.src.u3, sizeof(addr)) || | ||
673 | port == 0) | ||
674 | return 0; | ||
675 | |||
676 | /* Create expect for h245 connection */ | ||
677 | if ((exp = nf_conntrack_expect_alloc(ct)) == NULL) | ||
678 | return -1; | ||
679 | nf_conntrack_expect_init(exp, ct->tuplehash[!dir].tuple.src.l3num, | ||
680 | &ct->tuplehash[!dir].tuple.src.u3, | ||
681 | &ct->tuplehash[!dir].tuple.dst.u3, | ||
682 | IPPROTO_TCP, NULL, &port); | ||
683 | exp->helper = &nf_conntrack_helper_h245; | ||
684 | |||
685 | if (memcmp(&ct->tuplehash[dir].tuple.src.u3, | ||
686 | &ct->tuplehash[!dir].tuple.dst.u3, | ||
687 | sizeof(ct->tuplehash[dir].tuple.src.u3)) && | ||
688 | (nat_h245 = rcu_dereference(nat_h245_hook)) && | ||
689 | ct->status & IPS_NAT_MASK) { | ||
690 | /* NAT needed */ | ||
691 | ret = nat_h245(pskb, ct, ctinfo, data, dataoff, taddr, | ||
692 | port, exp); | ||
693 | } else { /* Conntrack only */ | ||
694 | if (nf_conntrack_expect_related(exp) == 0) { | ||
695 | DEBUGP("nf_ct_q931: expect H.245 "); | ||
696 | NF_CT_DUMP_TUPLE(&exp->tuple); | ||
697 | } else | ||
698 | ret = -1; | ||
699 | } | ||
700 | |||
701 | nf_conntrack_expect_put(exp); | ||
702 | |||
703 | return ret; | ||
704 | } | ||
705 | |||
706 | /* If the calling party is on the same side as the forward-to party, | ||
707 |  * we don't need to track the second call */ | ||
708 | static int callforward_do_filter(union nf_conntrack_address *src, | ||
709 | union nf_conntrack_address *dst, | ||
710 | int family) | ||
711 | { | ||
712 | struct flowi fl1, fl2; | ||
713 | int ret = 0; | ||
714 | |||
715 | memset(&fl1, 0, sizeof(fl1)); | ||
716 | memset(&fl2, 0, sizeof(fl2)); | ||
717 | |||
718 | switch (family) { | ||
719 | case AF_INET: { | ||
720 | struct rtable *rt1, *rt2; | ||
721 | |||
722 | fl1.fl4_dst = src->ip; | ||
723 | fl2.fl4_dst = dst->ip; | ||
724 | if (ip_route_output_key(&rt1, &fl1) == 0) { | ||
725 | if (ip_route_output_key(&rt2, &fl2) == 0) { | ||
726 | if (rt1->rt_gateway == rt2->rt_gateway && | ||
727 | rt1->u.dst.dev == rt2->u.dst.dev) | ||
728 | ret = 1; | ||
729 | dst_release(&rt2->u.dst); | ||
730 | } | ||
731 | dst_release(&rt1->u.dst); | ||
732 | } | ||
733 | break; | ||
734 | } | ||
735 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
736 | case AF_INET6: { | ||
737 | struct rt6_info *rt1, *rt2; | ||
738 | |||
739 | memcpy(&fl1.fl6_dst, src, sizeof(fl1.fl6_dst)); | ||
740 | memcpy(&fl2.fl6_dst, dst, sizeof(fl2.fl6_dst)); | ||
741 | rt1 = (struct rt6_info *)ip6_route_output(NULL, &fl1); | ||
742 | if (rt1) { | ||
743 | rt2 = (struct rt6_info *)ip6_route_output(NULL, &fl2); | ||
744 | if (rt2) { | ||
745 | if (!memcmp(&rt1->rt6i_gateway, &rt2->rt6i_gateway, | ||
746 | sizeof(rt1->rt6i_gateway)) && | ||
747 | rt1->u.dst.dev == rt2->u.dst.dev) | ||
748 | ret = 1; | ||
749 | dst_release(&rt2->u.dst); | ||
750 | } | ||
751 | dst_release(&rt1->u.dst); | ||
752 | } | ||
753 | break; | ||
754 | } | ||
755 | #endif | ||
756 | } | ||
757 | return ret; | ||
758 | |||
759 | } | ||
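callforward_do_filter() decides whether the caller and the forward-to party sit on the same side of this box by performing two route lookups and comparing the resulting gateway and output device; when both match, the second call leg never crosses the machine and there is nothing to track. Real lookups need the kernel FIB, so the sketch below only models the comparison step; lookup_route() and its toy table are hypothetical:

/* Toy model of the "same gateway and same output device" test made by
 * callforward_do_filter().  lookup_route() and the table are hypothetical. */
#include <stdio.h>
#include <string.h>

struct toy_route {
	const char *prefix;	/* crude textual prefix match */
	const char *gateway;
	const char *dev;
};

static const struct toy_route table[] = {
	{ "10.0.", "direct",      "eth0" },	/* LAN, simplified */
	{ "",      "203.0.113.1", "eth1" },	/* default route */
};

static const struct toy_route *lookup_route(const char *dst)
{
	size_t i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (strncmp(dst, table[i].prefix, strlen(table[i].prefix)) == 0)
			return &table[i];
	return NULL;
}

/* 1 when both destinations leave through the same gateway and device,
 * i.e. the forwarded call stays on one side of this box. */
static int same_side(const char *caller, const char *forward_to)
{
	const struct toy_route *r1 = lookup_route(caller);
	const struct toy_route *r2 = lookup_route(forward_to);

	return r1 && r2 &&
	       strcmp(r1->gateway, r2->gateway) == 0 &&
	       strcmp(r1->dev, r2->dev) == 0;
}

int main(void)
{
	/* two Internet destinations share the default route: same side */
	printf("%d\n", same_side("198.51.100.7", "192.0.2.9"));
	/* a LAN destination and an Internet destination: different sides */
	printf("%d\n", same_side("10.0.0.5", "192.0.2.9"));
	return 0;
}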
760 | |||
761 | /****************************************************************************/ | ||
762 | static int expect_callforwarding(struct sk_buff **pskb, | ||
763 | struct nf_conn *ct, | ||
764 | enum ip_conntrack_info ctinfo, | ||
765 | unsigned char **data, int dataoff, | ||
766 | TransportAddress *taddr) | ||
767 | { | ||
768 | int dir = CTINFO2DIR(ctinfo); | ||
769 | int ret = 0; | ||
770 | __be16 port; | ||
771 | union nf_conntrack_address addr; | ||
772 | struct nf_conntrack_expect *exp; | ||
773 | typeof(nat_callforwarding_hook) nat_callforwarding; | ||
774 | |||
775 | /* Read alternativeAddress */ | ||
776 | if (!get_h225_addr(ct, *data, taddr, &addr, &port) || port == 0) | ||
777 | return 0; | ||
778 | |||
779 | /* If the calling party is on the same side as the forward-to party, | ||
780 |  * we don't need to track the second call */ | ||
781 | if (callforward_filter && | ||
782 | callforward_do_filter(&addr, &ct->tuplehash[!dir].tuple.src.u3, | ||
783 | ct->tuplehash[!dir].tuple.src.l3num)) { | ||
784 | DEBUGP("nf_ct_q931: Call Forwarding not tracked\n"); | ||
785 | return 0; | ||
786 | } | ||
787 | |||
788 | /* Create expect for the second call leg */ | ||
789 | if ((exp = nf_conntrack_expect_alloc(ct)) == NULL) | ||
790 | return -1; | ||
791 | nf_conntrack_expect_init(exp, ct->tuplehash[!dir].tuple.src.l3num, | ||
792 | &ct->tuplehash[!dir].tuple.src.u3, &addr, | ||
793 | IPPROTO_TCP, NULL, &port); | ||
794 | exp->helper = nf_conntrack_helper_q931; | ||
795 | |||
796 | if (memcmp(&ct->tuplehash[dir].tuple.src.u3, | ||
797 | &ct->tuplehash[!dir].tuple.dst.u3, | ||
798 | sizeof(ct->tuplehash[dir].tuple.src.u3)) && | ||
799 | (nat_callforwarding = rcu_dereference(nat_callforwarding_hook)) && | ||
800 | ct->status & IPS_NAT_MASK) { | ||
801 | /* Need NAT */ | ||
802 | ret = nat_callforwarding(pskb, ct, ctinfo, data, dataoff, | ||
803 | taddr, port, exp); | ||
804 | } else { /* Conntrack only */ | ||
805 | if (nf_conntrack_expect_related(exp) == 0) { | ||
806 | DEBUGP("nf_ct_q931: expect Call Forwarding "); | ||
807 | NF_CT_DUMP_TUPLE(&exp->tuple); | ||
808 | } else | ||
809 | ret = -1; | ||
810 | } | ||
811 | |||
812 | nf_conntrack_expect_put(exp); | ||
813 | |||
814 | return ret; | ||
815 | } | ||
816 | |||
817 | /****************************************************************************/ | ||
818 | static int process_setup(struct sk_buff **pskb, struct nf_conn *ct, | ||
819 | enum ip_conntrack_info ctinfo, | ||
820 | unsigned char **data, int dataoff, | ||
821 | Setup_UUIE *setup) | ||
822 | { | ||
823 | int dir = CTINFO2DIR(ctinfo); | ||
824 | int ret; | ||
825 | int i; | ||
826 | __be16 port; | ||
827 | union nf_conntrack_address addr; | ||
828 | typeof(set_h225_addr_hook) set_h225_addr; | ||
829 | |||
830 | DEBUGP("nf_ct_q931: Setup\n"); | ||
831 | |||
832 | if (setup->options & eSetup_UUIE_h245Address) { | ||
833 | ret = expect_h245(pskb, ct, ctinfo, data, dataoff, | ||
834 | &setup->h245Address); | ||
835 | if (ret < 0) | ||
836 | return -1; | ||
837 | } | ||
838 | |||
839 | set_h225_addr = rcu_dereference(set_h225_addr_hook); | ||
840 | if ((setup->options & eSetup_UUIE_destCallSignalAddress) && | ||
841 | (set_h225_addr) && ct->status & IPS_NAT_MASK && | ||
842 | get_h225_addr(ct, *data, &setup->destCallSignalAddress, | ||
843 | &addr, &port) && | ||
844 | memcmp(&addr, &ct->tuplehash[!dir].tuple.src.u3, sizeof(addr))) { | ||
845 | DEBUGP("nf_ct_q931: set destCallSignalAddress " | ||
846 | NIP6_FMT ":%hu->" NIP6_FMT ":%hu\n", | ||
847 | NIP6(*(struct in6_addr *)&addr), ntohs(port), | ||
848 | NIP6(*(struct in6_addr *)&ct->tuplehash[!dir].tuple.src.u3), | ||
849 | ntohs(ct->tuplehash[!dir].tuple.src.u.tcp.port)); | ||
850 | ret = set_h225_addr(pskb, data, dataoff, | ||
851 | &setup->destCallSignalAddress, | ||
852 | &ct->tuplehash[!dir].tuple.src.u3, | ||
853 | ct->tuplehash[!dir].tuple.src.u.tcp.port); | ||
854 | if (ret < 0) | ||
855 | return -1; | ||
856 | } | ||
857 | |||
858 | if ((setup->options & eSetup_UUIE_sourceCallSignalAddress) && | ||
859 | (set_h225_addr) && ct->status & IPS_NAT_MASK && | ||
860 | get_h225_addr(ct, *data, &setup->sourceCallSignalAddress, | ||
861 | &addr, &port) && | ||
862 | memcmp(&addr, &ct->tuplehash[!dir].tuple.dst.u3, sizeof(addr))) { | ||
863 | DEBUGP("nf_ct_q931: set sourceCallSignalAddress " | ||
864 | NIP6_FMT ":%hu->" NIP6_FMT ":%hu\n", | ||
865 | NIP6(*(struct in6_addr *)&addr), ntohs(port), | ||
866 | NIP6(*(struct in6_addr *)&ct->tuplehash[!dir].tuple.dst.u3), | ||
867 | ntohs(ct->tuplehash[!dir].tuple.dst.u.tcp.port)); | ||
868 | ret = set_h225_addr(pskb, data, dataoff, | ||
869 | &setup->sourceCallSignalAddress, | ||
870 | &ct->tuplehash[!dir].tuple.dst.u3, | ||
871 | ct->tuplehash[!dir].tuple.dst.u.tcp.port); | ||
872 | if (ret < 0) | ||
873 | return -1; | ||
874 | } | ||
875 | |||
876 | if (setup->options & eSetup_UUIE_fastStart) { | ||
877 | for (i = 0; i < setup->fastStart.count; i++) { | ||
878 | ret = process_olc(pskb, ct, ctinfo, data, dataoff, | ||
879 | &setup->fastStart.item[i]); | ||
880 | if (ret < 0) | ||
881 | return -1; | ||
882 | } | ||
883 | } | ||
884 | |||
885 | return 0; | ||
886 | } | ||
887 | |||
888 | /****************************************************************************/ | ||
889 | static int process_callproceeding(struct sk_buff **pskb, | ||
890 | struct nf_conn *ct, | ||
891 | enum ip_conntrack_info ctinfo, | ||
892 | unsigned char **data, int dataoff, | ||
893 | CallProceeding_UUIE *callproc) | ||
894 | { | ||
895 | int ret; | ||
896 | int i; | ||
897 | |||
898 | DEBUGP("nf_ct_q931: CallProceeding\n"); | ||
899 | |||
900 | if (callproc->options & eCallProceeding_UUIE_h245Address) { | ||
901 | ret = expect_h245(pskb, ct, ctinfo, data, dataoff, | ||
902 | &callproc->h245Address); | ||
903 | if (ret < 0) | ||
904 | return -1; | ||
905 | } | ||
906 | |||
907 | if (callproc->options & eCallProceeding_UUIE_fastStart) { | ||
908 | for (i = 0; i < callproc->fastStart.count; i++) { | ||
909 | ret = process_olc(pskb, ct, ctinfo, data, dataoff, | ||
910 | &callproc->fastStart.item[i]); | ||
911 | if (ret < 0) | ||
912 | return -1; | ||
913 | } | ||
914 | } | ||
915 | |||
916 | return 0; | ||
917 | } | ||
918 | |||
919 | /****************************************************************************/ | ||
920 | static int process_connect(struct sk_buff **pskb, struct nf_conn *ct, | ||
921 | enum ip_conntrack_info ctinfo, | ||
922 | unsigned char **data, int dataoff, | ||
923 | Connect_UUIE *connect) | ||
924 | { | ||
925 | int ret; | ||
926 | int i; | ||
927 | |||
928 | DEBUGP("nf_ct_q931: Connect\n"); | ||
929 | |||
930 | if (connect->options & eConnect_UUIE_h245Address) { | ||
931 | ret = expect_h245(pskb, ct, ctinfo, data, dataoff, | ||
932 | &connect->h245Address); | ||
933 | if (ret < 0) | ||
934 | return -1; | ||
935 | } | ||
936 | |||
937 | if (connect->options & eConnect_UUIE_fastStart) { | ||
938 | for (i = 0; i < connect->fastStart.count; i++) { | ||
939 | ret = process_olc(pskb, ct, ctinfo, data, dataoff, | ||
940 | &connect->fastStart.item[i]); | ||
941 | if (ret < 0) | ||
942 | return -1; | ||
943 | } | ||
944 | } | ||
945 | |||
946 | return 0; | ||
947 | } | ||
948 | |||
949 | /****************************************************************************/ | ||
950 | static int process_alerting(struct sk_buff **pskb, struct nf_conn *ct, | ||
951 | enum ip_conntrack_info ctinfo, | ||
952 | unsigned char **data, int dataoff, | ||
953 | Alerting_UUIE *alert) | ||
954 | { | ||
955 | int ret; | ||
956 | int i; | ||
957 | |||
958 | DEBUGP("nf_ct_q931: Alerting\n"); | ||
959 | |||
960 | if (alert->options & eAlerting_UUIE_h245Address) { | ||
961 | ret = expect_h245(pskb, ct, ctinfo, data, dataoff, | ||
962 | &alert->h245Address); | ||
963 | if (ret < 0) | ||
964 | return -1; | ||
965 | } | ||
966 | |||
967 | if (alert->options & eAlerting_UUIE_fastStart) { | ||
968 | for (i = 0; i < alert->fastStart.count; i++) { | ||
969 | ret = process_olc(pskb, ct, ctinfo, data, dataoff, | ||
970 | &alert->fastStart.item[i]); | ||
971 | if (ret < 0) | ||
972 | return -1; | ||
973 | } | ||
974 | } | ||
975 | |||
976 | return 0; | ||
977 | } | ||
978 | |||
979 | /****************************************************************************/ | ||
980 | static int process_information(struct sk_buff **pskb, | ||
981 | struct nf_conn *ct, | ||
982 | enum ip_conntrack_info ctinfo, | ||
983 | unsigned char **data, int dataoff, | ||
984 | Information_UUIE *info) | ||
985 | { | ||
986 | int ret; | ||
987 | int i; | ||
988 | |||
989 | DEBUGP("nf_ct_q931: Information\n"); | ||
990 | |||
991 | if (info->options & eInformation_UUIE_fastStart) { | ||
992 | for (i = 0; i < info->fastStart.count; i++) { | ||
993 | ret = process_olc(pskb, ct, ctinfo, data, dataoff, | ||
994 | &info->fastStart.item[i]); | ||
995 | if (ret < 0) | ||
996 | return -1; | ||
997 | } | ||
998 | } | ||
999 | |||
1000 | return 0; | ||
1001 | } | ||
1002 | |||
1003 | /****************************************************************************/ | ||
1004 | static int process_facility(struct sk_buff **pskb, struct nf_conn *ct, | ||
1005 | enum ip_conntrack_info ctinfo, | ||
1006 | unsigned char **data, int dataoff, | ||
1007 | Facility_UUIE *facility) | ||
1008 | { | ||
1009 | int ret; | ||
1010 | int i; | ||
1011 | |||
1012 | DEBUGP("nf_ct_q931: Facility\n"); | ||
1013 | |||
1014 | if (facility->reason.choice == eFacilityReason_callForwarded) { | ||
1015 | if (facility->options & eFacility_UUIE_alternativeAddress) | ||
1016 | return expect_callforwarding(pskb, ct, ctinfo, data, | ||
1017 | dataoff, | ||
1018 | &facility-> | ||
1019 | alternativeAddress); | ||
1020 | return 0; | ||
1021 | } | ||
1022 | |||
1023 | if (facility->options & eFacility_UUIE_h245Address) { | ||
1024 | ret = expect_h245(pskb, ct, ctinfo, data, dataoff, | ||
1025 | &facility->h245Address); | ||
1026 | if (ret < 0) | ||
1027 | return -1; | ||
1028 | } | ||
1029 | |||
1030 | if (facility->options & eFacility_UUIE_fastStart) { | ||
1031 | for (i = 0; i < facility->fastStart.count; i++) { | ||
1032 | ret = process_olc(pskb, ct, ctinfo, data, dataoff, | ||
1033 | &facility->fastStart.item[i]); | ||
1034 | if (ret < 0) | ||
1035 | return -1; | ||
1036 | } | ||
1037 | } | ||
1038 | |||
1039 | return 0; | ||
1040 | } | ||
1041 | |||
1042 | /****************************************************************************/ | ||
1043 | static int process_progress(struct sk_buff **pskb, struct nf_conn *ct, | ||
1044 | enum ip_conntrack_info ctinfo, | ||
1045 | unsigned char **data, int dataoff, | ||
1046 | Progress_UUIE *progress) | ||
1047 | { | ||
1048 | int ret; | ||
1049 | int i; | ||
1050 | |||
1051 | DEBUGP("nf_ct_q931: Progress\n"); | ||
1052 | |||
1053 | if (progress->options & eProgress_UUIE_h245Address) { | ||
1054 | ret = expect_h245(pskb, ct, ctinfo, data, dataoff, | ||
1055 | &progress->h245Address); | ||
1056 | if (ret < 0) | ||
1057 | return -1; | ||
1058 | } | ||
1059 | |||
1060 | if (progress->options & eProgress_UUIE_fastStart) { | ||
1061 | for (i = 0; i < progress->fastStart.count; i++) { | ||
1062 | ret = process_olc(pskb, ct, ctinfo, data, dataoff, | ||
1063 | &progress->fastStart.item[i]); | ||
1064 | if (ret < 0) | ||
1065 | return -1; | ||
1066 | } | ||
1067 | } | ||
1068 | |||
1069 | return 0; | ||
1070 | } | ||
1071 | |||
1072 | /****************************************************************************/ | ||
1073 | static int process_q931(struct sk_buff **pskb, struct nf_conn *ct, | ||
1074 | enum ip_conntrack_info ctinfo, | ||
1075 | unsigned char **data, int dataoff, Q931 *q931) | ||
1076 | { | ||
1077 | H323_UU_PDU *pdu = &q931->UUIE.h323_uu_pdu; | ||
1078 | int i; | ||
1079 | int ret = 0; | ||
1080 | |||
1081 | switch (pdu->h323_message_body.choice) { | ||
1082 | case eH323_UU_PDU_h323_message_body_setup: | ||
1083 | ret = process_setup(pskb, ct, ctinfo, data, dataoff, | ||
1084 | &pdu->h323_message_body.setup); | ||
1085 | break; | ||
1086 | case eH323_UU_PDU_h323_message_body_callProceeding: | ||
1087 | ret = process_callproceeding(pskb, ct, ctinfo, data, dataoff, | ||
1088 | &pdu->h323_message_body. | ||
1089 | callProceeding); | ||
1090 | break; | ||
1091 | case eH323_UU_PDU_h323_message_body_connect: | ||
1092 | ret = process_connect(pskb, ct, ctinfo, data, dataoff, | ||
1093 | &pdu->h323_message_body.connect); | ||
1094 | break; | ||
1095 | case eH323_UU_PDU_h323_message_body_alerting: | ||
1096 | ret = process_alerting(pskb, ct, ctinfo, data, dataoff, | ||
1097 | &pdu->h323_message_body.alerting); | ||
1098 | break; | ||
1099 | case eH323_UU_PDU_h323_message_body_information: | ||
1100 | ret = process_information(pskb, ct, ctinfo, data, dataoff, | ||
1101 | &pdu->h323_message_body. | ||
1102 | information); | ||
1103 | break; | ||
1104 | case eH323_UU_PDU_h323_message_body_facility: | ||
1105 | ret = process_facility(pskb, ct, ctinfo, data, dataoff, | ||
1106 | &pdu->h323_message_body.facility); | ||
1107 | break; | ||
1108 | case eH323_UU_PDU_h323_message_body_progress: | ||
1109 | ret = process_progress(pskb, ct, ctinfo, data, dataoff, | ||
1110 | &pdu->h323_message_body.progress); | ||
1111 | break; | ||
1112 | default: | ||
1113 | DEBUGP("nf_ct_q931: Q.931 signal %d\n", | ||
1114 | pdu->h323_message_body.choice); | ||
1115 | break; | ||
1116 | } | ||
1117 | |||
1118 | if (ret < 0) | ||
1119 | return -1; | ||
1120 | |||
1121 | if (pdu->options & eH323_UU_PDU_h245Control) { | ||
1122 | for (i = 0; i < pdu->h245Control.count; i++) { | ||
1123 | ret = process_h245(pskb, ct, ctinfo, data, dataoff, | ||
1124 | &pdu->h245Control.item[i]); | ||
1125 | if (ret < 0) | ||
1126 | return -1; | ||
1127 | } | ||
1128 | } | ||
1129 | |||
1130 | return 0; | ||
1131 | } | ||
1132 | |||
1133 | /****************************************************************************/ | ||
1134 | static int q931_help(struct sk_buff **pskb, unsigned int protoff, | ||
1135 | struct nf_conn *ct, enum ip_conntrack_info ctinfo) | ||
1136 | { | ||
1137 | static Q931 q931; | ||
1138 | unsigned char *data = NULL; | ||
1139 | int datalen; | ||
1140 | int dataoff; | ||
1141 | int ret; | ||
1142 | |||
1143 | /* Until there's been traffic both ways, don't look in packets. */ | ||
1144 | if (ctinfo != IP_CT_ESTABLISHED && | ||
1145 | ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY) { | ||
1146 | return NF_ACCEPT; | ||
1147 | } | ||
1148 | DEBUGP("nf_ct_q931: skblen = %u\n", (*pskb)->len); | ||
1149 | |||
1150 | spin_lock_bh(&nf_h323_lock); | ||
1151 | |||
1152 | /* Process each TPKT */ | ||
1153 | while (get_tpkt_data(pskb, protoff, ct, ctinfo, | ||
1154 | &data, &datalen, &dataoff)) { | ||
1155 | DEBUGP("nf_ct_q931: TPKT len=%d ", datalen); | ||
1156 | NF_CT_DUMP_TUPLE(&ct->tuplehash[CTINFO2DIR(ctinfo)].tuple); | ||
1157 | |||
1158 | /* Decode Q.931 signal */ | ||
1159 | ret = DecodeQ931(data, datalen, &q931); | ||
1160 | if (ret < 0) { | ||
1161 | if (net_ratelimit()) | ||
1162 | printk("nf_ct_q931: decoding error: %s\n", | ||
1163 | ret == H323_ERROR_BOUND ? | ||
1164 | "out of bound" : "out of range"); | ||
1165 | /* We don't drop on decoding errors */ | ||
1166 | break; | ||
1167 | } | ||
1168 | |||
1169 | /* Process Q.931 signal */ | ||
1170 | if (process_q931(pskb, ct, ctinfo, &data, dataoff, &q931) < 0) | ||
1171 | goto drop; | ||
1172 | } | ||
1173 | |||
1174 | spin_unlock_bh(&nf_h323_lock); | ||
1175 | return NF_ACCEPT; | ||
1176 | |||
1177 | drop: | ||
1178 | spin_unlock_bh(&nf_h323_lock); | ||
1179 | if (net_ratelimit()) | ||
1180 | printk("nf_ct_q931: packet dropped\n"); | ||
1181 | return NF_DROP; | ||
1182 | } | ||
1183 | |||
1184 | /****************************************************************************/ | ||
1185 | static struct nf_conntrack_helper nf_conntrack_helper_q931[] __read_mostly = { | ||
1186 | { | ||
1187 | .name = "Q.931", | ||
1188 | .me = THIS_MODULE, | ||
1189 | /* T.120 and H.245 */ | ||
1190 | .max_expected = H323_RTP_CHANNEL_MAX * 4 + 4, | ||
1191 | .timeout = 240, | ||
1192 | .tuple.src.l3num = AF_INET, | ||
1193 | .tuple.src.u.tcp.port = __constant_htons(Q931_PORT), | ||
1194 | .tuple.dst.protonum = IPPROTO_TCP, | ||
1195 | .mask.src.l3num = 0xFFFF, | ||
1196 | .mask.src.u.tcp.port = __constant_htons(0xFFFF), | ||
1197 | .mask.dst.protonum = 0xFF, | ||
1198 | .help = q931_help | ||
1199 | }, | ||
1200 | { | ||
1201 | .name = "Q.931", | ||
1202 | .me = THIS_MODULE, | ||
1203 | /* T.120 and H.245 */ | ||
1204 | .max_expected = H323_RTP_CHANNEL_MAX * 4 + 4, | ||
1205 | .timeout = 240, | ||
1206 | .tuple.src.l3num = AF_INET6, | ||
1207 | .tuple.src.u.tcp.port = __constant_htons(Q931_PORT), | ||
1208 | .tuple.dst.protonum = IPPROTO_TCP, | ||
1209 | .mask.src.l3num = 0xFFFF, | ||
1210 | .mask.src.u.tcp.port = __constant_htons(0xFFFF), | ||
1211 | .mask.dst.protonum = 0xFF, | ||
1212 | .help = q931_help | ||
1213 | }, | ||
1214 | }; | ||
1215 | |||
1216 | /****************************************************************************/ | ||
1217 | static unsigned char *get_udp_data(struct sk_buff **pskb, unsigned int protoff, | ||
1218 | int *datalen) | ||
1219 | { | ||
1220 | struct udphdr _uh, *uh; | ||
1221 | int dataoff; | ||
1222 | |||
1223 | uh = skb_header_pointer(*pskb, protoff, sizeof(_uh), &_uh); | ||
1224 | if (uh == NULL) | ||
1225 | return NULL; | ||
1226 | dataoff = protoff + sizeof(_uh); | ||
1227 | if (dataoff >= (*pskb)->len) | ||
1228 | return NULL; | ||
1229 | *datalen = (*pskb)->len - dataoff; | ||
1230 | return skb_header_pointer(*pskb, dataoff, *datalen, h323_buffer); | ||
1231 | } | ||
1232 | |||
1233 | /****************************************************************************/ | ||
1234 | static struct nf_conntrack_expect *find_expect(struct nf_conn *ct, | ||
1235 | union nf_conntrack_address *addr, | ||
1236 | __be16 port) | ||
1237 | { | ||
1238 | struct nf_conntrack_expect *exp; | ||
1239 | struct nf_conntrack_tuple tuple; | ||
1240 | |||
1241 | memset(&tuple.src.u3, 0, sizeof(tuple.src.u3)); | ||
1242 | tuple.src.u.tcp.port = 0; | ||
1243 | memcpy(&tuple.dst.u3, addr, sizeof(tuple.dst.u3)); | ||
1244 | tuple.dst.u.tcp.port = port; | ||
1245 | tuple.dst.protonum = IPPROTO_TCP; | ||
1246 | |||
1247 | exp = __nf_conntrack_expect_find(&tuple); | ||
1248 | if (exp && exp->master == ct) | ||
1249 | return exp; | ||
1250 | return NULL; | ||
1251 | } | ||
1252 | |||
1253 | /****************************************************************************/ | ||
1254 | static int set_expect_timeout(struct nf_conntrack_expect *exp, | ||
1255 | unsigned timeout) | ||
1256 | { | ||
1257 | if (!exp || !del_timer(&exp->timeout)) | ||
1258 | return 0; | ||
1259 | |||
1260 | exp->timeout.expires = jiffies + timeout * HZ; | ||
1261 | add_timer(&exp->timeout); | ||
1262 | |||
1263 | return 1; | ||
1264 | } | ||
1265 | |||
1266 | /****************************************************************************/ | ||
1267 | static int expect_q931(struct sk_buff **pskb, struct nf_conn *ct, | ||
1268 | enum ip_conntrack_info ctinfo, | ||
1269 | unsigned char **data, | ||
1270 | TransportAddress *taddr, int count) | ||
1271 | { | ||
1272 | struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info; | ||
1273 | int dir = CTINFO2DIR(ctinfo); | ||
1274 | int ret = 0; | ||
1275 | int i; | ||
1276 | __be16 port; | ||
1277 | union nf_conntrack_address addr; | ||
1278 | struct nf_conntrack_expect *exp; | ||
1279 | typeof(nat_q931_hook) nat_q931; | ||
1280 | |||
1281 | /* Look for the first related address */ | ||
1282 | for (i = 0; i < count; i++) { | ||
1283 | if (get_h225_addr(ct, *data, &taddr[i], &addr, &port) && | ||
1284 | memcmp(&addr, &ct->tuplehash[dir].tuple.src.u3, | ||
1285 | sizeof(addr)) == 0 && port != 0) | ||
1286 | break; | ||
1287 | } | ||
1288 | |||
1289 | if (i >= count) /* Not found */ | ||
1290 | return 0; | ||
1291 | |||
1292 | /* Create expect for Q.931 */ | ||
1293 | if ((exp = nf_conntrack_expect_alloc(ct)) == NULL) | ||
1294 | return -1; | ||
1295 | nf_conntrack_expect_init(exp, ct->tuplehash[!dir].tuple.src.l3num, | ||
1296 | gkrouted_only ? /* only accept calls from GK? */ | ||
1297 | &ct->tuplehash[!dir].tuple.src.u3 : | ||
1298 | NULL, | ||
1299 | &ct->tuplehash[!dir].tuple.dst.u3, | ||
1300 | IPPROTO_TCP, NULL, &port); | ||
1301 | exp->helper = nf_conntrack_helper_q931; | ||
1302 | exp->flags = NF_CT_EXPECT_PERMANENT; /* Accept multiple calls */ | ||
1303 | |||
1304 | nat_q931 = rcu_dereference(nat_q931_hook); | ||
1305 | if (nat_q931 && ct->status & IPS_NAT_MASK) { /* Need NAT */ | ||
1306 | ret = nat_q931(pskb, ct, ctinfo, data, taddr, i, port, exp); | ||
1307 | } else { /* Conntrack only */ | ||
1308 | if (nf_conntrack_expect_related(exp) == 0) { | ||
1309 | DEBUGP("nf_ct_ras: expect Q.931 "); | ||
1310 | NF_CT_DUMP_TUPLE(&exp->tuple); | ||
1311 | |||
1312 | /* Save port for looking up expect in processing RCF */ | ||
1313 | info->sig_port[dir] = port; | ||
1314 | } else | ||
1315 | ret = -1; | ||
1316 | } | ||
1317 | |||
1318 | nf_conntrack_expect_put(exp); | ||
1319 | |||
1320 | return ret; | ||
1321 | } | ||
1322 | |||
1323 | /****************************************************************************/ | ||
1324 | static int process_grq(struct sk_buff **pskb, struct nf_conn *ct, | ||
1325 | enum ip_conntrack_info ctinfo, | ||
1326 | unsigned char **data, GatekeeperRequest *grq) | ||
1327 | { | ||
1328 | typeof(set_ras_addr_hook) set_ras_addr; | ||
1329 | |||
1330 | DEBUGP("nf_ct_ras: GRQ\n"); | ||
1331 | |||
1332 | set_ras_addr = rcu_dereference(set_ras_addr_hook); | ||
1333 | if (set_ras_addr && ct->status & IPS_NAT_MASK) /* NATed */ | ||
1334 | return set_ras_addr(pskb, ct, ctinfo, data, | ||
1335 | &grq->rasAddress, 1); | ||
1336 | return 0; | ||
1337 | } | ||
1338 | |||
1339 | /****************************************************************************/ | ||
1340 | static int process_gcf(struct sk_buff **pskb, struct nf_conn *ct, | ||
1341 | enum ip_conntrack_info ctinfo, | ||
1342 | unsigned char **data, GatekeeperConfirm *gcf) | ||
1343 | { | ||
1344 | int dir = CTINFO2DIR(ctinfo); | ||
1345 | int ret = 0; | ||
1346 | __be16 port; | ||
1347 | union nf_conntrack_address addr; | ||
1348 | struct nf_conntrack_expect *exp; | ||
1349 | |||
1350 | DEBUGP("nf_ct_ras: GCF\n"); | ||
1351 | |||
1352 | if (!get_h225_addr(ct, *data, &gcf->rasAddress, &addr, &port)) | ||
1353 | return 0; | ||
1354 | |||
1355 | /* Registration port is the same as discovery port */ | ||
1356 | if (!memcmp(&addr, &ct->tuplehash[dir].tuple.src.u3, sizeof(addr)) && | ||
1357 | port == ct->tuplehash[dir].tuple.src.u.udp.port) | ||
1358 | return 0; | ||
1359 | |||
1360 | /* Avoid RAS expectation loops. A GCF is never expected. */ | ||
1361 | if (test_bit(IPS_EXPECTED_BIT, &ct->status)) | ||
1362 | return 0; | ||
1363 | |||
1364 | /* Need new expect */ | ||
1365 | if ((exp = nf_conntrack_expect_alloc(ct)) == NULL) | ||
1366 | return -1; | ||
1367 | nf_conntrack_expect_init(exp, ct->tuplehash[!dir].tuple.src.l3num, | ||
1368 | &ct->tuplehash[!dir].tuple.src.u3, &addr, | ||
1369 | IPPROTO_UDP, NULL, &port); | ||
1370 | exp->helper = nf_conntrack_helper_ras; | ||
1371 | |||
1372 | if (nf_conntrack_expect_related(exp) == 0) { | ||
1373 | DEBUGP("nf_ct_ras: expect RAS "); | ||
1374 | NF_CT_DUMP_TUPLE(&exp->tuple); | ||
1375 | } else | ||
1376 | ret = -1; | ||
1377 | |||
1378 | nf_conntrack_expect_put(exp); | ||
1379 | |||
1380 | return ret; | ||
1381 | } | ||
1382 | |||
1383 | /****************************************************************************/ | ||
1384 | static int process_rrq(struct sk_buff **pskb, struct nf_conn *ct, | ||
1385 | enum ip_conntrack_info ctinfo, | ||
1386 | unsigned char **data, RegistrationRequest *rrq) | ||
1387 | { | ||
1388 | struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info; | ||
1389 | int ret; | ||
1390 | typeof(set_ras_addr_hook) set_ras_addr; | ||
1391 | |||
1392 | DEBUGP("nf_ct_ras: RRQ\n"); | ||
1393 | |||
1394 | ret = expect_q931(pskb, ct, ctinfo, data, | ||
1395 | rrq->callSignalAddress.item, | ||
1396 | rrq->callSignalAddress.count); | ||
1397 | if (ret < 0) | ||
1398 | return -1; | ||
1399 | |||
1400 | set_ras_addr = rcu_dereference(set_ras_addr_hook); | ||
1401 | if (set_ras_addr && ct->status & IPS_NAT_MASK) { | ||
1402 | ret = set_ras_addr(pskb, ct, ctinfo, data, | ||
1403 | rrq->rasAddress.item, | ||
1404 | rrq->rasAddress.count); | ||
1405 | if (ret < 0) | ||
1406 | return -1; | ||
1407 | } | ||
1408 | |||
1409 | if (rrq->options & eRegistrationRequest_timeToLive) { | ||
1410 | DEBUGP("nf_ct_ras: RRQ TTL = %u seconds\n", rrq->timeToLive); | ||
1411 | info->timeout = rrq->timeToLive; | ||
1412 | } else | ||
1413 | info->timeout = default_rrq_ttl; | ||
1414 | |||
1415 | return 0; | ||
1416 | } | ||
1417 | |||
1418 | /****************************************************************************/ | ||
1419 | static int process_rcf(struct sk_buff **pskb, struct nf_conn *ct, | ||
1420 | enum ip_conntrack_info ctinfo, | ||
1421 | unsigned char **data, RegistrationConfirm *rcf) | ||
1422 | { | ||
1423 | struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info; | ||
1424 | int dir = CTINFO2DIR(ctinfo); | ||
1425 | int ret; | ||
1426 | struct nf_conntrack_expect *exp; | ||
1427 | typeof(set_sig_addr_hook) set_sig_addr; | ||
1428 | |||
1429 | DEBUGP("nf_ct_ras: RCF\n"); | ||
1430 | |||
1431 | set_sig_addr = rcu_dereference(set_sig_addr_hook); | ||
1432 | if (set_sig_addr && ct->status & IPS_NAT_MASK) { | ||
1433 | ret = set_sig_addr(pskb, ct, ctinfo, data, | ||
1434 | rcf->callSignalAddress.item, | ||
1435 | rcf->callSignalAddress.count); | ||
1436 | if (ret < 0) | ||
1437 | return -1; | ||
1438 | } | ||
1439 | |||
1440 | if (rcf->options & eRegistrationConfirm_timeToLive) { | ||
1441 | DEBUGP("nf_ct_ras: RCF TTL = %u seconds\n", rcf->timeToLive); | ||
1442 | info->timeout = rcf->timeToLive; | ||
1443 | } | ||
1444 | |||
1445 | if (info->timeout > 0) { | ||
1446 | DEBUGP("nf_ct_ras: set RAS connection " | ||
1447 | "timeout to %u seconds\n", | ||
1448 | info->timeout); | ||
1449 | nf_ct_refresh(ct, *pskb, info->timeout * HZ); | ||
1450 | |||
1451 | /* Set expect timeout */ | ||
1452 | read_lock_bh(&nf_conntrack_lock); | ||
1453 | exp = find_expect(ct, &ct->tuplehash[dir].tuple.dst.u3, | ||
1454 | info->sig_port[!dir]); | ||
1455 | if (exp) { | ||
1456 | DEBUGP("nf_ct_ras: set Q.931 expect " | ||
1457 | "timeout to %u seconds for", | ||
1458 | info->timeout); | ||
1459 | NF_CT_DUMP_TUPLE(&exp->tuple); | ||
1460 | set_expect_timeout(exp, info->timeout); | ||
1461 | } | ||
1462 | read_unlock_bh(&nf_conntrack_lock); | ||
1463 | } | ||
1464 | |||
1465 | return 0; | ||
1466 | } | ||
1467 | |||
1468 | /****************************************************************************/ | ||
1469 | static int process_urq(struct sk_buff **pskb, struct nf_conn *ct, | ||
1470 | enum ip_conntrack_info ctinfo, | ||
1471 | unsigned char **data, UnregistrationRequest *urq) | ||
1472 | { | ||
1473 | struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info; | ||
1474 | int dir = CTINFO2DIR(ctinfo); | ||
1475 | int ret; | ||
1476 | typeof(set_sig_addr_hook) set_sig_addr; | ||
1477 | |||
1478 | DEBUGP("nf_ct_ras: URQ\n"); | ||
1479 | |||
1480 | set_sig_addr = rcu_dereference(set_sig_addr_hook); | ||
1481 | if (set_sig_addr && ct->status & IPS_NAT_MASK) { | ||
1482 | ret = set_sig_addr(pskb, ct, ctinfo, data, | ||
1483 | urq->callSignalAddress.item, | ||
1484 | urq->callSignalAddress.count); | ||
1485 | if (ret < 0) | ||
1486 | return -1; | ||
1487 | } | ||
1488 | |||
1489 | /* Clear old expect */ | ||
1490 | nf_ct_remove_expectations(ct); | ||
1491 | info->sig_port[dir] = 0; | ||
1492 | info->sig_port[!dir] = 0; | ||
1493 | |||
1494 | /* Give it 30 seconds for UCF or URJ */ | ||
1495 | nf_ct_refresh(ct, *pskb, 30 * HZ); | ||
1496 | |||
1497 | return 0; | ||
1498 | } | ||
1499 | |||
1500 | /****************************************************************************/ | ||
1501 | static int process_arq(struct sk_buff **pskb, struct nf_conn *ct, | ||
1502 | enum ip_conntrack_info ctinfo, | ||
1503 | unsigned char **data, AdmissionRequest *arq) | ||
1504 | { | ||
1505 | struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info; | ||
1506 | int dir = CTINFO2DIR(ctinfo); | ||
1507 | __be16 port; | ||
1508 | union nf_conntrack_address addr; | ||
1509 | typeof(set_h225_addr_hook) set_h225_addr; | ||
1510 | |||
1511 | DEBUGP("nf_ct_ras: ARQ\n"); | ||
1512 | |||
1513 | set_h225_addr = rcu_dereference(set_h225_addr_hook); | ||
1514 | if ((arq->options & eAdmissionRequest_destCallSignalAddress) && | ||
1515 | get_h225_addr(ct, *data, &arq->destCallSignalAddress, | ||
1516 | &addr, &port) && | ||
1517 | !memcmp(&addr, &ct->tuplehash[dir].tuple.src.u3, sizeof(addr)) && | ||
1518 | port == info->sig_port[dir] && | ||
1519 | set_h225_addr && ct->status & IPS_NAT_MASK) { | ||
1520 | /* Answering ARQ */ | ||
1521 | return set_h225_addr(pskb, data, 0, | ||
1522 | &arq->destCallSignalAddress, | ||
1523 | &ct->tuplehash[!dir].tuple.dst.u3, | ||
1524 | info->sig_port[!dir]); | ||
1525 | } | ||
1526 | |||
1527 | if ((arq->options & eAdmissionRequest_srcCallSignalAddress) && | ||
1528 | get_h225_addr(ct, *data, &arq->srcCallSignalAddress, | ||
1529 | &addr, &port) && | ||
1530 | !memcmp(&addr, &ct->tuplehash[dir].tuple.src.u3, sizeof(addr)) && | ||
1531 | set_h225_addr && ct->status & IPS_NAT_MASK) { | ||
1532 | /* Calling ARQ */ | ||
1533 | return set_h225_addr(pskb, data, 0, | ||
1534 | &arq->srcCallSignalAddress, | ||
1535 | &ct->tuplehash[!dir].tuple.dst.u3, | ||
1536 | port); | ||
1537 | } | ||
1538 | |||
1539 | return 0; | ||
1540 | } | ||
1541 | |||
1542 | /****************************************************************************/ | ||
1543 | static int process_acf(struct sk_buff **pskb, struct nf_conn *ct, | ||
1544 | enum ip_conntrack_info ctinfo, | ||
1545 | unsigned char **data, AdmissionConfirm *acf) | ||
1546 | { | ||
1547 | int dir = CTINFO2DIR(ctinfo); | ||
1548 | int ret = 0; | ||
1549 | __be16 port; | ||
1550 | union nf_conntrack_address addr; | ||
1551 | struct nf_conntrack_expect *exp; | ||
1552 | typeof(set_sig_addr_hook) set_sig_addr; | ||
1553 | |||
1554 | DEBUGP("nf_ct_ras: ACF\n"); | ||
1555 | |||
1556 | if (!get_h225_addr(ct, *data, &acf->destCallSignalAddress, | ||
1557 | &addr, &port)) | ||
1558 | return 0; | ||
1559 | |||
1560 | if (!memcmp(&addr, &ct->tuplehash[dir].tuple.dst.u3, sizeof(addr))) { | ||
1561 | /* Answering ACF */ | ||
1562 | set_sig_addr = rcu_dereference(set_sig_addr_hook); | ||
1563 | if (set_sig_addr && ct->status & IPS_NAT_MASK) | ||
1564 | return set_sig_addr(pskb, ct, ctinfo, data, | ||
1565 | &acf->destCallSignalAddress, 1); | ||
1566 | return 0; | ||
1567 | } | ||
1568 | |||
1569 | /* Need new expect */ | ||
1570 | if ((exp = nf_conntrack_expect_alloc(ct)) == NULL) | ||
1571 | return -1; | ||
1572 | nf_conntrack_expect_init(exp, ct->tuplehash[!dir].tuple.src.l3num, | ||
1573 | &ct->tuplehash[!dir].tuple.src.u3, &addr, | ||
1574 | IPPROTO_TCP, NULL, &port); | ||
1575 | exp->flags = NF_CT_EXPECT_PERMANENT; | ||
1576 | exp->helper = nf_conntrack_helper_q931; | ||
1577 | |||
1578 | if (nf_conntrack_expect_related(exp) == 0) { | ||
1579 | DEBUGP("nf_ct_ras: expect Q.931 "); | ||
1580 | NF_CT_DUMP_TUPLE(&exp->tuple); | ||
1581 | } else | ||
1582 | ret = -1; | ||
1583 | |||
1584 | nf_conntrack_expect_put(exp); | ||
1585 | |||
1586 | return ret; | ||
1587 | } | ||
1588 | |||
1589 | /****************************************************************************/ | ||
1590 | static int process_lrq(struct sk_buff **pskb, struct nf_conn *ct, | ||
1591 | enum ip_conntrack_info ctinfo, | ||
1592 | unsigned char **data, LocationRequest *lrq) | ||
1593 | { | ||
1594 | typeof(set_ras_addr_hook) set_ras_addr; | ||
1595 | |||
1596 | DEBUGP("nf_ct_ras: LRQ\n"); | ||
1597 | |||
1598 | set_ras_addr = rcu_dereference(set_ras_addr_hook); | ||
1599 | if (set_ras_addr && ct->status & IPS_NAT_MASK) | ||
1600 | return set_ras_addr(pskb, ct, ctinfo, data, | ||
1601 | &lrq->replyAddress, 1); | ||
1602 | return 0; | ||
1603 | } | ||
1604 | |||
1605 | /****************************************************************************/ | ||
1606 | static int process_lcf(struct sk_buff **pskb, struct nf_conn *ct, | ||
1607 | enum ip_conntrack_info ctinfo, | ||
1608 | unsigned char **data, LocationConfirm *lcf) | ||
1609 | { | ||
1610 | int dir = CTINFO2DIR(ctinfo); | ||
1611 | int ret = 0; | ||
1612 | __be16 port; | ||
1613 | union nf_conntrack_address addr; | ||
1614 | struct nf_conntrack_expect *exp; | ||
1615 | |||
1616 | DEBUGP("nf_ct_ras: LCF\n"); | ||
1617 | |||
1618 | if (!get_h225_addr(ct, *data, &lcf->callSignalAddress, | ||
1619 | &addr, &port)) | ||
1620 | return 0; | ||
1621 | |||
1622 | /* Need new expect for call signal */ | ||
1623 | if ((exp = nf_conntrack_expect_alloc(ct)) == NULL) | ||
1624 | return -1; | ||
1625 | nf_conntrack_expect_init(exp, ct->tuplehash[!dir].tuple.src.l3num, | ||
1626 | &ct->tuplehash[!dir].tuple.src.u3, &addr, | ||
1627 | IPPROTO_TCP, NULL, &port); | ||
1628 | exp->flags = NF_CT_EXPECT_PERMANENT; | ||
1629 | exp->helper = nf_conntrack_helper_q931; | ||
1630 | |||
1631 | if (nf_conntrack_expect_related(exp) == 0) { | ||
1632 | DEBUGP("nf_ct_ras: expect Q.931 "); | ||
1633 | NF_CT_DUMP_TUPLE(&exp->tuple); | ||
1634 | } else | ||
1635 | ret = -1; | ||
1636 | |||
1637 | nf_conntrack_expect_put(exp); | ||
1638 | |||
1639 | /* Ignore rasAddress */ | ||
1640 | |||
1641 | return ret; | ||
1642 | } | ||
1643 | |||
1644 | /****************************************************************************/ | ||
1645 | static int process_irr(struct sk_buff **pskb, struct nf_conn *ct, | ||
1646 | enum ip_conntrack_info ctinfo, | ||
1647 | unsigned char **data, InfoRequestResponse *irr) | ||
1648 | { | ||
1649 | int ret; | ||
1650 | typeof(set_ras_addr_hook) set_ras_addr; | ||
1651 | typeof(set_sig_addr_hook) set_sig_addr; | ||
1652 | |||
1653 | DEBUGP("nf_ct_ras: IRR\n"); | ||
1654 | |||
1655 | set_ras_addr = rcu_dereference(set_ras_addr_hook); | ||
1656 | if (set_ras_addr && ct->status & IPS_NAT_MASK) { | ||
1657 | ret = set_ras_addr(pskb, ct, ctinfo, data, | ||
1658 | &irr->rasAddress, 1); | ||
1659 | if (ret < 0) | ||
1660 | return -1; | ||
1661 | } | ||
1662 | |||
1663 | set_sig_addr = rcu_dereference(set_sig_addr_hook); | ||
1664 | if (set_sig_addr && ct->status & IPS_NAT_MASK) { | ||
1665 | ret = set_sig_addr(pskb, ct, ctinfo, data, | ||
1666 | irr->callSignalAddress.item, | ||
1667 | irr->callSignalAddress.count); | ||
1668 | if (ret < 0) | ||
1669 | return -1; | ||
1670 | } | ||
1671 | |||
1672 | return 0; | ||
1673 | } | ||
1674 | |||
1675 | /****************************************************************************/ | ||
1676 | static int process_ras(struct sk_buff **pskb, struct nf_conn *ct, | ||
1677 | enum ip_conntrack_info ctinfo, | ||
1678 | unsigned char **data, RasMessage *ras) | ||
1679 | { | ||
1680 | switch (ras->choice) { | ||
1681 | case eRasMessage_gatekeeperRequest: | ||
1682 | return process_grq(pskb, ct, ctinfo, data, | ||
1683 | &ras->gatekeeperRequest); | ||
1684 | case eRasMessage_gatekeeperConfirm: | ||
1685 | return process_gcf(pskb, ct, ctinfo, data, | ||
1686 | &ras->gatekeeperConfirm); | ||
1687 | case eRasMessage_registrationRequest: | ||
1688 | return process_rrq(pskb, ct, ctinfo, data, | ||
1689 | &ras->registrationRequest); | ||
1690 | case eRasMessage_registrationConfirm: | ||
1691 | return process_rcf(pskb, ct, ctinfo, data, | ||
1692 | &ras->registrationConfirm); | ||
1693 | case eRasMessage_unregistrationRequest: | ||
1694 | return process_urq(pskb, ct, ctinfo, data, | ||
1695 | &ras->unregistrationRequest); | ||
1696 | case eRasMessage_admissionRequest: | ||
1697 | return process_arq(pskb, ct, ctinfo, data, | ||
1698 | &ras->admissionRequest); | ||
1699 | case eRasMessage_admissionConfirm: | ||
1700 | return process_acf(pskb, ct, ctinfo, data, | ||
1701 | &ras->admissionConfirm); | ||
1702 | case eRasMessage_locationRequest: | ||
1703 | return process_lrq(pskb, ct, ctinfo, data, | ||
1704 | &ras->locationRequest); | ||
1705 | case eRasMessage_locationConfirm: | ||
1706 | return process_lcf(pskb, ct, ctinfo, data, | ||
1707 | &ras->locationConfirm); | ||
1708 | case eRasMessage_infoRequestResponse: | ||
1709 | return process_irr(pskb, ct, ctinfo, data, | ||
1710 | &ras->infoRequestResponse); | ||
1711 | default: | ||
1712 | DEBUGP("nf_ct_ras: RAS message %d\n", ras->choice); | ||
1713 | break; | ||
1714 | } | ||
1715 | |||
1716 | return 0; | ||
1717 | } | ||
1718 | |||
1719 | /****************************************************************************/ | ||
1720 | static int ras_help(struct sk_buff **pskb, unsigned int protoff, | ||
1721 | struct nf_conn *ct, enum ip_conntrack_info ctinfo) | ||
1722 | { | ||
1723 | static RasMessage ras; | ||
1724 | unsigned char *data; | ||
1725 | int datalen = 0; | ||
1726 | int ret; | ||
1727 | |||
1728 | DEBUGP("nf_ct_ras: skblen = %u\n", (*pskb)->len); | ||
1729 | |||
1730 | spin_lock_bh(&nf_h323_lock); | ||
1731 | |||
1732 | /* Get UDP data */ | ||
1733 | data = get_udp_data(pskb, protoff, &datalen); | ||
1734 | if (data == NULL) | ||
1735 | goto accept; | ||
1736 | DEBUGP("nf_ct_ras: RAS message len=%d ", datalen); | ||
1737 | NF_CT_DUMP_TUPLE(&ct->tuplehash[CTINFO2DIR(ctinfo)].tuple); | ||
1738 | |||
1739 | /* Decode RAS message */ | ||
1740 | ret = DecodeRasMessage(data, datalen, &ras); | ||
1741 | if (ret < 0) { | ||
1742 | if (net_ratelimit()) | ||
1743 | printk("nf_ct_ras: decoding error: %s\n", | ||
1744 | ret == H323_ERROR_BOUND ? | ||
1745 | "out of bound" : "out of range"); | ||
1746 | goto accept; | ||
1747 | } | ||
1748 | |||
1749 | /* Process RAS message */ | ||
1750 | if (process_ras(pskb, ct, ctinfo, &data, &ras) < 0) | ||
1751 | goto drop; | ||
1752 | |||
1753 | accept: | ||
1754 | spin_unlock_bh(&nf_h323_lock); | ||
1755 | return NF_ACCEPT; | ||
1756 | |||
1757 | drop: | ||
1758 | spin_unlock_bh(&nf_h323_lock); | ||
1759 | if (net_ratelimit()) | ||
1760 | printk("nf_ct_ras: packet dropped\n"); | ||
1761 | return NF_DROP; | ||
1762 | } | ||
1763 | |||
1764 | /****************************************************************************/ | ||
1765 | static struct nf_conntrack_helper nf_conntrack_helper_ras[] __read_mostly = { | ||
1766 | { | ||
1767 | .name = "RAS", | ||
1768 | .me = THIS_MODULE, | ||
1769 | .max_expected = 32, | ||
1770 | .timeout = 240, | ||
1771 | .tuple.src.l3num = AF_INET, | ||
1772 | .tuple.src.u.udp.port = __constant_htons(RAS_PORT), | ||
1773 | .tuple.dst.protonum = IPPROTO_UDP, | ||
1774 | .mask.src.l3num = 0xFFFF, | ||
1775 | .mask.src.u.udp.port = __constant_htons(0xFFFF), | ||
1776 | .mask.dst.protonum = 0xFF, | ||
1777 | .help = ras_help, | ||
1778 | }, | ||
1779 | { | ||
1780 | .name = "RAS", | ||
1781 | .me = THIS_MODULE, | ||
1782 | .max_expected = 32, | ||
1783 | .timeout = 240, | ||
1784 | .tuple.src.l3num = AF_INET6, | ||
1785 | .tuple.src.u.udp.port = __constant_htons(RAS_PORT), | ||
1786 | .tuple.dst.protonum = IPPROTO_UDP, | ||
1787 | .mask.src.l3num = 0xFFFF, | ||
1788 | .mask.src.u.udp.port = __constant_htons(0xFFFF), | ||
1789 | .mask.dst.protonum = 0xFF, | ||
1790 | .help = ras_help, | ||
1791 | }, | ||
1792 | }; | ||
1793 | |||
1794 | /****************************************************************************/ | ||
1795 | static void __exit nf_conntrack_h323_fini(void) | ||
1796 | { | ||
1797 | nf_conntrack_helper_unregister(&nf_conntrack_helper_ras[1]); | ||
1798 | nf_conntrack_helper_unregister(&nf_conntrack_helper_ras[0]); | ||
1799 | nf_conntrack_helper_unregister(&nf_conntrack_helper_q931[1]); | ||
1800 | nf_conntrack_helper_unregister(&nf_conntrack_helper_q931[0]); | ||
1801 | kfree(h323_buffer); | ||
1802 | DEBUGP("nf_ct_h323: fini\n"); | ||
1803 | } | ||
1804 | |||
1805 | /****************************************************************************/ | ||
1806 | static int __init nf_conntrack_h323_init(void) | ||
1807 | { | ||
1808 | int ret; | ||
1809 | |||
1810 | h323_buffer = kmalloc(65536, GFP_KERNEL); | ||
1811 | if (!h323_buffer) | ||
1812 | return -ENOMEM; | ||
1813 | ret = nf_conntrack_helper_register(&nf_conntrack_helper_q931[0]); | ||
1814 | if (ret < 0) | ||
1815 | goto err1; | ||
1816 | ret = nf_conntrack_helper_register(&nf_conntrack_helper_q931[1]); | ||
1817 | if (ret < 0) | ||
1818 | goto err2; | ||
1819 | ret = nf_conntrack_helper_register(&nf_conntrack_helper_ras[0]); | ||
1820 | if (ret < 0) | ||
1821 | goto err3; | ||
1822 | ret = nf_conntrack_helper_register(&nf_conntrack_helper_ras[1]); | ||
1823 | if (ret < 0) | ||
1824 | goto err4; | ||
1825 | DEBUGP("nf_ct_h323: init success\n"); | ||
1826 | return 0; | ||
1827 | |||
1828 | err4: | ||
1829 | nf_conntrack_helper_unregister(&nf_conntrack_helper_ras[0]); | ||
1830 | err3: | ||
1831 | nf_conntrack_helper_unregister(&nf_conntrack_helper_q931[1]); | ||
1832 | err2: | ||
1833 | nf_conntrack_helper_unregister(&nf_conntrack_helper_q931[0]); | ||
1834 | err1: | ||
1835 | return ret; | ||
1836 | } | ||
1837 | |||
1838 | /****************************************************************************/ | ||
1839 | module_init(nf_conntrack_h323_init); | ||
1840 | module_exit(nf_conntrack_h323_fini); | ||
1841 | |||
1842 | EXPORT_SYMBOL_GPL(get_h225_addr); | ||
1843 | EXPORT_SYMBOL_GPL(set_h245_addr_hook); | ||
1844 | EXPORT_SYMBOL_GPL(set_h225_addr_hook); | ||
1845 | EXPORT_SYMBOL_GPL(set_sig_addr_hook); | ||
1846 | EXPORT_SYMBOL_GPL(set_ras_addr_hook); | ||
1847 | EXPORT_SYMBOL_GPL(nat_rtp_rtcp_hook); | ||
1848 | EXPORT_SYMBOL_GPL(nat_t120_hook); | ||
1849 | EXPORT_SYMBOL_GPL(nat_h245_hook); | ||
1850 | EXPORT_SYMBOL_GPL(nat_callforwarding_hook); | ||
1851 | EXPORT_SYMBOL_GPL(nat_q931_hook); | ||
1852 | |||
1853 | MODULE_AUTHOR("Jing Min Zhao <zhaojingmin@users.sourceforge.net>"); | ||
1854 | MODULE_DESCRIPTION("H.323 connection tracking helper"); | ||
1855 | MODULE_LICENSE("GPL"); | ||
1856 | MODULE_ALIAS("ip_conntrack_h323"); | ||
diff --git a/net/ipv4/netfilter/ip_conntrack_helper_h323_types.c b/net/netfilter/nf_conntrack_h323_types.c index 4b359618bedd..4c6f8b3b1208 100644 --- a/net/ipv4/netfilter/ip_conntrack_helper_h323_types.c +++ b/net/netfilter/nf_conntrack_h323_types.c | |||
@@ -36,7 +36,8 @@ static field_t _TransportAddress_ipxAddress[] = { /* SEQUENCE */ | |||
36 | }; | 36 | }; |
37 | 37 | ||
38 | static field_t _TransportAddress_ip6Address[] = { /* SEQUENCE */ | 38 | static field_t _TransportAddress_ip6Address[] = { /* SEQUENCE */ |
39 | {FNAME("ip") OCTSTR, FIXD, 16, 0, SKIP, 0, NULL}, | 39 | {FNAME("ip") OCTSTR, FIXD, 16, 0, DECODE, |
40 | offsetof(TransportAddress_ip6Address, ip6), NULL}, | ||
40 | {FNAME("port") INT, WORD, 0, 0, SKIP, 0, NULL}, | 41 | {FNAME("port") INT, WORD, 0, 0, SKIP, 0, NULL}, |
41 | }; | 42 | }; |
42 | 43 | ||
@@ -65,8 +66,8 @@ static field_t _TransportAddress[] = { /* CHOICE */ | |||
65 | _TransportAddress_ipSourceRoute}, | 66 | _TransportAddress_ipSourceRoute}, |
66 | {FNAME("ipxAddress") SEQ, 0, 3, 3, SKIP, 0, | 67 | {FNAME("ipxAddress") SEQ, 0, 3, 3, SKIP, 0, |
67 | _TransportAddress_ipxAddress}, | 68 | _TransportAddress_ipxAddress}, |
68 | {FNAME("ip6Address") SEQ, 0, 2, 2, SKIP | EXT, 0, | 69 | {FNAME("ip6Address") SEQ, 0, 2, 2, DECODE | EXT, |
69 | _TransportAddress_ip6Address}, | 70 | offsetof(TransportAddress, ip6Address), _TransportAddress_ip6Address}, |
70 | {FNAME("netBios") OCTSTR, FIXD, 16, 0, SKIP, 0, NULL}, | 71 | {FNAME("netBios") OCTSTR, FIXD, 16, 0, SKIP, 0, NULL}, |
71 | {FNAME("nsap") OCTSTR, 5, 1, 0, SKIP, 0, NULL}, | 72 | {FNAME("nsap") OCTSTR, 5, 1, 0, SKIP, 0, NULL}, |
72 | {FNAME("nonStandardAddress") SEQ, 0, 2, 2, SKIP, 0, | 73 | {FNAME("nonStandardAddress") SEQ, 0, 2, 2, SKIP, 0, |
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c new file mode 100644 index 000000000000..0743be4434b0 --- /dev/null +++ b/net/netfilter/nf_conntrack_helper.c | |||
@@ -0,0 +1,155 @@ | |||
1 | /* Helper handling for netfilter. */ | ||
2 | |||
3 | /* (C) 1999-2001 Paul `Rusty' Russell | ||
4 | * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org> | ||
5 | * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | */ | ||
11 | |||
12 | #include <linux/types.h> | ||
13 | #include <linux/netfilter.h> | ||
14 | #include <linux/module.h> | ||
15 | #include <linux/skbuff.h> | ||
16 | #include <linux/vmalloc.h> | ||
17 | #include <linux/stddef.h> | ||
18 | #include <linux/slab.h> | ||
19 | #include <linux/random.h> | ||
20 | #include <linux/err.h> | ||
21 | #include <linux/kernel.h> | ||
22 | #include <linux/netdevice.h> | ||
23 | |||
24 | #include <net/netfilter/nf_conntrack.h> | ||
25 | #include <net/netfilter/nf_conntrack_l3proto.h> | ||
26 | #include <net/netfilter/nf_conntrack_l4proto.h> | ||
27 | #include <net/netfilter/nf_conntrack_helper.h> | ||
28 | #include <net/netfilter/nf_conntrack_core.h> | ||
29 | |||
30 | static __read_mostly LIST_HEAD(helpers); | ||
31 | |||
32 | struct nf_conntrack_helper * | ||
33 | __nf_ct_helper_find(const struct nf_conntrack_tuple *tuple) | ||
34 | { | ||
35 | struct nf_conntrack_helper *h; | ||
36 | |||
37 | list_for_each_entry(h, &helpers, list) { | ||
38 | if (nf_ct_tuple_mask_cmp(tuple, &h->tuple, &h->mask)) | ||
39 | return h; | ||
40 | } | ||
41 | return NULL; | ||
42 | } | ||
43 | |||
44 | struct nf_conntrack_helper * | ||
45 | nf_ct_helper_find_get(const struct nf_conntrack_tuple *tuple) | ||
46 | { | ||
47 | struct nf_conntrack_helper *helper; | ||
48 | |||
49 | /* need nf_conntrack_lock to assure that helper exists until | ||
50 | * try_module_get() is called */ | ||
51 | read_lock_bh(&nf_conntrack_lock); | ||
52 | |||
53 | helper = __nf_ct_helper_find(tuple); | ||
54 | if (helper) { | ||
55 | /* need to increase module usage count to assure helper will | ||
56 | * not go away while the caller is e.g. busy putting a | ||
57 | * conntrack in the hash that uses the helper */ | ||
58 | if (!try_module_get(helper->me)) | ||
59 | helper = NULL; | ||
60 | } | ||
61 | |||
62 | read_unlock_bh(&nf_conntrack_lock); | ||
63 | |||
64 | return helper; | ||
65 | } | ||
66 | EXPORT_SYMBOL_GPL(nf_ct_helper_find_get); | ||
67 | |||
68 | void nf_ct_helper_put(struct nf_conntrack_helper *helper) | ||
69 | { | ||
70 | module_put(helper->me); | ||
71 | } | ||
72 | EXPORT_SYMBOL_GPL(nf_ct_helper_put); | ||
73 | |||
74 | struct nf_conntrack_helper * | ||
75 | __nf_conntrack_helper_find_byname(const char *name) | ||
76 | { | ||
77 | struct nf_conntrack_helper *h; | ||
78 | |||
79 | list_for_each_entry(h, &helpers, list) { | ||
80 | if (!strcmp(h->name, name)) | ||
81 | return h; | ||
82 | } | ||
83 | |||
84 | return NULL; | ||
85 | } | ||
86 | EXPORT_SYMBOL_GPL(__nf_conntrack_helper_find_byname); | ||
87 | |||
88 | static inline int unhelp(struct nf_conntrack_tuple_hash *i, | ||
89 | const struct nf_conntrack_helper *me) | ||
90 | { | ||
91 | struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(i); | ||
92 | struct nf_conn_help *help = nfct_help(ct); | ||
93 | |||
94 | if (help && help->helper == me) { | ||
95 | nf_conntrack_event(IPCT_HELPER, ct); | ||
96 | help->helper = NULL; | ||
97 | } | ||
98 | return 0; | ||
99 | } | ||
100 | |||
101 | int nf_conntrack_helper_register(struct nf_conntrack_helper *me) | ||
102 | { | ||
103 | int size, ret; | ||
104 | |||
105 | BUG_ON(me->timeout == 0); | ||
106 | |||
107 | size = ALIGN(sizeof(struct nf_conn), __alignof__(struct nf_conn_help)) + | ||
108 | sizeof(struct nf_conn_help); | ||
109 | ret = nf_conntrack_register_cache(NF_CT_F_HELP, "nf_conntrack:help", | ||
110 | size); | ||
111 | if (ret < 0) { | ||
112 | printk(KERN_ERR "nf_conntrack_helper_register: Unable to create slab cache for conntracks\n"); | ||
113 | return ret; | ||
114 | } | ||
115 | write_lock_bh(&nf_conntrack_lock); | ||
116 | list_add(&me->list, &helpers); | ||
117 | write_unlock_bh(&nf_conntrack_lock); | ||
118 | |||
119 | return 0; | ||
120 | } | ||
121 | EXPORT_SYMBOL_GPL(nf_conntrack_helper_register); | ||
122 | |||
123 | void nf_conntrack_helper_unregister(struct nf_conntrack_helper *me) | ||
124 | { | ||
125 | unsigned int i; | ||
126 | struct nf_conntrack_tuple_hash *h; | ||
127 | struct nf_conntrack_expect *exp, *tmp; | ||
128 | |||
129 | /* Need write lock here, to delete helper. */ | ||
130 | write_lock_bh(&nf_conntrack_lock); | ||
131 | list_del(&me->list); | ||
132 | |||
133 | /* Get rid of expectations */ | ||
134 | list_for_each_entry_safe(exp, tmp, &nf_conntrack_expect_list, list) { | ||
135 | struct nf_conn_help *help = nfct_help(exp->master); | ||
136 | if ((help->helper == me || exp->helper == me) && | ||
137 | del_timer(&exp->timeout)) { | ||
138 | nf_ct_unlink_expect(exp); | ||
139 | nf_conntrack_expect_put(exp); | ||
140 | } | ||
141 | } | ||
142 | |||
143 | /* Get rid of expecteds, set helpers to NULL. */ | ||
144 | list_for_each_entry(h, &unconfirmed, list) | ||
145 | unhelp(h, me); | ||
146 | for (i = 0; i < nf_conntrack_htable_size; i++) { | ||
147 | list_for_each_entry(h, &nf_conntrack_hash[i], list) | ||
148 | unhelp(h, me); | ||
149 | } | ||
150 | write_unlock_bh(&nf_conntrack_lock); | ||
151 | |||
152 | /* Someone could be still looking at the helper in a bh. */ | ||
153 | synchronize_net(); | ||
154 | } | ||
155 | EXPORT_SYMBOL_GPL(nf_conntrack_helper_unregister); | ||
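nf_ct_helper_find_get() and nf_ct_helper_put() above form a simple pin/unpin protocol around the helper list: the lookup takes a module reference under nf_conntrack_lock, and the caller drops it once it no longer dereferences the helper. A hypothetical caller (tuple setup omitted) might look like:

#include <net/netfilter/nf_conntrack_helper.h>

static void example_use_helper(const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_helper *helper;

	helper = nf_ct_helper_find_get(tuple);	/* takes a try_module_get() ref */
	if (helper == NULL)
		return;

	/* helper->help, helper->timeout etc. are safe to use here:
	 * the owning module cannot be unloaded while the ref is held. */

	nf_ct_helper_put(helper);		/* drops the ref via module_put() */
}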
diff --git a/net/netfilter/nf_conntrack_irc.c b/net/netfilter/nf_conntrack_irc.c new file mode 100644 index 000000000000..ed01db634399 --- /dev/null +++ b/net/netfilter/nf_conntrack_irc.c | |||
@@ -0,0 +1,281 @@ | |||
1 | /* IRC extension for IP connection tracking, Version 1.21 | ||
2 | * (C) 2000-2002 by Harald Welte <laforge@gnumonks.org> | ||
3 | * based on RR's ip_conntrack_ftp.c | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License | ||
7 | * as published by the Free Software Foundation; either version | ||
8 | * 2 of the License, or (at your option) any later version. | ||
9 | */ | ||
10 | |||
11 | #include <linux/module.h> | ||
12 | #include <linux/moduleparam.h> | ||
13 | #include <linux/skbuff.h> | ||
14 | #include <linux/in.h> | ||
15 | #include <linux/tcp.h> | ||
16 | #include <linux/netfilter.h> | ||
17 | |||
18 | #include <net/netfilter/nf_conntrack.h> | ||
19 | #include <net/netfilter/nf_conntrack_expect.h> | ||
20 | #include <net/netfilter/nf_conntrack_helper.h> | ||
21 | #include <linux/netfilter/nf_conntrack_irc.h> | ||
22 | |||
23 | #define MAX_PORTS 8 | ||
24 | static unsigned short ports[MAX_PORTS]; | ||
25 | static int ports_c; | ||
26 | static unsigned int max_dcc_channels = 8; | ||
27 | static unsigned int dcc_timeout __read_mostly = 300; | ||
28 | /* This is slow, but it's simple. --RR */ | ||
29 | static char *irc_buffer; | ||
30 | static DEFINE_SPINLOCK(irc_buffer_lock); | ||
31 | |||
32 | unsigned int (*nf_nat_irc_hook)(struct sk_buff **pskb, | ||
33 | enum ip_conntrack_info ctinfo, | ||
34 | unsigned int matchoff, | ||
35 | unsigned int matchlen, | ||
36 | struct nf_conntrack_expect *exp) __read_mostly; | ||
37 | EXPORT_SYMBOL_GPL(nf_nat_irc_hook); | ||
38 | |||
39 | MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>"); | ||
40 | MODULE_DESCRIPTION("IRC (DCC) connection tracking helper"); | ||
41 | MODULE_LICENSE("GPL"); | ||
42 | MODULE_ALIAS("ip_conntrack_irc"); | ||
43 | |||
44 | module_param_array(ports, ushort, &ports_c, 0400); | ||
45 | MODULE_PARM_DESC(ports, "port numbers of IRC servers"); | ||
46 | module_param(max_dcc_channels, uint, 0400); | ||
47 | MODULE_PARM_DESC(max_dcc_channels, "max number of expected DCC channels per " | ||
48 | "IRC session"); | ||
49 | module_param(dcc_timeout, uint, 0400); | ||
50 | MODULE_PARM_DESC(dcc_timeout, "timeout for unestablished DCC channels"); | ||
51 | |||
52 | static const char *dccprotos[] = { | ||
53 | "SEND ", "CHAT ", "MOVE ", "TSEND ", "SCHAT " | ||
54 | }; | ||
55 | |||
56 | #define MINMATCHLEN 5 | ||
57 | |||
58 | #if 0 | ||
59 | #define DEBUGP(format, args...) printk(KERN_DEBUG "%s:%s:" format, \ | ||
60 | __FILE__, __FUNCTION__ , ## args) | ||
61 | #else | ||
62 | #define DEBUGP(format, args...) | ||
63 | #endif | ||
64 | |||
65 | /* tries to get the ip_addr and port out of a dcc command | ||
66 | * return value: -1 on failure, 0 on success | ||
67 | * data pointer to first byte of DCC command data | ||
68 | * data_end pointer to last byte of dcc command data | ||
69 | * ip returns parsed ip of dcc command | ||
70 | * port returns parsed port of dcc command | ||
71 | * ad_beg_p returns pointer to first byte of addr data | ||
72 | * ad_end_p returns pointer to last byte of addr data | ||
73 | */ | ||
74 | static int parse_dcc(char *data, char *data_end, u_int32_t *ip, | ||
75 | u_int16_t *port, char **ad_beg_p, char **ad_end_p) | ||
76 | { | ||
77 | /* at least 12: "AAAAAAAA P\1\n" */ | ||
78 | while (*data++ != ' ') | ||
79 | if (data > data_end - 12) | ||
80 | return -1; | ||
81 | |||
82 | *ad_beg_p = data; | ||
83 | *ip = simple_strtoul(data, &data, 10); | ||
84 | |||
85 | /* skip blanks between ip and port */ | ||
86 | while (*data == ' ') { | ||
87 | if (data >= data_end) | ||
88 | return -1; | ||
89 | data++; | ||
90 | } | ||
91 | |||
92 | *port = simple_strtoul(data, &data, 10); | ||
93 | *ad_end_p = data; | ||
94 | |||
95 | return 0; | ||
96 | } | ||
97 | |||
98 | static int help(struct sk_buff **pskb, unsigned int protoff, | ||
99 | struct nf_conn *ct, enum ip_conntrack_info ctinfo) | ||
100 | { | ||
101 | unsigned int dataoff; | ||
102 | struct tcphdr _tcph, *th; | ||
103 | char *data, *data_limit, *ib_ptr; | ||
104 | int dir = CTINFO2DIR(ctinfo); | ||
105 | struct nf_conntrack_expect *exp; | ||
106 | struct nf_conntrack_tuple *tuple; | ||
107 | u_int32_t dcc_ip; | ||
108 | u_int16_t dcc_port; | ||
109 | __be16 port; | ||
110 | int i, ret = NF_ACCEPT; | ||
111 | char *addr_beg_p, *addr_end_p; | ||
112 | typeof(nf_nat_irc_hook) nf_nat_irc; | ||
113 | |||
114 | /* If packet is coming from IRC server */ | ||
115 | if (dir == IP_CT_DIR_REPLY) | ||
116 | return NF_ACCEPT; | ||
117 | |||
118 | /* Until there's been traffic both ways, don't look in packets. */ | ||
119 | if (ctinfo != IP_CT_ESTABLISHED && | ||
120 | ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY) | ||
121 | return NF_ACCEPT; | ||
122 | |||
123 | /* Not a full tcp header? */ | ||
124 | th = skb_header_pointer(*pskb, protoff, sizeof(_tcph), &_tcph); | ||
125 | if (th == NULL) | ||
126 | return NF_ACCEPT; | ||
127 | |||
128 | /* No data? */ | ||
129 | dataoff = protoff + th->doff*4; | ||
130 | if (dataoff >= (*pskb)->len) | ||
131 | return NF_ACCEPT; | ||
132 | |||
133 | spin_lock_bh(&irc_buffer_lock); | ||
134 | ib_ptr = skb_header_pointer(*pskb, dataoff, (*pskb)->len - dataoff, | ||
135 | irc_buffer); | ||
136 | BUG_ON(ib_ptr == NULL); | ||
137 | |||
138 | data = ib_ptr; | ||
139 | data_limit = ib_ptr + (*pskb)->len - dataoff; | ||
140 | |||
141 | /* strlen("\1DCC SENT t AAAAAAAA P\1\n")=24 | ||
142 | * 5+MINMATCHLEN+strlen("t AAAAAAAA P\1\n")=14 */ | ||
143 | while (data < data_limit - (19 + MINMATCHLEN)) { | ||
144 | if (memcmp(data, "\1DCC ", 5)) { | ||
145 | data++; | ||
146 | continue; | ||
147 | } | ||
148 | data += 5; | ||
149 | /* we have at least (19+MINMATCHLEN)-5 bytes valid data left */ | ||
150 | |||
151 | DEBUGP("DCC found in master %u.%u.%u.%u:%u %u.%u.%u.%u:%u...\n", | ||
152 | NIPQUAD(iph->saddr), ntohs(th->source), | ||
153 | NIPQUAD(iph->daddr), ntohs(th->dest)); | ||
154 | |||
155 | for (i = 0; i < ARRAY_SIZE(dccprotos); i++) { | ||
156 | if (memcmp(data, dccprotos[i], strlen(dccprotos[i]))) { | ||
157 | /* no match */ | ||
158 | continue; | ||
159 | } | ||
160 | data += strlen(dccprotos[i]); | ||
161 | DEBUGP("DCC %s detected\n", dccprotos[i]); | ||
162 | |||
163 | /* we have at least | ||
164 | * (19+MINMATCHLEN)-5-dccprotos[i].matchlen bytes valid | ||
165 | * data left (== 14/13 bytes) */ | ||
166 | if (parse_dcc((char *)data, data_limit, &dcc_ip, | ||
167 | &dcc_port, &addr_beg_p, &addr_end_p)) { | ||
168 | DEBUGP("unable to parse dcc command\n"); | ||
169 | continue; | ||
170 | } | ||
171 | DEBUGP("DCC bound ip/port: %u.%u.%u.%u:%u\n", | ||
172 | HIPQUAD(dcc_ip), dcc_port); | ||
173 | |||
174 | /* dcc_ip can be the internal OR external (NAT'ed) IP */ | ||
175 | tuple = &ct->tuplehash[dir].tuple; | ||
176 | if (tuple->src.u3.ip != htonl(dcc_ip) && | ||
177 | tuple->dst.u3.ip != htonl(dcc_ip)) { | ||
178 | if (net_ratelimit()) | ||
179 | printk(KERN_WARNING | ||
180 | "Forged DCC command from " | ||
181 | "%u.%u.%u.%u: %u.%u.%u.%u:%u\n", | ||
182 | NIPQUAD(tuple->src.u3.ip), | ||
183 | HIPQUAD(dcc_ip), dcc_port); | ||
184 | continue; | ||
185 | } | ||
186 | |||
187 | exp = nf_conntrack_expect_alloc(ct); | ||
188 | if (exp == NULL) { | ||
189 | ret = NF_DROP; | ||
190 | goto out; | ||
191 | } | ||
192 | tuple = &ct->tuplehash[!dir].tuple; | ||
193 | port = htons(dcc_port); | ||
194 | nf_conntrack_expect_init(exp, tuple->src.l3num, | ||
195 | NULL, &tuple->dst.u3, | ||
196 | IPPROTO_TCP, NULL, &port); | ||
197 | |||
198 | nf_nat_irc = rcu_dereference(nf_nat_irc_hook); | ||
199 | if (nf_nat_irc && ct->status & IPS_NAT_MASK) | ||
200 | ret = nf_nat_irc(pskb, ctinfo, | ||
201 | addr_beg_p - ib_ptr, | ||
202 | addr_end_p - addr_beg_p, | ||
203 | exp); | ||
204 | else if (nf_conntrack_expect_related(exp) != 0) | ||
205 | ret = NF_DROP; | ||
206 | nf_conntrack_expect_put(exp); | ||
207 | goto out; | ||
208 | } | ||
209 | } | ||
210 | out: | ||
211 | spin_unlock_bh(&irc_buffer_lock); | ||
212 | return ret; | ||
213 | } | ||
214 | |||
215 | static struct nf_conntrack_helper irc[MAX_PORTS] __read_mostly; | ||
216 | static char irc_names[MAX_PORTS][sizeof("irc-65535")] __read_mostly; | ||
217 | |||
218 | static void nf_conntrack_irc_fini(void); | ||
219 | |||
220 | static int __init nf_conntrack_irc_init(void) | ||
221 | { | ||
222 | int i, ret; | ||
223 | char *tmpname; | ||
224 | |||
225 | if (max_dcc_channels < 1) { | ||
226 | printk("nf_ct_irc: max_dcc_channels must not be zero\n"); | ||
227 | return -EINVAL; | ||
228 | } | ||
229 | |||
230 | irc_buffer = kmalloc(65536, GFP_KERNEL); | ||
231 | if (!irc_buffer) | ||
232 | return -ENOMEM; | ||
233 | |||
234 | /* If no port given, default to standard irc port */ | ||
235 | if (ports_c == 0) | ||
236 | ports[ports_c++] = IRC_PORT; | ||
237 | |||
238 | for (i = 0; i < ports_c; i++) { | ||
239 | irc[i].tuple.src.l3num = AF_INET; | ||
240 | irc[i].tuple.src.u.tcp.port = htons(ports[i]); | ||
241 | irc[i].tuple.dst.protonum = IPPROTO_TCP; | ||
242 | irc[i].mask.src.l3num = 0xFFFF; | ||
243 | irc[i].mask.src.u.tcp.port = htons(0xFFFF); | ||
244 | irc[i].mask.dst.protonum = 0xFF; | ||
245 | irc[i].max_expected = max_dcc_channels; | ||
246 | irc[i].timeout = dcc_timeout; | ||
247 | irc[i].me = THIS_MODULE; | ||
248 | irc[i].help = help; | ||
249 | |||
250 | tmpname = &irc_names[i][0]; | ||
251 | if (ports[i] == IRC_PORT) | ||
252 | sprintf(tmpname, "irc"); | ||
253 | else | ||
254 | sprintf(tmpname, "irc-%u", i); | ||
255 | irc[i].name = tmpname; | ||
256 | |||
257 | ret = nf_conntrack_helper_register(&irc[i]); | ||
258 | if (ret) { | ||
259 | printk("nf_ct_irc: failed to register helper " | ||
260 | "for pf: %u port: %u\n", | ||
261 | irc[i].tuple.src.l3num, ports[i]); | ||
262 | nf_conntrack_irc_fini(); | ||
263 | return ret; | ||
264 | } | ||
265 | } | ||
266 | return 0; | ||
267 | } | ||
268 | |||
269 | /* This function is intentionally _NOT_ defined as __exit, because | ||
270 | * it is needed by the init function */ | ||
271 | static void nf_conntrack_irc_fini(void) | ||
272 | { | ||
273 | int i; | ||
274 | |||
275 | for (i = 0; i < ports_c; i++) | ||
276 | nf_conntrack_helper_unregister(&irc[i]); | ||
277 | kfree(irc_buffer); | ||
278 | } | ||
279 | |||
280 | module_init(nf_conntrack_irc_init); | ||
281 | module_exit(nf_conntrack_irc_fini); | ||
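As the parse_dcc() comment in the listing above notes, the CTCP DCC payload carries the peer address as a decimal, host-order IPv4 number followed by a decimal port, which is why the helper later compares htonl(dcc_ip) against the connection's addresses. A worked example with illustrative values:

/* Payload seen inside the IRC TCP stream (filename and size are made up):
 *
 *	"\1DCC SEND file.txt 3232235777 6881 1024\1\n"
 *	                     ^^^^^^^^^^ ^^^^
 *	                     ip         port
 *
 * 3232235777 == 0xC0A80101 == 192.168.1.1, so parse_dcc() returns
 * dcc_ip = 3232235777 and dcc_port = 6881.  If htonl(dcc_ip) matches one
 * of the connection's own addresses (the forgery check above), help()
 * expects an incoming TCP connection to the advertising client on port
 * 6881 and, under NAT, hands the payload offsets to nf_nat_irc_hook for
 * rewriting.
 */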
diff --git a/net/netfilter/nf_conntrack_l3proto_generic.c b/net/netfilter/nf_conntrack_l3proto_generic.c index 21e0bc91cf23..a3d31c3ac8e6 100644 --- a/net/netfilter/nf_conntrack_l3proto_generic.c +++ b/net/netfilter/nf_conntrack_l3proto_generic.c | |||
@@ -26,7 +26,7 @@ | |||
26 | 26 | ||
27 | #include <linux/netfilter_ipv4.h> | 27 | #include <linux/netfilter_ipv4.h> |
28 | #include <net/netfilter/nf_conntrack.h> | 28 | #include <net/netfilter/nf_conntrack.h> |
29 | #include <net/netfilter/nf_conntrack_protocol.h> | 29 | #include <net/netfilter/nf_conntrack_l4proto.h> |
30 | #include <net/netfilter/nf_conntrack_l3proto.h> | 30 | #include <net/netfilter/nf_conntrack_l3proto.h> |
31 | #include <net/netfilter/nf_conntrack_core.h> | 31 | #include <net/netfilter/nf_conntrack_core.h> |
32 | #include <net/netfilter/ipv4/nf_conntrack_ipv4.h> | 32 | #include <net/netfilter/ipv4/nf_conntrack_ipv4.h> |
@@ -37,8 +37,6 @@ | |||
37 | #define DEBUGP(format, args...) | 37 | #define DEBUGP(format, args...) |
38 | #endif | 38 | #endif |
39 | 39 | ||
40 | DECLARE_PER_CPU(struct nf_conntrack_stat, nf_conntrack_stat); | ||
41 | |||
42 | static int generic_pkt_to_tuple(const struct sk_buff *skb, unsigned int nhoff, | 40 | static int generic_pkt_to_tuple(const struct sk_buff *skb, unsigned int nhoff, |
43 | struct nf_conntrack_tuple *tuple) | 41 | struct nf_conntrack_tuple *tuple) |
44 | { | 42 | { |
@@ -84,7 +82,7 @@ static u_int32_t generic_get_features(const struct nf_conntrack_tuple *tuple) | |||
84 | return NF_CT_F_BASIC; | 82 | return NF_CT_F_BASIC; |
85 | } | 83 | } |
86 | 84 | ||
87 | struct nf_conntrack_l3proto nf_conntrack_generic_l3proto = { | 85 | struct nf_conntrack_l3proto nf_conntrack_l3proto_generic = { |
88 | .l3proto = PF_UNSPEC, | 86 | .l3proto = PF_UNSPEC, |
89 | .name = "unknown", | 87 | .name = "unknown", |
90 | .pkt_to_tuple = generic_pkt_to_tuple, | 88 | .pkt_to_tuple = generic_pkt_to_tuple, |
@@ -94,3 +92,4 @@ struct nf_conntrack_l3proto nf_conntrack_generic_l3proto = { | |||
94 | .prepare = generic_prepare, | 92 | .prepare = generic_prepare, |
95 | .get_features = generic_get_features, | 93 | .get_features = generic_get_features, |
96 | }; | 94 | }; |
95 | EXPORT_SYMBOL_GPL(nf_conntrack_l3proto_generic); | ||
diff --git a/net/netfilter/nf_conntrack_netbios_ns.c b/net/netfilter/nf_conntrack_netbios_ns.c new file mode 100644 index 000000000000..a5b234e444dc --- /dev/null +++ b/net/netfilter/nf_conntrack_netbios_ns.c | |||
@@ -0,0 +1,126 @@ | |||
1 | /* | ||
2 | * NetBIOS name service broadcast connection tracking helper | ||
3 | * | ||
4 | * (c) 2005 Patrick McHardy <kaber@trash.net> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | /* | ||
12 | * This helper tracks locally originating NetBIOS name service | ||
13 | * requests by issuing permanent expectations (valid until | ||
14 | * timing out) matching all reply connections from the | ||
15 | * destination network. The only NetBIOS specific thing is | ||
16 | * actually the port number. | ||
17 | */ | ||
18 | #include <linux/kernel.h> | ||
19 | #include <linux/module.h> | ||
20 | #include <linux/init.h> | ||
21 | #include <linux/skbuff.h> | ||
22 | #include <linux/netdevice.h> | ||
23 | #include <linux/inetdevice.h> | ||
24 | #include <linux/if_addr.h> | ||
25 | #include <linux/in.h> | ||
26 | #include <linux/ip.h> | ||
27 | #include <linux/netfilter.h> | ||
28 | #include <net/route.h> | ||
29 | |||
30 | #include <net/netfilter/nf_conntrack.h> | ||
31 | #include <net/netfilter/nf_conntrack_helper.h> | ||
32 | #include <net/netfilter/nf_conntrack_expect.h> | ||
33 | |||
34 | #define NMBD_PORT 137 | ||
35 | |||
36 | MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>"); | ||
37 | MODULE_DESCRIPTION("NetBIOS name service broadcast connection tracking helper"); | ||
38 | MODULE_LICENSE("GPL"); | ||
39 | MODULE_ALIAS("ip_conntrack_netbios_ns"); | ||
40 | |||
41 | static unsigned int timeout __read_mostly = 3; | ||
42 | module_param(timeout, uint, 0400); | ||
43 | MODULE_PARM_DESC(timeout, "timeout for master connection/replies in seconds"); | ||
44 | |||
45 | static int help(struct sk_buff **pskb, unsigned int protoff, | ||
46 | struct nf_conn *ct, enum ip_conntrack_info ctinfo) | ||
47 | { | ||
48 | struct nf_conntrack_expect *exp; | ||
49 | struct iphdr *iph = (*pskb)->nh.iph; | ||
50 | struct rtable *rt = (struct rtable *)(*pskb)->dst; | ||
51 | struct in_device *in_dev; | ||
52 | __be32 mask = 0; | ||
53 | |||
54 | /* we're only interested in locally generated packets */ | ||
55 | if ((*pskb)->sk == NULL) | ||
56 | goto out; | ||
57 | if (rt == NULL || !(rt->rt_flags & RTCF_BROADCAST)) | ||
58 | goto out; | ||
59 | if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) | ||
60 | goto out; | ||
61 | |||
62 | rcu_read_lock(); | ||
63 | in_dev = __in_dev_get_rcu(rt->u.dst.dev); | ||
64 | if (in_dev != NULL) { | ||
65 | for_primary_ifa(in_dev) { | ||
66 | if (ifa->ifa_broadcast == iph->daddr) { | ||
67 | mask = ifa->ifa_mask; | ||
68 | break; | ||
69 | } | ||
70 | } endfor_ifa(in_dev); | ||
71 | } | ||
72 | rcu_read_unlock(); | ||
73 | |||
74 | if (mask == 0) | ||
75 | goto out; | ||
76 | |||
77 | exp = nf_conntrack_expect_alloc(ct); | ||
78 | if (exp == NULL) | ||
79 | goto out; | ||
80 | |||
81 | exp->tuple = ct->tuplehash[IP_CT_DIR_REPLY].tuple; | ||
82 | exp->tuple.src.u.udp.port = htons(NMBD_PORT); | ||
83 | |||
84 | exp->mask.src.u3.ip = mask; | ||
85 | exp->mask.src.u.udp.port = htons(0xFFFF); | ||
86 | exp->mask.dst.u3.ip = htonl(0xFFFFFFFF); | ||
87 | exp->mask.dst.u.udp.port = htons(0xFFFF); | ||
88 | exp->mask.dst.protonum = 0xFF; | ||
89 | |||
90 | exp->expectfn = NULL; | ||
91 | exp->flags = NF_CT_EXPECT_PERMANENT; | ||
92 | |||
93 | nf_conntrack_expect_related(exp); | ||
94 | nf_conntrack_expect_put(exp); | ||
95 | |||
96 | nf_ct_refresh(ct, *pskb, timeout * HZ); | ||
97 | out: | ||
98 | return NF_ACCEPT; | ||
99 | } | ||
100 | |||
101 | static struct nf_conntrack_helper helper __read_mostly = { | ||
102 | .name = "netbios-ns", | ||
103 | .tuple.src.l3num = AF_INET, | ||
104 | .tuple.src.u.udp.port = __constant_htons(NMBD_PORT), | ||
105 | .tuple.dst.protonum = IPPROTO_UDP, | ||
106 | .mask.src.l3num = 0xFFFF, | ||
107 | .mask.src.u.udp.port = __constant_htons(0xFFFF), | ||
108 | .mask.dst.protonum = 0xFF, | ||
109 | .max_expected = 1, | ||
110 | .me = THIS_MODULE, | ||
111 | .help = help, | ||
112 | }; | ||
113 | |||
114 | static int __init nf_conntrack_netbios_ns_init(void) | ||
115 | { | ||
116 | helper.timeout = timeout; | ||
117 | return nf_conntrack_helper_register(&helper); | ||
118 | } | ||
119 | |||
120 | static void __exit nf_conntrack_netbios_ns_fini(void) | ||
121 | { | ||
122 | nf_conntrack_helper_unregister(&helper); | ||
123 | } | ||
124 | |||
125 | module_init(nf_conntrack_netbios_ns_init); | ||
126 | module_exit(nf_conntrack_netbios_ns_fini); | ||
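To make the header comment of this helper concrete, here is what the expectation built in help() looks like with illustrative numbers:

/* Example: 192.168.1.5/24 sends a name query to the broadcast address
 * 192.168.1.255:137.  The matching primary address gives mask =
 * 255.255.255.0, so the expectation derived from the reply tuple is:
 *
 *	expected source:      any host in 192.168.1.0/24, UDP port 137
 *	expected destination: 192.168.1.5, the query's original source port
 *
 * Every name-service reply from the local subnet is therefore treated as
 * RELATED to the broadcast "connection" until it times out (3 seconds by
 * default, per the module parameter above).
 */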
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index bd0156a28ecd..bd1d2de75e45 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c | |||
@@ -35,10 +35,15 @@ | |||
35 | #include <linux/netfilter.h> | 35 | #include <linux/netfilter.h> |
36 | #include <net/netfilter/nf_conntrack.h> | 36 | #include <net/netfilter/nf_conntrack.h> |
37 | #include <net/netfilter/nf_conntrack_core.h> | 37 | #include <net/netfilter/nf_conntrack_core.h> |
38 | #include <net/netfilter/nf_conntrack_expect.h> | ||
38 | #include <net/netfilter/nf_conntrack_helper.h> | 39 | #include <net/netfilter/nf_conntrack_helper.h> |
39 | #include <net/netfilter/nf_conntrack_l3proto.h> | 40 | #include <net/netfilter/nf_conntrack_l3proto.h> |
40 | #include <net/netfilter/nf_conntrack_protocol.h> | 41 | #include <net/netfilter/nf_conntrack_l4proto.h> |
41 | #include <linux/netfilter_ipv4/ip_nat_protocol.h> | 42 | #include <net/netfilter/nf_conntrack_tuple.h> |
43 | #ifdef CONFIG_NF_NAT_NEEDED | ||
44 | #include <net/netfilter/nf_nat_core.h> | ||
45 | #include <net/netfilter/nf_nat_protocol.h> | ||
46 | #endif | ||
42 | 47 | ||
43 | #include <linux/netfilter/nfnetlink.h> | 48 | #include <linux/netfilter/nfnetlink.h> |
44 | #include <linux/netfilter/nfnetlink_conntrack.h> | 49 | #include <linux/netfilter/nfnetlink_conntrack.h> |
@@ -50,15 +55,15 @@ static char __initdata version[] = "0.93"; | |||
50 | static inline int | 55 | static inline int |
51 | ctnetlink_dump_tuples_proto(struct sk_buff *skb, | 56 | ctnetlink_dump_tuples_proto(struct sk_buff *skb, |
52 | const struct nf_conntrack_tuple *tuple, | 57 | const struct nf_conntrack_tuple *tuple, |
53 | struct nf_conntrack_protocol *proto) | 58 | struct nf_conntrack_l4proto *l4proto) |
54 | { | 59 | { |
55 | int ret = 0; | 60 | int ret = 0; |
56 | struct nfattr *nest_parms = NFA_NEST(skb, CTA_TUPLE_PROTO); | 61 | struct nfattr *nest_parms = NFA_NEST(skb, CTA_TUPLE_PROTO); |
57 | 62 | ||
58 | NFA_PUT(skb, CTA_PROTO_NUM, sizeof(u_int8_t), &tuple->dst.protonum); | 63 | NFA_PUT(skb, CTA_PROTO_NUM, sizeof(u_int8_t), &tuple->dst.protonum); |
59 | 64 | ||
60 | if (likely(proto->tuple_to_nfattr)) | 65 | if (likely(l4proto->tuple_to_nfattr)) |
61 | ret = proto->tuple_to_nfattr(skb, tuple); | 66 | ret = l4proto->tuple_to_nfattr(skb, tuple); |
62 | 67 | ||
63 | NFA_NEST_END(skb, nest_parms); | 68 | NFA_NEST_END(skb, nest_parms); |
64 | 69 | ||
@@ -93,7 +98,7 @@ ctnetlink_dump_tuples(struct sk_buff *skb, | |||
93 | { | 98 | { |
94 | int ret; | 99 | int ret; |
95 | struct nf_conntrack_l3proto *l3proto; | 100 | struct nf_conntrack_l3proto *l3proto; |
96 | struct nf_conntrack_protocol *proto; | 101 | struct nf_conntrack_l4proto *l4proto; |
97 | 102 | ||
98 | l3proto = nf_ct_l3proto_find_get(tuple->src.l3num); | 103 | l3proto = nf_ct_l3proto_find_get(tuple->src.l3num); |
99 | ret = ctnetlink_dump_tuples_ip(skb, tuple, l3proto); | 104 | ret = ctnetlink_dump_tuples_ip(skb, tuple, l3proto); |
@@ -102,9 +107,9 @@ ctnetlink_dump_tuples(struct sk_buff *skb, | |||
102 | if (unlikely(ret < 0)) | 107 | if (unlikely(ret < 0)) |
103 | return ret; | 108 | return ret; |
104 | 109 | ||
105 | proto = nf_ct_proto_find_get(tuple->src.l3num, tuple->dst.protonum); | 110 | l4proto = nf_ct_l4proto_find_get(tuple->src.l3num, tuple->dst.protonum); |
106 | ret = ctnetlink_dump_tuples_proto(skb, tuple, proto); | 111 | ret = ctnetlink_dump_tuples_proto(skb, tuple, l4proto); |
107 | nf_ct_proto_put(proto); | 112 | nf_ct_l4proto_put(l4proto); |
108 | 113 | ||
109 | return ret; | 114 | return ret; |
110 | } | 115 | } |
@@ -112,7 +117,7 @@ ctnetlink_dump_tuples(struct sk_buff *skb, | |||
112 | static inline int | 117 | static inline int |
113 | ctnetlink_dump_status(struct sk_buff *skb, const struct nf_conn *ct) | 118 | ctnetlink_dump_status(struct sk_buff *skb, const struct nf_conn *ct) |
114 | { | 119 | { |
115 | u_int32_t status = htonl((u_int32_t) ct->status); | 120 | __be32 status = htonl((u_int32_t) ct->status); |
116 | NFA_PUT(skb, CTA_STATUS, sizeof(status), &status); | 121 | NFA_PUT(skb, CTA_STATUS, sizeof(status), &status); |
117 | return 0; | 122 | return 0; |
118 | 123 | ||
@@ -124,7 +129,7 @@ static inline int | |||
124 | ctnetlink_dump_timeout(struct sk_buff *skb, const struct nf_conn *ct) | 129 | ctnetlink_dump_timeout(struct sk_buff *skb, const struct nf_conn *ct) |
125 | { | 130 | { |
126 | long timeout_l = ct->timeout.expires - jiffies; | 131 | long timeout_l = ct->timeout.expires - jiffies; |
127 | u_int32_t timeout; | 132 | __be32 timeout; |
128 | 133 | ||
129 | if (timeout_l < 0) | 134 | if (timeout_l < 0) |
130 | timeout = 0; | 135 | timeout = 0; |
@@ -141,26 +146,27 @@ nfattr_failure: | |||
141 | static inline int | 146 | static inline int |
142 | ctnetlink_dump_protoinfo(struct sk_buff *skb, const struct nf_conn *ct) | 147 | ctnetlink_dump_protoinfo(struct sk_buff *skb, const struct nf_conn *ct) |
143 | { | 148 | { |
144 | struct nf_conntrack_protocol *proto = nf_ct_proto_find_get(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num, ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum); | 149 | struct nf_conntrack_l4proto *l4proto = nf_ct_l4proto_find_get(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num, ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum); |
145 | struct nfattr *nest_proto; | 150 | struct nfattr *nest_proto; |
146 | int ret; | 151 | int ret; |
147 | 152 | ||
148 | if (!proto->to_nfattr) { | 153 | if (!l4proto->to_nfattr) { |
149 | nf_ct_proto_put(proto); | 154 | nf_ct_l4proto_put(l4proto); |
150 | return 0; | 155 | return 0; |
151 | } | 156 | } |
152 | 157 | ||
153 | nest_proto = NFA_NEST(skb, CTA_PROTOINFO); | 158 | nest_proto = NFA_NEST(skb, CTA_PROTOINFO); |
154 | 159 | ||
155 | ret = proto->to_nfattr(skb, nest_proto, ct); | 160 | ret = l4proto->to_nfattr(skb, nest_proto, ct); |
156 | 161 | ||
157 | nf_ct_proto_put(proto); | 162 | nf_ct_l4proto_put(l4proto); |
158 | 163 | ||
159 | NFA_NEST_END(skb, nest_proto); | 164 | NFA_NEST_END(skb, nest_proto); |
160 | 165 | ||
161 | return ret; | 166 | return ret; |
162 | 167 | ||
163 | nfattr_failure: | 168 | nfattr_failure: |
169 | nf_ct_l4proto_put(l4proto); | ||
164 | return -1; | 170 | return -1; |
165 | } | 171 | } |
166 | 172 | ||
@@ -194,7 +200,7 @@ ctnetlink_dump_counters(struct sk_buff *skb, const struct nf_conn *ct, | |||
194 | { | 200 | { |
195 | enum ctattr_type type = dir ? CTA_COUNTERS_REPLY: CTA_COUNTERS_ORIG; | 201 | enum ctattr_type type = dir ? CTA_COUNTERS_REPLY: CTA_COUNTERS_ORIG; |
196 | struct nfattr *nest_count = NFA_NEST(skb, type); | 202 | struct nfattr *nest_count = NFA_NEST(skb, type); |
197 | u_int32_t tmp; | 203 | __be32 tmp; |
198 | 204 | ||
199 | tmp = htonl(ct->counters[dir].packets); | 205 | tmp = htonl(ct->counters[dir].packets); |
200 | NFA_PUT(skb, CTA_COUNTERS32_PACKETS, sizeof(u_int32_t), &tmp); | 206 | NFA_PUT(skb, CTA_COUNTERS32_PACKETS, sizeof(u_int32_t), &tmp); |
@@ -217,7 +223,7 @@ nfattr_failure: | |||
217 | static inline int | 223 | static inline int |
218 | ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct) | 224 | ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct) |
219 | { | 225 | { |
220 | u_int32_t mark = htonl(ct->mark); | 226 | __be32 mark = htonl(ct->mark); |
221 | 227 | ||
222 | NFA_PUT(skb, CTA_MARK, sizeof(u_int32_t), &mark); | 228 | NFA_PUT(skb, CTA_MARK, sizeof(u_int32_t), &mark); |
223 | return 0; | 229 | return 0; |
@@ -232,7 +238,7 @@ nfattr_failure: | |||
232 | static inline int | 238 | static inline int |
233 | ctnetlink_dump_id(struct sk_buff *skb, const struct nf_conn *ct) | 239 | ctnetlink_dump_id(struct sk_buff *skb, const struct nf_conn *ct) |
234 | { | 240 | { |
235 | u_int32_t id = htonl(ct->id); | 241 | __be32 id = htonl(ct->id); |
236 | NFA_PUT(skb, CTA_ID, sizeof(u_int32_t), &id); | 242 | NFA_PUT(skb, CTA_ID, sizeof(u_int32_t), &id); |
237 | return 0; | 243 | return 0; |
238 | 244 | ||
@@ -243,7 +249,7 @@ nfattr_failure: | |||
243 | static inline int | 249 | static inline int |
244 | ctnetlink_dump_use(struct sk_buff *skb, const struct nf_conn *ct) | 250 | ctnetlink_dump_use(struct sk_buff *skb, const struct nf_conn *ct) |
245 | { | 251 | { |
246 | u_int32_t use = htonl(atomic_read(&ct->ct_general.use)); | 252 | __be32 use = htonl(atomic_read(&ct->ct_general.use)); |
247 | 253 | ||
248 | NFA_PUT(skb, CTA_USE, sizeof(u_int32_t), &use); | 254 | NFA_PUT(skb, CTA_USE, sizeof(u_int32_t), &use); |
249 | return 0; | 255 | return 0; |
@@ -329,8 +335,6 @@ static int ctnetlink_conntrack_event(struct notifier_block *this, | |||
329 | } else if (events & (IPCT_NEW | IPCT_RELATED)) { | 335 | } else if (events & (IPCT_NEW | IPCT_RELATED)) { |
330 | type = IPCTNL_MSG_CT_NEW; | 336 | type = IPCTNL_MSG_CT_NEW; |
331 | flags = NLM_F_CREATE|NLM_F_EXCL; | 337 | flags = NLM_F_CREATE|NLM_F_EXCL; |
332 | /* dump everything */ | ||
333 | events = ~0UL; | ||
334 | group = NFNLGRP_CONNTRACK_NEW; | 338 | group = NFNLGRP_CONNTRACK_NEW; |
335 | } else if (events & (IPCT_STATUS | IPCT_PROTOINFO)) { | 339 | } else if (events & (IPCT_STATUS | IPCT_PROTOINFO)) { |
336 | type = IPCTNL_MSG_CT_NEW; | 340 | type = IPCTNL_MSG_CT_NEW; |
@@ -365,28 +369,35 @@ static int ctnetlink_conntrack_event(struct notifier_block *this, | |||
365 | if (ctnetlink_dump_tuples(skb, tuple(ct, IP_CT_DIR_REPLY)) < 0) | 369 | if (ctnetlink_dump_tuples(skb, tuple(ct, IP_CT_DIR_REPLY)) < 0) |
366 | goto nfattr_failure; | 370 | goto nfattr_failure; |
367 | NFA_NEST_END(skb, nest_parms); | 371 | NFA_NEST_END(skb, nest_parms); |
368 | |||
369 | /* NAT stuff is now a status flag */ | ||
370 | if ((events & IPCT_STATUS || events & IPCT_NATINFO) | ||
371 | && ctnetlink_dump_status(skb, ct) < 0) | ||
372 | goto nfattr_failure; | ||
373 | if (events & IPCT_REFRESH | ||
374 | && ctnetlink_dump_timeout(skb, ct) < 0) | ||
375 | goto nfattr_failure; | ||
376 | if (events & IPCT_PROTOINFO | ||
377 | && ctnetlink_dump_protoinfo(skb, ct) < 0) | ||
378 | goto nfattr_failure; | ||
379 | if (events & IPCT_HELPINFO | ||
380 | && ctnetlink_dump_helpinfo(skb, ct) < 0) | ||
381 | goto nfattr_failure; | ||
382 | 372 | ||
383 | if (ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 || | 373 | if (events & IPCT_DESTROY) { |
384 | ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0) | 374 | if (ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 || |
385 | goto nfattr_failure; | 375 | ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0) |
376 | goto nfattr_failure; | ||
377 | } else { | ||
378 | if (ctnetlink_dump_status(skb, ct) < 0) | ||
379 | goto nfattr_failure; | ||
386 | 380 | ||
387 | if (events & IPCT_MARK | 381 | if (ctnetlink_dump_timeout(skb, ct) < 0) |
388 | && ctnetlink_dump_mark(skb, ct) < 0) | 382 | goto nfattr_failure; |
389 | goto nfattr_failure; | 383 | |
384 | if (events & IPCT_PROTOINFO | ||
385 | && ctnetlink_dump_protoinfo(skb, ct) < 0) | ||
386 | goto nfattr_failure; | ||
387 | |||
388 | if ((events & IPCT_HELPER || nfct_help(ct)) | ||
389 | && ctnetlink_dump_helpinfo(skb, ct) < 0) | ||
390 | goto nfattr_failure; | ||
391 | |||
392 | if ((events & IPCT_MARK || ct->mark) | ||
393 | && ctnetlink_dump_mark(skb, ct) < 0) | ||
394 | goto nfattr_failure; | ||
395 | |||
396 | if (events & IPCT_COUNTER_FILLING && | ||
397 | (ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 || | ||
398 | ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0)) | ||
399 | goto nfattr_failure; | ||
400 | } | ||
390 | 401 | ||
391 | nlh->nlmsg_len = skb->tail - b; | 402 | nlh->nlmsg_len = skb->tail - b; |
392 | nfnetlink_send(skb, 0, group, 0); | 403 | nfnetlink_send(skb, 0, group, 0); |
@@ -423,7 +434,7 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb) | |||
423 | restart: | 434 | restart: |
424 | list_for_each_prev(i, &nf_conntrack_hash[cb->args[0]]) { | 435 | list_for_each_prev(i, &nf_conntrack_hash[cb->args[0]]) { |
425 | h = (struct nf_conntrack_tuple_hash *) i; | 436 | h = (struct nf_conntrack_tuple_hash *) i; |
426 | if (DIRECTION(h) != IP_CT_DIR_ORIGINAL) | 437 | if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL) |
427 | continue; | 438 | continue; |
428 | ct = nf_ct_tuplehash_to_ctrack(h); | 439 | ct = nf_ct_tuplehash_to_ctrack(h); |
429 | /* Dump entries of a given L3 protocol number. | 440 | /* Dump entries of a given L3 protocol number. |
@@ -491,7 +502,7 @@ ctnetlink_parse_tuple_proto(struct nfattr *attr, | |||
491 | struct nf_conntrack_tuple *tuple) | 502 | struct nf_conntrack_tuple *tuple) |
492 | { | 503 | { |
493 | struct nfattr *tb[CTA_PROTO_MAX]; | 504 | struct nfattr *tb[CTA_PROTO_MAX]; |
494 | struct nf_conntrack_protocol *proto; | 505 | struct nf_conntrack_l4proto *l4proto; |
495 | int ret = 0; | 506 | int ret = 0; |
496 | 507 | ||
497 | nfattr_parse_nested(tb, CTA_PROTO_MAX, attr); | 508 | nfattr_parse_nested(tb, CTA_PROTO_MAX, attr); |
@@ -503,12 +514,12 @@ ctnetlink_parse_tuple_proto(struct nfattr *attr, | |||
503 | return -EINVAL; | 514 | return -EINVAL; |
504 | tuple->dst.protonum = *(u_int8_t *)NFA_DATA(tb[CTA_PROTO_NUM-1]); | 515 | tuple->dst.protonum = *(u_int8_t *)NFA_DATA(tb[CTA_PROTO_NUM-1]); |
505 | 516 | ||
506 | proto = nf_ct_proto_find_get(tuple->src.l3num, tuple->dst.protonum); | 517 | l4proto = nf_ct_l4proto_find_get(tuple->src.l3num, tuple->dst.protonum); |
507 | 518 | ||
508 | if (likely(proto->nfattr_to_tuple)) | 519 | if (likely(l4proto->nfattr_to_tuple)) |
509 | ret = proto->nfattr_to_tuple(tb, tuple); | 520 | ret = l4proto->nfattr_to_tuple(tb, tuple); |
510 | 521 | ||
511 | nf_ct_proto_put(proto); | 522 | nf_ct_l4proto_put(l4proto); |
512 | 523 | ||
513 | return ret; | 524 | return ret; |
514 | } | 525 | } |
@@ -549,28 +560,28 @@ ctnetlink_parse_tuple(struct nfattr *cda[], struct nf_conntrack_tuple *tuple, | |||
549 | return 0; | 560 | return 0; |
550 | } | 561 | } |
551 | 562 | ||
552 | #ifdef CONFIG_IP_NF_NAT_NEEDED | 563 | #ifdef CONFIG_NF_NAT_NEEDED |
553 | static const size_t cta_min_protonat[CTA_PROTONAT_MAX] = { | 564 | static const size_t cta_min_protonat[CTA_PROTONAT_MAX] = { |
554 | [CTA_PROTONAT_PORT_MIN-1] = sizeof(u_int16_t), | 565 | [CTA_PROTONAT_PORT_MIN-1] = sizeof(u_int16_t), |
555 | [CTA_PROTONAT_PORT_MAX-1] = sizeof(u_int16_t), | 566 | [CTA_PROTONAT_PORT_MAX-1] = sizeof(u_int16_t), |
556 | }; | 567 | }; |
557 | 568 | ||
558 | static int ctnetlink_parse_nat_proto(struct nfattr *attr, | 569 | static int nfnetlink_parse_nat_proto(struct nfattr *attr, |
559 | const struct nf_conn *ct, | 570 | const struct nf_conn *ct, |
560 | struct ip_nat_range *range) | 571 | struct nf_nat_range *range) |
561 | { | 572 | { |
562 | struct nfattr *tb[CTA_PROTONAT_MAX]; | 573 | struct nfattr *tb[CTA_PROTONAT_MAX]; |
563 | struct ip_nat_protocol *npt; | 574 | struct nf_nat_protocol *npt; |
564 | 575 | ||
565 | nfattr_parse_nested(tb, CTA_PROTONAT_MAX, attr); | 576 | nfattr_parse_nested(tb, CTA_PROTONAT_MAX, attr); |
566 | 577 | ||
567 | if (nfattr_bad_size(tb, CTA_PROTONAT_MAX, cta_min_protonat)) | 578 | if (nfattr_bad_size(tb, CTA_PROTONAT_MAX, cta_min_protonat)) |
568 | return -EINVAL; | 579 | return -EINVAL; |
569 | 580 | ||
570 | npt = ip_nat_proto_find_get(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum); | 581 | npt = nf_nat_proto_find_get(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum); |
571 | 582 | ||
572 | if (!npt->nfattr_to_range) { | 583 | if (!npt->nfattr_to_range) { |
573 | ip_nat_proto_put(npt); | 584 | nf_nat_proto_put(npt); |
574 | return 0; | 585 | return 0; |
575 | } | 586 | } |
576 | 587 | ||
@@ -578,7 +589,7 @@ static int ctnetlink_parse_nat_proto(struct nfattr *attr, | |||
578 | if (npt->nfattr_to_range(tb, range) > 0) | 589 | if (npt->nfattr_to_range(tb, range) > 0) |
579 | range->flags |= IP_NAT_RANGE_PROTO_SPECIFIED; | 590 | range->flags |= IP_NAT_RANGE_PROTO_SPECIFIED; |
580 | 591 | ||
581 | ip_nat_proto_put(npt); | 592 | nf_nat_proto_put(npt); |
582 | 593 | ||
583 | return 0; | 594 | return 0; |
584 | } | 595 | } |
@@ -589,8 +600,8 @@ static const size_t cta_min_nat[CTA_NAT_MAX] = { | |||
589 | }; | 600 | }; |
590 | 601 | ||
591 | static inline int | 602 | static inline int |
592 | ctnetlink_parse_nat(struct nfattr *nat, | 603 | nfnetlink_parse_nat(struct nfattr *nat, |
593 | const struct nf_conn *ct, struct ip_nat_range *range) | 604 | const struct nf_conn *ct, struct nf_nat_range *range) |
594 | { | 605 | { |
595 | struct nfattr *tb[CTA_NAT_MAX]; | 606 | struct nfattr *tb[CTA_NAT_MAX]; |
596 | int err; | 607 | int err; |
@@ -603,12 +614,12 @@ ctnetlink_parse_nat(struct nfattr *nat, | |||
603 | return -EINVAL; | 614 | return -EINVAL; |
604 | 615 | ||
605 | if (tb[CTA_NAT_MINIP-1]) | 616 | if (tb[CTA_NAT_MINIP-1]) |
606 | range->min_ip = *(u_int32_t *)NFA_DATA(tb[CTA_NAT_MINIP-1]); | 617 | range->min_ip = *(__be32 *)NFA_DATA(tb[CTA_NAT_MINIP-1]); |
607 | 618 | ||
608 | if (!tb[CTA_NAT_MAXIP-1]) | 619 | if (!tb[CTA_NAT_MAXIP-1]) |
609 | range->max_ip = range->min_ip; | 620 | range->max_ip = range->min_ip; |
610 | else | 621 | else |
611 | range->max_ip = *(u_int32_t *)NFA_DATA(tb[CTA_NAT_MAXIP-1]); | 622 | range->max_ip = *(__be32 *)NFA_DATA(tb[CTA_NAT_MAXIP-1]); |
612 | 623 | ||
613 | if (range->min_ip) | 624 | if (range->min_ip) |
614 | range->flags |= IP_NAT_RANGE_MAP_IPS; | 625 | range->flags |= IP_NAT_RANGE_MAP_IPS; |
@@ -616,7 +627,7 @@ ctnetlink_parse_nat(struct nfattr *nat, | |||
616 | if (!tb[CTA_NAT_PROTO-1]) | 627 | if (!tb[CTA_NAT_PROTO-1]) |
617 | return 0; | 628 | return 0; |
618 | 629 | ||
619 | err = ctnetlink_parse_nat_proto(tb[CTA_NAT_PROTO-1], ct, range); | 630 | err = nfnetlink_parse_nat_proto(tb[CTA_NAT_PROTO-1], ct, range); |
620 | if (err < 0) | 631 | if (err < 0) |
621 | return err; | 632 | return err; |
622 | 633 | ||
@@ -681,7 +692,7 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb, | |||
681 | ct = nf_ct_tuplehash_to_ctrack(h); | 692 | ct = nf_ct_tuplehash_to_ctrack(h); |
682 | 693 | ||
683 | if (cda[CTA_ID-1]) { | 694 | if (cda[CTA_ID-1]) { |
684 | u_int32_t id = ntohl(*(u_int32_t *)NFA_DATA(cda[CTA_ID-1])); | 695 | u_int32_t id = ntohl(*(__be32 *)NFA_DATA(cda[CTA_ID-1])); |
685 | if (ct->id != id) { | 696 | if (ct->id != id) { |
686 | nf_ct_put(ct); | 697 | nf_ct_put(ct); |
687 | return -ENOENT; | 698 | return -ENOENT; |
@@ -751,7 +762,6 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb, | |||
751 | nf_ct_put(ct); | 762 | nf_ct_put(ct); |
752 | return -ENOMEM; | 763 | return -ENOMEM; |
753 | } | 764 | } |
754 | NETLINK_CB(skb2).dst_pid = NETLINK_CB(skb).pid; | ||
755 | 765 | ||
756 | err = ctnetlink_fill_info(skb2, NETLINK_CB(skb).pid, nlh->nlmsg_seq, | 766 | err = ctnetlink_fill_info(skb2, NETLINK_CB(skb).pid, nlh->nlmsg_seq, |
757 | IPCTNL_MSG_CT_NEW, 1, ct); | 767 | IPCTNL_MSG_CT_NEW, 1, ct); |
@@ -775,7 +785,7 @@ static inline int | |||
775 | ctnetlink_change_status(struct nf_conn *ct, struct nfattr *cda[]) | 785 | ctnetlink_change_status(struct nf_conn *ct, struct nfattr *cda[]) |
776 | { | 786 | { |
777 | unsigned long d; | 787 | unsigned long d; |
778 | unsigned status = ntohl(*(u_int32_t *)NFA_DATA(cda[CTA_STATUS-1])); | 788 | unsigned int status = ntohl(*(__be32 *)NFA_DATA(cda[CTA_STATUS-1])); |
779 | d = ct->status ^ status; | 789 | d = ct->status ^ status; |
780 | 790 | ||
781 | if (d & (IPS_EXPECTED|IPS_CONFIRMED|IPS_DYING)) | 791 | if (d & (IPS_EXPECTED|IPS_CONFIRMED|IPS_DYING)) |
@@ -792,35 +802,35 @@ ctnetlink_change_status(struct nf_conn *ct, struct nfattr *cda[]) | |||
792 | return -EINVAL; | 802 | return -EINVAL; |
793 | 803 | ||
794 | if (cda[CTA_NAT_SRC-1] || cda[CTA_NAT_DST-1]) { | 804 | if (cda[CTA_NAT_SRC-1] || cda[CTA_NAT_DST-1]) { |
795 | #ifndef CONFIG_IP_NF_NAT_NEEDED | 805 | #ifndef CONFIG_NF_NAT_NEEDED |
796 | return -EINVAL; | 806 | return -EINVAL; |
797 | #else | 807 | #else |
798 | struct ip_nat_range range; | 808 | struct nf_nat_range range; |
799 | 809 | ||
800 | if (cda[CTA_NAT_DST-1]) { | 810 | if (cda[CTA_NAT_DST-1]) { |
801 | if (ctnetlink_parse_nat(cda[CTA_NAT_DST-1], ct, | 811 | if (nfnetlink_parse_nat(cda[CTA_NAT_DST-1], ct, |
802 | &range) < 0) | 812 | &range) < 0) |
803 | return -EINVAL; | 813 | return -EINVAL; |
804 | if (ip_nat_initialized(ct, | 814 | if (nf_nat_initialized(ct, |
805 | HOOK2MANIP(NF_IP_PRE_ROUTING))) | 815 | HOOK2MANIP(NF_IP_PRE_ROUTING))) |
806 | return -EEXIST; | 816 | return -EEXIST; |
807 | ip_nat_setup_info(ct, &range, hooknum); | 817 | nf_nat_setup_info(ct, &range, NF_IP_PRE_ROUTING); |
808 | } | 818 | } |
809 | if (cda[CTA_NAT_SRC-1]) { | 819 | if (cda[CTA_NAT_SRC-1]) { |
810 | if (ctnetlink_parse_nat(cda[CTA_NAT_SRC-1], ct, | 820 | if (nfnetlink_parse_nat(cda[CTA_NAT_SRC-1], ct, |
811 | &range) < 0) | 821 | &range) < 0) |
812 | return -EINVAL; | 822 | return -EINVAL; |
813 | if (ip_nat_initialized(ct, | 823 | if (nf_nat_initialized(ct, |
814 | HOOK2MANIP(NF_IP_POST_ROUTING))) | 824 | HOOK2MANIP(NF_IP_POST_ROUTING))) |
815 | return -EEXIST; | 825 | return -EEXIST; |
816 | ip_nat_setup_info(ct, &range, hooknum); | 826 | nf_nat_setup_info(ct, &range, NF_IP_POST_ROUTING); |
817 | } | 827 | } |
818 | #endif | 828 | #endif |
819 | } | 829 | } |
820 | 830 | ||
821 | /* Be careful here, modifying NAT bits can screw up things, | 831 | /* Be careful here, modifying NAT bits can screw up things, |
822 | * so don't let users modify them directly if they don't pass | 832 | * so don't let users modify them directly if they don't pass |
823 | * ip_nat_range. */ | 833 | * nf_nat_range. */ |
824 | ct->status |= status & ~(IPS_NAT_DONE_MASK | IPS_NAT_MASK); | 834 | ct->status |= status & ~(IPS_NAT_DONE_MASK | IPS_NAT_MASK); |
825 | return 0; | 835 | return 0; |
826 | } | 836 | } |
@@ -874,7 +884,7 @@ ctnetlink_change_helper(struct nf_conn *ct, struct nfattr *cda[]) | |||
874 | static inline int | 884 | static inline int |
875 | ctnetlink_change_timeout(struct nf_conn *ct, struct nfattr *cda[]) | 885 | ctnetlink_change_timeout(struct nf_conn *ct, struct nfattr *cda[]) |
876 | { | 886 | { |
877 | u_int32_t timeout = ntohl(*(u_int32_t *)NFA_DATA(cda[CTA_TIMEOUT-1])); | 887 | u_int32_t timeout = ntohl(*(__be32 *)NFA_DATA(cda[CTA_TIMEOUT-1])); |
878 | 888 | ||
879 | if (!del_timer(&ct->timeout)) | 889 | if (!del_timer(&ct->timeout)) |
880 | return -ETIME; | 890 | return -ETIME; |
@@ -889,18 +899,18 @@ static inline int | |||
889 | ctnetlink_change_protoinfo(struct nf_conn *ct, struct nfattr *cda[]) | 899 | ctnetlink_change_protoinfo(struct nf_conn *ct, struct nfattr *cda[]) |
890 | { | 900 | { |
891 | struct nfattr *tb[CTA_PROTOINFO_MAX], *attr = cda[CTA_PROTOINFO-1]; | 901 | struct nfattr *tb[CTA_PROTOINFO_MAX], *attr = cda[CTA_PROTOINFO-1]; |
892 | struct nf_conntrack_protocol *proto; | 902 | struct nf_conntrack_l4proto *l4proto; |
893 | u_int16_t npt = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum; | 903 | u_int16_t npt = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum; |
894 | u_int16_t l3num = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num; | 904 | u_int16_t l3num = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num; |
895 | int err = 0; | 905 | int err = 0; |
896 | 906 | ||
897 | nfattr_parse_nested(tb, CTA_PROTOINFO_MAX, attr); | 907 | nfattr_parse_nested(tb, CTA_PROTOINFO_MAX, attr); |
898 | 908 | ||
899 | proto = nf_ct_proto_find_get(l3num, npt); | 909 | l4proto = nf_ct_l4proto_find_get(l3num, npt); |
900 | 910 | ||
901 | if (proto->from_nfattr) | 911 | if (l4proto->from_nfattr) |
902 | err = proto->from_nfattr(tb, ct); | 912 | err = l4proto->from_nfattr(tb, ct); |
903 | nf_ct_proto_put(proto); | 913 | nf_ct_l4proto_put(l4proto); |
904 | 914 | ||
905 | return err; | 915 | return err; |
906 | } | 916 | } |
@@ -936,7 +946,7 @@ ctnetlink_change_conntrack(struct nf_conn *ct, struct nfattr *cda[]) | |||
936 | 946 | ||
937 | #if defined(CONFIG_NF_CONNTRACK_MARK) | 947 | #if defined(CONFIG_NF_CONNTRACK_MARK) |
938 | if (cda[CTA_MARK-1]) | 948 | if (cda[CTA_MARK-1]) |
939 | ct->mark = ntohl(*(u_int32_t *)NFA_DATA(cda[CTA_MARK-1])); | 949 | ct->mark = ntohl(*(__be32 *)NFA_DATA(cda[CTA_MARK-1])); |
940 | #endif | 950 | #endif |
941 | 951 | ||
942 | return 0; | 952 | return 0; |
@@ -949,6 +959,7 @@ ctnetlink_create_conntrack(struct nfattr *cda[], | |||
949 | { | 959 | { |
950 | struct nf_conn *ct; | 960 | struct nf_conn *ct; |
951 | int err = -EINVAL; | 961 | int err = -EINVAL; |
962 | struct nf_conn_help *help; | ||
952 | 963 | ||
953 | ct = nf_conntrack_alloc(otuple, rtuple); | 964 | ct = nf_conntrack_alloc(otuple, rtuple); |
954 | if (ct == NULL || IS_ERR(ct)) | 965 | if (ct == NULL || IS_ERR(ct)) |
@@ -956,14 +967,16 @@ ctnetlink_create_conntrack(struct nfattr *cda[], | |||
956 | 967 | ||
957 | if (!cda[CTA_TIMEOUT-1]) | 968 | if (!cda[CTA_TIMEOUT-1]) |
958 | goto err; | 969 | goto err; |
959 | ct->timeout.expires = ntohl(*(u_int32_t *)NFA_DATA(cda[CTA_TIMEOUT-1])); | 970 | ct->timeout.expires = ntohl(*(__be32 *)NFA_DATA(cda[CTA_TIMEOUT-1])); |
960 | 971 | ||
961 | ct->timeout.expires = jiffies + ct->timeout.expires * HZ; | 972 | ct->timeout.expires = jiffies + ct->timeout.expires * HZ; |
962 | ct->status |= IPS_CONFIRMED; | 973 | ct->status |= IPS_CONFIRMED; |
963 | 974 | ||
964 | err = ctnetlink_change_status(ct, cda); | 975 | if (cda[CTA_STATUS-1]) { |
965 | if (err < 0) | 976 | err = ctnetlink_change_status(ct, cda); |
966 | goto err; | 977 | if (err < 0) |
978 | goto err; | ||
979 | } | ||
967 | 980 | ||
968 | if (cda[CTA_PROTOINFO-1]) { | 981 | if (cda[CTA_PROTOINFO-1]) { |
969 | err = ctnetlink_change_protoinfo(ct, cda); | 982 | err = ctnetlink_change_protoinfo(ct, cda); |
@@ -973,12 +986,19 @@ ctnetlink_create_conntrack(struct nfattr *cda[], | |||
973 | 986 | ||
974 | #if defined(CONFIG_NF_CONNTRACK_MARK) | 987 | #if defined(CONFIG_NF_CONNTRACK_MARK) |
975 | if (cda[CTA_MARK-1]) | 988 | if (cda[CTA_MARK-1]) |
976 | ct->mark = ntohl(*(u_int32_t *)NFA_DATA(cda[CTA_MARK-1])); | 989 | ct->mark = ntohl(*(__be32 *)NFA_DATA(cda[CTA_MARK-1])); |
977 | #endif | 990 | #endif |
978 | 991 | ||
992 | help = nfct_help(ct); | ||
993 | if (help) | ||
994 | help->helper = nf_ct_helper_find_get(rtuple); | ||
995 | |||
979 | add_timer(&ct->timeout); | 996 | add_timer(&ct->timeout); |
980 | nf_conntrack_hash_insert(ct); | 997 | nf_conntrack_hash_insert(ct); |
981 | 998 | ||
999 | if (help && help->helper) | ||
1000 | nf_ct_helper_put(help->helper); | ||
1001 | |||
982 | return 0; | 1002 | return 0; |
983 | 1003 | ||
984 | err: | 1004 | err: |
@@ -1072,7 +1092,7 @@ ctnetlink_exp_dump_mask(struct sk_buff *skb, | |||
1072 | { | 1092 | { |
1073 | int ret; | 1093 | int ret; |
1074 | struct nf_conntrack_l3proto *l3proto; | 1094 | struct nf_conntrack_l3proto *l3proto; |
1075 | struct nf_conntrack_protocol *proto; | 1095 | struct nf_conntrack_l4proto *l4proto; |
1076 | struct nfattr *nest_parms = NFA_NEST(skb, CTA_EXPECT_MASK); | 1096 | struct nfattr *nest_parms = NFA_NEST(skb, CTA_EXPECT_MASK); |
1077 | 1097 | ||
1078 | l3proto = nf_ct_l3proto_find_get(tuple->src.l3num); | 1098 | l3proto = nf_ct_l3proto_find_get(tuple->src.l3num); |
@@ -1082,9 +1102,9 @@ ctnetlink_exp_dump_mask(struct sk_buff *skb, | |||
1082 | if (unlikely(ret < 0)) | 1102 | if (unlikely(ret < 0)) |
1083 | goto nfattr_failure; | 1103 | goto nfattr_failure; |
1084 | 1104 | ||
1085 | proto = nf_ct_proto_find_get(tuple->src.l3num, tuple->dst.protonum); | 1105 | l4proto = nf_ct_l4proto_find_get(tuple->src.l3num, tuple->dst.protonum); |
1086 | ret = ctnetlink_dump_tuples_proto(skb, mask, proto); | 1106 | ret = ctnetlink_dump_tuples_proto(skb, mask, l4proto); |
1087 | nf_ct_proto_put(proto); | 1107 | nf_ct_l4proto_put(l4proto); |
1088 | if (unlikely(ret < 0)) | 1108 | if (unlikely(ret < 0)) |
1089 | goto nfattr_failure; | 1109 | goto nfattr_failure; |
1090 | 1110 | ||
@@ -1101,8 +1121,8 @@ ctnetlink_exp_dump_expect(struct sk_buff *skb, | |||
1101 | const struct nf_conntrack_expect *exp) | 1121 | const struct nf_conntrack_expect *exp) |
1102 | { | 1122 | { |
1103 | struct nf_conn *master = exp->master; | 1123 | struct nf_conn *master = exp->master; |
1104 | u_int32_t timeout = htonl((exp->timeout.expires - jiffies) / HZ); | 1124 | __be32 timeout = htonl((exp->timeout.expires - jiffies) / HZ); |
1105 | u_int32_t id = htonl(exp->id); | 1125 | __be32 id = htonl(exp->id); |
1106 | 1126 | ||
1107 | if (ctnetlink_exp_dump_tuple(skb, &exp->tuple, CTA_EXPECT_TUPLE) < 0) | 1127 | if (ctnetlink_exp_dump_tuple(skb, &exp->tuple, CTA_EXPECT_TUPLE) < 0) |
1108 | goto nfattr_failure; | 1128 | goto nfattr_failure; |
@@ -1275,12 +1295,12 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb, | |||
1275 | if (err < 0) | 1295 | if (err < 0) |
1276 | return err; | 1296 | return err; |
1277 | 1297 | ||
1278 | exp = nf_conntrack_expect_find(&tuple); | 1298 | exp = nf_conntrack_expect_find_get(&tuple); |
1279 | if (!exp) | 1299 | if (!exp) |
1280 | return -ENOENT; | 1300 | return -ENOENT; |
1281 | 1301 | ||
1282 | if (cda[CTA_EXPECT_ID-1]) { | 1302 | if (cda[CTA_EXPECT_ID-1]) { |
1283 | u_int32_t id = *(u_int32_t *)NFA_DATA(cda[CTA_EXPECT_ID-1]); | 1303 | __be32 id = *(__be32 *)NFA_DATA(cda[CTA_EXPECT_ID-1]); |
1284 | if (exp->id != ntohl(id)) { | 1304 | if (exp->id != ntohl(id)) { |
1285 | nf_conntrack_expect_put(exp); | 1305 | nf_conntrack_expect_put(exp); |
1286 | return -ENOENT; | 1306 | return -ENOENT; |
@@ -1291,8 +1311,7 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb, | |||
1291 | skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); | 1311 | skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); |
1292 | if (!skb2) | 1312 | if (!skb2) |
1293 | goto out; | 1313 | goto out; |
1294 | NETLINK_CB(skb2).dst_pid = NETLINK_CB(skb).pid; | 1314 | |
1295 | |||
1296 | err = ctnetlink_exp_fill_info(skb2, NETLINK_CB(skb).pid, | 1315 | err = ctnetlink_exp_fill_info(skb2, NETLINK_CB(skb).pid, |
1297 | nlh->nlmsg_seq, IPCTNL_MSG_EXP_NEW, | 1316 | nlh->nlmsg_seq, IPCTNL_MSG_EXP_NEW, |
1298 | 1, exp); | 1317 | 1, exp); |
@@ -1331,13 +1350,12 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb, | |||
1331 | return err; | 1350 | return err; |
1332 | 1351 | ||
1333 | /* bump usage count to 2 */ | 1352 | /* bump usage count to 2 */ |
1334 | exp = nf_conntrack_expect_find(&tuple); | 1353 | exp = nf_conntrack_expect_find_get(&tuple); |
1335 | if (!exp) | 1354 | if (!exp) |
1336 | return -ENOENT; | 1355 | return -ENOENT; |
1337 | 1356 | ||
1338 | if (cda[CTA_EXPECT_ID-1]) { | 1357 | if (cda[CTA_EXPECT_ID-1]) { |
1339 | u_int32_t id = | 1358 | __be32 id = *(__be32 *)NFA_DATA(cda[CTA_EXPECT_ID-1]); |
1340 | *(u_int32_t *)NFA_DATA(cda[CTA_EXPECT_ID-1]); | ||
1341 | if (exp->id != ntohl(id)) { | 1359 | if (exp->id != ntohl(id)) { |
1342 | nf_conntrack_expect_put(exp); | 1360 | nf_conntrack_expect_put(exp); |
1343 | return -ENOENT; | 1361 | return -ENOENT; |
@@ -1433,6 +1451,7 @@ ctnetlink_create_expect(struct nfattr *cda[], u_int8_t u3) | |||
1433 | exp->expectfn = NULL; | 1451 | exp->expectfn = NULL; |
1434 | exp->flags = 0; | 1452 | exp->flags = 0; |
1435 | exp->master = ct; | 1453 | exp->master = ct; |
1454 | exp->helper = NULL; | ||
1436 | memcpy(&exp->tuple, &tuple, sizeof(struct nf_conntrack_tuple)); | 1455 | memcpy(&exp->tuple, &tuple, sizeof(struct nf_conntrack_tuple)); |
1437 | memcpy(&exp->mask, &mask, sizeof(struct nf_conntrack_tuple)); | 1456 | memcpy(&exp->mask, &mask, sizeof(struct nf_conntrack_tuple)); |
1438 | 1457 | ||
@@ -1529,6 +1548,7 @@ static struct nfnetlink_subsystem ctnl_exp_subsys = { | |||
1529 | .cb = ctnl_exp_cb, | 1548 | .cb = ctnl_exp_cb, |
1530 | }; | 1549 | }; |
1531 | 1550 | ||
1551 | MODULE_ALIAS("ip_conntrack_netlink"); | ||
1532 | MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK); | 1552 | MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK); |
1533 | MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK_EXP); | 1553 | MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK_EXP); |
1534 | 1554 | ||
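
A recurring change in the nf_conntrack_netlink.c hunks above is the switch of attribute temporaries from u_int32_t to __be32: the value is converted once with htonl() at the point of assignment, and the __be32 annotation lets sparse (run with endian checking enabled) flag any later mixing of host-order and network-order integers. Below is a minimal userspace sketch of that conversion, assuming only the standard htonl()/ntohl() routines; the mark value and the be32 typedef are stand-ins for ct->mark and the kernel's __be32 type:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

/* Userspace stand-in for the kernel's __be32: in the kernel, sparse treats
 * __be32 as a distinct "bitwise" type, so assigning a host-order value to it
 * without htonl() produces a warning. Here it is a plain 32-bit integer. */
typedef uint32_t be32;

int main(void)
{
	uint32_t mark = 0x2a;        /* host order, e.g. ct->mark */
	be32 wire = htonl(mark);     /* network order, as placed in the CTA_MARK attribute */

	printf("host-order value:  0x%08x\n", (unsigned)mark);
	printf("attribute payload: 0x%08x\n", (unsigned)wire);   /* 0x2a000000 on little-endian hosts */
	printf("round trip:        0x%08x\n", (unsigned)ntohl(wire));
	return 0;
}

On a little-endian machine the payload prints byte-swapped, which is why the temporaries in ctnetlink_dump_mark(), ctnetlink_dump_status() and friends are now typed __be32 rather than u_int32_t.
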
diff --git a/net/netfilter/nf_conntrack_pptp.c b/net/netfilter/nf_conntrack_pptp.c new file mode 100644 index 000000000000..f0ff00e0d052 --- /dev/null +++ b/net/netfilter/nf_conntrack_pptp.c | |||
@@ -0,0 +1,607 @@ | |||
1 | /* | ||
2 | * Connection tracking support for PPTP (Point to Point Tunneling Protocol). | ||
3 | * PPTP is a protocol for creating virtual private networks. | ||
4 | * It is a specification defined by Microsoft and some vendors | ||
5 | * working with Microsoft. PPTP is built on top of a modified | ||
6 | * version of the Internet Generic Routing Encapsulation Protocol. | ||
7 | * GRE is defined in RFC 1701 and RFC 1702. Documentation of | ||
8 | * PPTP can be found in RFC 2637 | ||
9 | * | ||
10 | * (C) 2000-2005 by Harald Welte <laforge@gnumonks.org> | ||
11 | * | ||
12 | * Development of this code funded by Astaro AG (http://www.astaro.com/) | ||
13 | * | ||
14 | * Limitations: | ||
15 | * - We blindly assume that control connections are always | ||
16 | * established in PNS->PAC direction. This is a violation | ||
17 | * of RFC 2637 | ||
18 | * - We can only support one single call within each session | ||
19 | * TODO: | ||
20 | * - testing of incoming PPTP calls | ||
21 | */ | ||
22 | |||
23 | #include <linux/module.h> | ||
24 | #include <linux/skbuff.h> | ||
25 | #include <linux/in.h> | ||
26 | #include <linux/tcp.h> | ||
27 | |||
28 | #include <net/netfilter/nf_conntrack.h> | ||
29 | #include <net/netfilter/nf_conntrack_core.h> | ||
30 | #include <net/netfilter/nf_conntrack_helper.h> | ||
31 | #include <linux/netfilter/nf_conntrack_proto_gre.h> | ||
32 | #include <linux/netfilter/nf_conntrack_pptp.h> | ||
33 | |||
34 | #define NF_CT_PPTP_VERSION "3.1" | ||
35 | |||
36 | MODULE_LICENSE("GPL"); | ||
37 | MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>"); | ||
38 | MODULE_DESCRIPTION("Netfilter connection tracking helper module for PPTP"); | ||
39 | MODULE_ALIAS("ip_conntrack_pptp"); | ||
40 | |||
41 | static DEFINE_SPINLOCK(nf_pptp_lock); | ||
42 | |||
43 | int | ||
44 | (*nf_nat_pptp_hook_outbound)(struct sk_buff **pskb, | ||
45 | struct nf_conn *ct, enum ip_conntrack_info ctinfo, | ||
46 | struct PptpControlHeader *ctlh, | ||
47 | union pptp_ctrl_union *pptpReq) __read_mostly; | ||
48 | EXPORT_SYMBOL_GPL(nf_nat_pptp_hook_outbound); | ||
49 | |||
50 | int | ||
51 | (*nf_nat_pptp_hook_inbound)(struct sk_buff **pskb, | ||
52 | struct nf_conn *ct, enum ip_conntrack_info ctinfo, | ||
53 | struct PptpControlHeader *ctlh, | ||
54 | union pptp_ctrl_union *pptpReq) __read_mostly; | ||
55 | EXPORT_SYMBOL_GPL(nf_nat_pptp_hook_inbound); | ||
56 | |||
57 | void | ||
58 | (*nf_nat_pptp_hook_exp_gre)(struct nf_conntrack_expect *expect_orig, | ||
59 | struct nf_conntrack_expect *expect_reply) | ||
60 | __read_mostly; | ||
61 | EXPORT_SYMBOL_GPL(nf_nat_pptp_hook_exp_gre); | ||
62 | |||
63 | void | ||
64 | (*nf_nat_pptp_hook_expectfn)(struct nf_conn *ct, | ||
65 | struct nf_conntrack_expect *exp) __read_mostly; | ||
66 | EXPORT_SYMBOL_GPL(nf_nat_pptp_hook_expectfn); | ||
67 | |||
68 | #if 0 | ||
69 | /* PptpControlMessageType names */ | ||
70 | const char *pptp_msg_name[] = { | ||
71 | "UNKNOWN_MESSAGE", | ||
72 | "START_SESSION_REQUEST", | ||
73 | "START_SESSION_REPLY", | ||
74 | "STOP_SESSION_REQUEST", | ||
75 | "STOP_SESSION_REPLY", | ||
76 | "ECHO_REQUEST", | ||
77 | "ECHO_REPLY", | ||
78 | "OUT_CALL_REQUEST", | ||
79 | "OUT_CALL_REPLY", | ||
80 | "IN_CALL_REQUEST", | ||
81 | "IN_CALL_REPLY", | ||
82 | "IN_CALL_CONNECT", | ||
83 | "CALL_CLEAR_REQUEST", | ||
84 | "CALL_DISCONNECT_NOTIFY", | ||
85 | "WAN_ERROR_NOTIFY", | ||
86 | "SET_LINK_INFO" | ||
87 | }; | ||
88 | EXPORT_SYMBOL(pptp_msg_name); | ||
89 | #define DEBUGP(format, args...) printk(KERN_DEBUG "%s:%s: " format, __FILE__, __FUNCTION__, ## args) | ||
90 | #else | ||
91 | #define DEBUGP(format, args...) | ||
92 | #endif | ||
93 | |||
94 | #define SECS *HZ | ||
95 | #define MINS * 60 SECS | ||
96 | #define HOURS * 60 MINS | ||
97 | |||
98 | #define PPTP_GRE_TIMEOUT (10 MINS) | ||
99 | #define PPTP_GRE_STREAM_TIMEOUT (5 HOURS) | ||
100 | |||
101 | static void pptp_expectfn(struct nf_conn *ct, | ||
102 | struct nf_conntrack_expect *exp) | ||
103 | { | ||
104 | typeof(nf_nat_pptp_hook_expectfn) nf_nat_pptp_expectfn; | ||
105 | DEBUGP("increasing timeouts\n"); | ||
106 | |||
107 | /* increase timeout of GRE data channel conntrack entry */ | ||
108 | ct->proto.gre.timeout = PPTP_GRE_TIMEOUT; | ||
109 | ct->proto.gre.stream_timeout = PPTP_GRE_STREAM_TIMEOUT; | ||
110 | |||
111 | /* Can you see how rusty this code is, compared with the pre-2.6.11 | ||
112 | * one? That's what happened to my shiny newnat of 2002 ;( -HW */ | ||
113 | |||
114 | rcu_read_lock(); | ||
115 | nf_nat_pptp_expectfn = rcu_dereference(nf_nat_pptp_hook_expectfn); | ||
116 | if (nf_nat_pptp_expectfn && ct->status & IPS_NAT_MASK) | ||
117 | nf_nat_pptp_expectfn(ct, exp); | ||
118 | else { | ||
119 | struct nf_conntrack_tuple inv_t; | ||
120 | struct nf_conntrack_expect *exp_other; | ||
121 | |||
122 | /* obviously this tuple inversion only works until you do NAT */ | ||
123 | nf_ct_invert_tuplepr(&inv_t, &exp->tuple); | ||
124 | DEBUGP("trying to unexpect other dir: "); | ||
125 | NF_CT_DUMP_TUPLE(&inv_t); | ||
126 | |||
127 | exp_other = nf_conntrack_expect_find_get(&inv_t); | ||
128 | if (exp_other) { | ||
129 | /* delete other expectation. */ | ||
130 | DEBUGP("found\n"); | ||
131 | nf_conntrack_unexpect_related(exp_other); | ||
132 | nf_conntrack_expect_put(exp_other); | ||
133 | } else { | ||
134 | DEBUGP("not found\n"); | ||
135 | } | ||
136 | } | ||
137 | rcu_read_unlock(); | ||
138 | } | ||
139 | |||
140 | static int destroy_sibling_or_exp(const struct nf_conntrack_tuple *t) | ||
141 | { | ||
142 | struct nf_conntrack_tuple_hash *h; | ||
143 | struct nf_conntrack_expect *exp; | ||
144 | struct nf_conn *sibling; | ||
145 | |||
146 | DEBUGP("trying to timeout ct or exp for tuple "); | ||
147 | NF_CT_DUMP_TUPLE(t); | ||
148 | |||
149 | h = nf_conntrack_find_get(t, NULL); | ||
150 | if (h) { | ||
151 | sibling = nf_ct_tuplehash_to_ctrack(h); | ||
152 | DEBUGP("setting timeout of conntrack %p to 0\n", sibling); | ||
153 | sibling->proto.gre.timeout = 0; | ||
154 | sibling->proto.gre.stream_timeout = 0; | ||
155 | if (del_timer(&sibling->timeout)) | ||
156 | sibling->timeout.function((unsigned long)sibling); | ||
157 | nf_ct_put(sibling); | ||
158 | return 1; | ||
159 | } else { | ||
160 | exp = nf_conntrack_expect_find_get(t); | ||
161 | if (exp) { | ||
162 | DEBUGP("unexpect_related of expect %p\n", exp); | ||
163 | nf_conntrack_unexpect_related(exp); | ||
164 | nf_conntrack_expect_put(exp); | ||
165 | return 1; | ||
166 | } | ||
167 | } | ||
168 | return 0; | ||
169 | } | ||
170 | |||
171 | /* timeout GRE data connections */ | ||
172 | static void pptp_destroy_siblings(struct nf_conn *ct) | ||
173 | { | ||
174 | struct nf_conn_help *help = nfct_help(ct); | ||
175 | struct nf_conntrack_tuple t; | ||
176 | |||
177 | nf_ct_gre_keymap_destroy(ct); | ||
178 | |||
179 | /* try original (pns->pac) tuple */ | ||
180 | memcpy(&t, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, sizeof(t)); | ||
181 | t.dst.protonum = IPPROTO_GRE; | ||
182 | t.src.u.gre.key = help->help.ct_pptp_info.pns_call_id; | ||
183 | t.dst.u.gre.key = help->help.ct_pptp_info.pac_call_id; | ||
184 | if (!destroy_sibling_or_exp(&t)) | ||
185 | DEBUGP("failed to timeout original pns->pac ct/exp\n"); | ||
186 | |||
187 | /* try reply (pac->pns) tuple */ | ||
188 | memcpy(&t, &ct->tuplehash[IP_CT_DIR_REPLY].tuple, sizeof(t)); | ||
189 | t.dst.protonum = IPPROTO_GRE; | ||
190 | t.src.u.gre.key = help->help.ct_pptp_info.pac_call_id; | ||
191 | t.dst.u.gre.key = help->help.ct_pptp_info.pns_call_id; | ||
192 | if (!destroy_sibling_or_exp(&t)) | ||
193 | DEBUGP("failed to timeout reply pac->pns ct/exp\n"); | ||
194 | } | ||
195 | |||
196 | /* expect GRE connections (PNS->PAC and PAC->PNS direction) */ | ||
197 | static int exp_gre(struct nf_conn *ct, __be16 callid, __be16 peer_callid) | ||
198 | { | ||
199 | struct nf_conntrack_expect *exp_orig, *exp_reply; | ||
200 | enum ip_conntrack_dir dir; | ||
201 | int ret = 1; | ||
202 | typeof(nf_nat_pptp_hook_exp_gre) nf_nat_pptp_exp_gre; | ||
203 | |||
204 | exp_orig = nf_conntrack_expect_alloc(ct); | ||
205 | if (exp_orig == NULL) | ||
206 | goto out; | ||
207 | |||
208 | exp_reply = nf_conntrack_expect_alloc(ct); | ||
209 | if (exp_reply == NULL) | ||
210 | goto out_put_orig; | ||
211 | |||
212 | /* original direction, PNS->PAC */ | ||
213 | dir = IP_CT_DIR_ORIGINAL; | ||
214 | nf_conntrack_expect_init(exp_orig, ct->tuplehash[dir].tuple.src.l3num, | ||
215 | &ct->tuplehash[dir].tuple.src.u3, | ||
216 | &ct->tuplehash[dir].tuple.dst.u3, | ||
217 | IPPROTO_GRE, &peer_callid, &callid); | ||
218 | exp_orig->expectfn = pptp_expectfn; | ||
219 | |||
220 | /* reply direction, PAC->PNS */ | ||
221 | dir = IP_CT_DIR_REPLY; | ||
222 | nf_conntrack_expect_init(exp_reply, ct->tuplehash[dir].tuple.src.l3num, | ||
223 | &ct->tuplehash[dir].tuple.src.u3, | ||
224 | &ct->tuplehash[dir].tuple.dst.u3, | ||
225 | IPPROTO_GRE, &callid, &peer_callid); | ||
226 | exp_reply->expectfn = pptp_expectfn; | ||
227 | |||
228 | nf_nat_pptp_exp_gre = rcu_dereference(nf_nat_pptp_hook_exp_gre); | ||
229 | if (nf_nat_pptp_exp_gre && ct->status & IPS_NAT_MASK) | ||
230 | nf_nat_pptp_exp_gre(exp_orig, exp_reply); | ||
231 | if (nf_conntrack_expect_related(exp_orig) != 0) | ||
232 | goto out_put_both; | ||
233 | if (nf_conntrack_expect_related(exp_reply) != 0) | ||
234 | goto out_unexpect_orig; | ||
235 | |||
236 | /* Add GRE keymap entries */ | ||
237 | if (nf_ct_gre_keymap_add(ct, IP_CT_DIR_ORIGINAL, &exp_orig->tuple) != 0) | ||
238 | goto out_unexpect_both; | ||
239 | if (nf_ct_gre_keymap_add(ct, IP_CT_DIR_REPLY, &exp_reply->tuple) != 0) { | ||
240 | nf_ct_gre_keymap_destroy(ct); | ||
241 | goto out_unexpect_both; | ||
242 | } | ||
243 | ret = 0; | ||
244 | |||
245 | out_put_both: | ||
246 | nf_conntrack_expect_put(exp_reply); | ||
247 | out_put_orig: | ||
248 | nf_conntrack_expect_put(exp_orig); | ||
249 | out: | ||
250 | return ret; | ||
251 | |||
252 | out_unexpect_both: | ||
253 | nf_conntrack_unexpect_related(exp_reply); | ||
254 | out_unexpect_orig: | ||
255 | nf_conntrack_unexpect_related(exp_orig); | ||
256 | goto out_put_both; | ||
257 | } | ||
258 | |||
259 | static inline int | ||
260 | pptp_inbound_pkt(struct sk_buff **pskb, | ||
261 | struct PptpControlHeader *ctlh, | ||
262 | union pptp_ctrl_union *pptpReq, | ||
263 | unsigned int reqlen, | ||
264 | struct nf_conn *ct, | ||
265 | enum ip_conntrack_info ctinfo) | ||
266 | { | ||
267 | struct nf_ct_pptp_master *info = &nfct_help(ct)->help.ct_pptp_info; | ||
268 | u_int16_t msg; | ||
269 | __be16 cid = 0, pcid = 0; | ||
270 | typeof(nf_nat_pptp_hook_inbound) nf_nat_pptp_inbound; | ||
271 | |||
272 | msg = ntohs(ctlh->messageType); | ||
273 | DEBUGP("inbound control message %s\n", pptp_msg_name[msg]); | ||
274 | |||
275 | switch (msg) { | ||
276 | case PPTP_START_SESSION_REPLY: | ||
277 | /* server confirms new control session */ | ||
278 | if (info->sstate < PPTP_SESSION_REQUESTED) | ||
279 | goto invalid; | ||
280 | if (pptpReq->srep.resultCode == PPTP_START_OK) | ||
281 | info->sstate = PPTP_SESSION_CONFIRMED; | ||
282 | else | ||
283 | info->sstate = PPTP_SESSION_ERROR; | ||
284 | break; | ||
285 | |||
286 | case PPTP_STOP_SESSION_REPLY: | ||
287 | /* server confirms end of control session */ | ||
288 | if (info->sstate > PPTP_SESSION_STOPREQ) | ||
289 | goto invalid; | ||
290 | if (pptpReq->strep.resultCode == PPTP_STOP_OK) | ||
291 | info->sstate = PPTP_SESSION_NONE; | ||
292 | else | ||
293 | info->sstate = PPTP_SESSION_ERROR; | ||
294 | break; | ||
295 | |||
296 | case PPTP_OUT_CALL_REPLY: | ||
297 | /* server accepted call, we now expect GRE frames */ | ||
298 | if (info->sstate != PPTP_SESSION_CONFIRMED) | ||
299 | goto invalid; | ||
300 | if (info->cstate != PPTP_CALL_OUT_REQ && | ||
301 | info->cstate != PPTP_CALL_OUT_CONF) | ||
302 | goto invalid; | ||
303 | |||
304 | cid = pptpReq->ocack.callID; | ||
305 | pcid = pptpReq->ocack.peersCallID; | ||
306 | if (info->pns_call_id != pcid) | ||
307 | goto invalid; | ||
308 | DEBUGP("%s, CID=%X, PCID=%X\n", pptp_msg_name[msg], | ||
309 | ntohs(cid), ntohs(pcid)); | ||
310 | |||
311 | if (pptpReq->ocack.resultCode == PPTP_OUTCALL_CONNECT) { | ||
312 | info->cstate = PPTP_CALL_OUT_CONF; | ||
313 | info->pac_call_id = cid; | ||
314 | exp_gre(ct, cid, pcid); | ||
315 | } else | ||
316 | info->cstate = PPTP_CALL_NONE; | ||
317 | break; | ||
318 | |||
319 | case PPTP_IN_CALL_REQUEST: | ||
320 | /* server tells us about incoming call request */ | ||
321 | if (info->sstate != PPTP_SESSION_CONFIRMED) | ||
322 | goto invalid; | ||
323 | |||
324 | cid = pptpReq->icreq.callID; | ||
325 | DEBUGP("%s, CID=%X\n", pptp_msg_name[msg], ntohs(cid)); | ||
326 | info->cstate = PPTP_CALL_IN_REQ; | ||
327 | info->pac_call_id = cid; | ||
328 | break; | ||
329 | |||
330 | case PPTP_IN_CALL_CONNECT: | ||
331 | /* server tells us about incoming call established */ | ||
332 | if (info->sstate != PPTP_SESSION_CONFIRMED) | ||
333 | goto invalid; | ||
334 | if (info->cstate != PPTP_CALL_IN_REP && | ||
335 | info->cstate != PPTP_CALL_IN_CONF) | ||
336 | goto invalid; | ||
337 | |||
338 | pcid = pptpReq->iccon.peersCallID; | ||
339 | cid = info->pac_call_id; | ||
340 | |||
341 | if (info->pns_call_id != pcid) | ||
342 | goto invalid; | ||
343 | |||
344 | DEBUGP("%s, PCID=%X\n", pptp_msg_name[msg], ntohs(pcid)); | ||
345 | info->cstate = PPTP_CALL_IN_CONF; | ||
346 | |||
347 | /* we expect a GRE connection from PAC to PNS */ | ||
348 | exp_gre(ct, cid, pcid); | ||
349 | break; | ||
350 | |||
351 | case PPTP_CALL_DISCONNECT_NOTIFY: | ||
352 | /* server confirms disconnect */ | ||
353 | cid = pptpReq->disc.callID; | ||
354 | DEBUGP("%s, CID=%X\n", pptp_msg_name[msg], ntohs(cid)); | ||
355 | info->cstate = PPTP_CALL_NONE; | ||
356 | |||
357 | /* untrack this call id, unexpect GRE packets */ | ||
358 | pptp_destroy_siblings(ct); | ||
359 | break; | ||
360 | |||
361 | case PPTP_WAN_ERROR_NOTIFY: | ||
362 | case PPTP_ECHO_REQUEST: | ||
363 | case PPTP_ECHO_REPLY: | ||
364 | /* I don't have to explain these ;) */ | ||
365 | break; | ||
366 | |||
367 | default: | ||
368 | goto invalid; | ||
369 | } | ||
370 | |||
371 | nf_nat_pptp_inbound = rcu_dereference(nf_nat_pptp_hook_inbound); | ||
372 | if (nf_nat_pptp_inbound && ct->status & IPS_NAT_MASK) | ||
373 | return nf_nat_pptp_inbound(pskb, ct, ctinfo, ctlh, pptpReq); | ||
374 | return NF_ACCEPT; | ||
375 | |||
376 | invalid: | ||
377 | DEBUGP("invalid %s: type=%d cid=%u pcid=%u " | ||
378 | "cstate=%d sstate=%d pns_cid=%u pac_cid=%u\n", | ||
379 | msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] : pptp_msg_name[0], | ||
380 | msg, ntohs(cid), ntohs(pcid), info->cstate, info->sstate, | ||
381 | ntohs(info->pns_call_id), ntohs(info->pac_call_id)); | ||
382 | return NF_ACCEPT; | ||
383 | } | ||
384 | |||
385 | static inline int | ||
386 | pptp_outbound_pkt(struct sk_buff **pskb, | ||
387 | struct PptpControlHeader *ctlh, | ||
388 | union pptp_ctrl_union *pptpReq, | ||
389 | unsigned int reqlen, | ||
390 | struct nf_conn *ct, | ||
391 | enum ip_conntrack_info ctinfo) | ||
392 | { | ||
393 | struct nf_ct_pptp_master *info = &nfct_help(ct)->help.ct_pptp_info; | ||
394 | u_int16_t msg; | ||
395 | __be16 cid = 0, pcid = 0; | ||
396 | typeof(nf_nat_pptp_hook_outbound) nf_nat_pptp_outbound; | ||
397 | |||
398 | msg = ntohs(ctlh->messageType); | ||
399 | DEBUGP("outbound control message %s\n", pptp_msg_name[msg]); | ||
400 | |||
401 | switch (msg) { | ||
402 | case PPTP_START_SESSION_REQUEST: | ||
403 | /* client requests a new control session */ | ||
404 | if (info->sstate != PPTP_SESSION_NONE) | ||
405 | goto invalid; | ||
406 | info->sstate = PPTP_SESSION_REQUESTED; | ||
407 | break; | ||
408 | |||
409 | case PPTP_STOP_SESSION_REQUEST: | ||
410 | /* client requests end of control session */ | ||
411 | info->sstate = PPTP_SESSION_STOPREQ; | ||
412 | break; | ||
413 | |||
414 | case PPTP_OUT_CALL_REQUEST: | ||
415 | /* client initiating connection to server */ | ||
416 | if (info->sstate != PPTP_SESSION_CONFIRMED) | ||
417 | goto invalid; | ||
418 | info->cstate = PPTP_CALL_OUT_REQ; | ||
419 | /* track PNS call id */ | ||
420 | cid = pptpReq->ocreq.callID; | ||
421 | DEBUGP("%s, CID=%X\n", pptp_msg_name[msg], ntohs(cid)); | ||
422 | info->pns_call_id = cid; | ||
423 | break; | ||
424 | |||
425 | case PPTP_IN_CALL_REPLY: | ||
426 | /* client answers incoming call */ | ||
427 | if (info->cstate != PPTP_CALL_IN_REQ && | ||
428 | info->cstate != PPTP_CALL_IN_REP) | ||
429 | goto invalid; | ||
430 | |||
431 | cid = pptpReq->icack.callID; | ||
432 | pcid = pptpReq->icack.peersCallID; | ||
433 | if (info->pac_call_id != pcid) | ||
434 | goto invalid; | ||
435 | DEBUGP("%s, CID=%X PCID=%X\n", pptp_msg_name[msg], | ||
436 | ntohs(cid), ntohs(pcid)); | ||
437 | |||
438 | if (pptpReq->icack.resultCode == PPTP_INCALL_ACCEPT) { | ||
439 | /* part two of the three-way handshake */ | ||
440 | info->cstate = PPTP_CALL_IN_REP; | ||
441 | info->pns_call_id = cid; | ||
442 | } else | ||
443 | info->cstate = PPTP_CALL_NONE; | ||
444 | break; | ||
445 | |||
446 | case PPTP_CALL_CLEAR_REQUEST: | ||
447 | /* client requests hangup of call */ | ||
448 | if (info->sstate != PPTP_SESSION_CONFIRMED) | ||
449 | goto invalid; | ||
450 | /* FUTURE: iterate over all calls and check if | ||
451 | * call ID is valid. We don't do this without newnat, | ||
452 | * because we only know about the last call */ | ||
453 | info->cstate = PPTP_CALL_CLEAR_REQ; | ||
454 | break; | ||
455 | |||
456 | case PPTP_SET_LINK_INFO: | ||
457 | case PPTP_ECHO_REQUEST: | ||
458 | case PPTP_ECHO_REPLY: | ||
459 | /* I don't have to explain these ;) */ | ||
460 | break; | ||
461 | |||
462 | default: | ||
463 | goto invalid; | ||
464 | } | ||
465 | |||
466 | nf_nat_pptp_outbound = rcu_dereference(nf_nat_pptp_hook_outbound); | ||
467 | if (nf_nat_pptp_outbound && ct->status & IPS_NAT_MASK) | ||
468 | return nf_nat_pptp_outbound(pskb, ct, ctinfo, ctlh, pptpReq); | ||
469 | return NF_ACCEPT; | ||
470 | |||
471 | invalid: | ||
472 | DEBUGP("invalid %s: type=%d cid=%u pcid=%u " | ||
473 | "cstate=%d sstate=%d pns_cid=%u pac_cid=%u\n", | ||
474 | msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] : pptp_msg_name[0], | ||
475 | msg, ntohs(cid), ntohs(pcid), info->cstate, info->sstate, | ||
476 | ntohs(info->pns_call_id), ntohs(info->pac_call_id)); | ||
477 | return NF_ACCEPT; | ||
478 | } | ||
479 | |||
480 | static const unsigned int pptp_msg_size[] = { | ||
481 | [PPTP_START_SESSION_REQUEST] = sizeof(struct PptpStartSessionRequest), | ||
482 | [PPTP_START_SESSION_REPLY] = sizeof(struct PptpStartSessionReply), | ||
483 | [PPTP_STOP_SESSION_REQUEST] = sizeof(struct PptpStopSessionRequest), | ||
484 | [PPTP_STOP_SESSION_REPLY] = sizeof(struct PptpStopSessionReply), | ||
485 | [PPTP_OUT_CALL_REQUEST] = sizeof(struct PptpOutCallRequest), | ||
486 | [PPTP_OUT_CALL_REPLY] = sizeof(struct PptpOutCallReply), | ||
487 | [PPTP_IN_CALL_REQUEST] = sizeof(struct PptpInCallRequest), | ||
488 | [PPTP_IN_CALL_REPLY] = sizeof(struct PptpInCallReply), | ||
489 | [PPTP_IN_CALL_CONNECT] = sizeof(struct PptpInCallConnected), | ||
490 | [PPTP_CALL_CLEAR_REQUEST] = sizeof(struct PptpClearCallRequest), | ||
491 | [PPTP_CALL_DISCONNECT_NOTIFY] = sizeof(struct PptpCallDisconnectNotify), | ||
492 | [PPTP_WAN_ERROR_NOTIFY] = sizeof(struct PptpWanErrorNotify), | ||
493 | [PPTP_SET_LINK_INFO] = sizeof(struct PptpSetLinkInfo), | ||
494 | }; | ||
495 | |||
496 | /* track caller id inside control connection, call expect_related */ | ||
497 | static int | ||
498 | conntrack_pptp_help(struct sk_buff **pskb, unsigned int protoff, | ||
499 | struct nf_conn *ct, enum ip_conntrack_info ctinfo) | ||
500 | |||
501 | { | ||
502 | int dir = CTINFO2DIR(ctinfo); | ||
503 | struct nf_ct_pptp_master *info = &nfct_help(ct)->help.ct_pptp_info; | ||
504 | struct tcphdr _tcph, *tcph; | ||
505 | struct pptp_pkt_hdr _pptph, *pptph; | ||
506 | struct PptpControlHeader _ctlh, *ctlh; | ||
507 | union pptp_ctrl_union _pptpReq, *pptpReq; | ||
508 | unsigned int tcplen = (*pskb)->len - protoff; | ||
509 | unsigned int datalen, reqlen, nexthdr_off; | ||
510 | int oldsstate, oldcstate; | ||
511 | int ret; | ||
512 | u_int16_t msg; | ||
513 | |||
514 | /* don't do any tracking before tcp handshake complete */ | ||
515 | if (ctinfo != IP_CT_ESTABLISHED && | ||
516 | ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY) | ||
517 | return NF_ACCEPT; | ||
518 | |||
519 | nexthdr_off = protoff; | ||
520 | tcph = skb_header_pointer(*pskb, nexthdr_off, sizeof(_tcph), &_tcph); | ||
521 | BUG_ON(!tcph); | ||
522 | nexthdr_off += tcph->doff * 4; | ||
523 | datalen = tcplen - tcph->doff * 4; | ||
524 | |||
525 | pptph = skb_header_pointer(*pskb, nexthdr_off, sizeof(_pptph), &_pptph); | ||
526 | if (!pptph) { | ||
527 | DEBUGP("no full PPTP header, can't track\n"); | ||
528 | return NF_ACCEPT; | ||
529 | } | ||
530 | nexthdr_off += sizeof(_pptph); | ||
531 | datalen -= sizeof(_pptph); | ||
532 | |||
533 | /* if it's not a control message we can't do anything with it */ | ||
534 | if (ntohs(pptph->packetType) != PPTP_PACKET_CONTROL || | ||
535 | ntohl(pptph->magicCookie) != PPTP_MAGIC_COOKIE) { | ||
536 | DEBUGP("not a control packet\n"); | ||
537 | return NF_ACCEPT; | ||
538 | } | ||
539 | |||
540 | ctlh = skb_header_pointer(*pskb, nexthdr_off, sizeof(_ctlh), &_ctlh); | ||
541 | if (!ctlh) | ||
542 | return NF_ACCEPT; | ||
543 | nexthdr_off += sizeof(_ctlh); | ||
544 | datalen -= sizeof(_ctlh); | ||
545 | |||
546 | reqlen = datalen; | ||
547 | msg = ntohs(ctlh->messageType); | ||
548 | if (msg > 0 && msg <= PPTP_MSG_MAX && reqlen < pptp_msg_size[msg]) | ||
549 | return NF_ACCEPT; | ||
550 | if (reqlen > sizeof(*pptpReq)) | ||
551 | reqlen = sizeof(*pptpReq); | ||
552 | |||
553 | pptpReq = skb_header_pointer(*pskb, nexthdr_off, reqlen, &_pptpReq); | ||
554 | if (!pptpReq) | ||
555 | return NF_ACCEPT; | ||
556 | |||
557 | oldsstate = info->sstate; | ||
558 | oldcstate = info->cstate; | ||
559 | |||
560 | spin_lock_bh(&nf_pptp_lock); | ||
561 | |||
562 | /* FIXME: We just blindly assume that the control connection is always | ||
563 | * established from PNS->PAC. However, the RFC makes no guarantee */ | ||
564 | if (dir == IP_CT_DIR_ORIGINAL) | ||
565 | /* client -> server (PNS -> PAC) */ | ||
566 | ret = pptp_outbound_pkt(pskb, ctlh, pptpReq, reqlen, ct, | ||
567 | ctinfo); | ||
568 | else | ||
569 | /* server -> client (PAC -> PNS) */ | ||
570 | ret = pptp_inbound_pkt(pskb, ctlh, pptpReq, reqlen, ct, | ||
571 | ctinfo); | ||
572 | DEBUGP("sstate: %d->%d, cstate: %d->%d\n", | ||
573 | oldsstate, info->sstate, oldcstate, info->cstate); | ||
574 | spin_unlock_bh(&nf_pptp_lock); | ||
575 | |||
576 | return ret; | ||
577 | } | ||
578 | |||
579 | /* control protocol helper */ | ||
580 | static struct nf_conntrack_helper pptp __read_mostly = { | ||
581 | .name = "pptp", | ||
582 | .me = THIS_MODULE, | ||
583 | .max_expected = 2, | ||
584 | .timeout = 5 * 60, | ||
585 | .tuple.src.l3num = AF_INET, | ||
586 | .tuple.src.u.tcp.port = __constant_htons(PPTP_CONTROL_PORT), | ||
587 | .tuple.dst.protonum = IPPROTO_TCP, | ||
588 | .mask.src.l3num = 0xffff, | ||
589 | .mask.src.u.tcp.port = __constant_htons(0xffff), | ||
590 | .mask.dst.protonum = 0xff, | ||
591 | .help = conntrack_pptp_help, | ||
592 | .destroy = pptp_destroy_siblings, | ||
593 | }; | ||
594 | |||
595 | static int __init nf_conntrack_pptp_init(void) | ||
596 | { | ||
597 | return nf_conntrack_helper_register(&pptp); | ||
598 | } | ||
599 | |||
600 | static void __exit nf_conntrack_pptp_fini(void) | ||
601 | { | ||
602 | nf_conntrack_helper_unregister(&pptp); | ||
603 | nf_ct_gre_keymap_flush(); | ||
604 | } | ||
605 | |||
606 | module_init(nf_conntrack_pptp_init); | ||
607 | module_exit(nf_conntrack_pptp_fini); | ||
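
The GRE timeouts used by pptp_expectfn() above come from the SECS/MINS/HOURS macros near the top of this file, which work purely by textual expansion: "10 MINS" becomes "10 * 60 * HZ", so the multiplier has to be written in front of the unit. Here is a small standalone sketch of the same trick, assuming an arbitrary HZ of 250 in place of the kernel's tick rate; only the PPTP_GRE_* names and values are taken from the file above:

#include <stdio.h>

#define HZ 250                 /* stand-in for the kernel tick rate */
#define SECS  * HZ
#define MINS  * 60 SECS
#define HOURS * 60 MINS

#define PPTP_GRE_TIMEOUT        (10 MINS)   /* expands to (10 * 60 * 250) */
#define PPTP_GRE_STREAM_TIMEOUT (5 HOURS)   /* expands to (5 * 60 * 60 * 250) */

int main(void)
{
	printf("GRE timeout:        %d jiffies\n", PPTP_GRE_TIMEOUT);
	printf("GRE stream timeout: %d jiffies\n", PPTP_GRE_STREAM_TIMEOUT);
	return 0;
}

With HZ at 250 this prints 150000 and 4500000 jiffies, i.e. ten minutes and five hours of ticks, the values the helper installs on the GRE data-channel conntrack entry.
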
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c new file mode 100644 index 000000000000..1a61b72712cd --- /dev/null +++ b/net/netfilter/nf_conntrack_proto.c | |||
@@ -0,0 +1,410 @@ | |||
1 | /* L3/L4 protocol support for nf_conntrack. */ | ||
2 | |||
3 | /* (C) 1999-2001 Paul `Rusty' Russell | ||
4 | * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org> | ||
5 | * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | */ | ||
11 | |||
12 | #include <linux/types.h> | ||
13 | #include <linux/netfilter.h> | ||
14 | #include <linux/module.h> | ||
15 | #include <linux/mutex.h> | ||
16 | #include <linux/skbuff.h> | ||
17 | #include <linux/vmalloc.h> | ||
18 | #include <linux/stddef.h> | ||
19 | #include <linux/err.h> | ||
20 | #include <linux/percpu.h> | ||
21 | #include <linux/moduleparam.h> | ||
22 | #include <linux/notifier.h> | ||
23 | #include <linux/kernel.h> | ||
24 | #include <linux/netdevice.h> | ||
25 | |||
26 | #include <net/netfilter/nf_conntrack.h> | ||
27 | #include <net/netfilter/nf_conntrack_l3proto.h> | ||
28 | #include <net/netfilter/nf_conntrack_l4proto.h> | ||
29 | #include <net/netfilter/nf_conntrack_core.h> | ||
30 | |||
31 | struct nf_conntrack_l4proto **nf_ct_protos[PF_MAX] __read_mostly; | ||
32 | struct nf_conntrack_l3proto *nf_ct_l3protos[AF_MAX] __read_mostly; | ||
33 | EXPORT_SYMBOL_GPL(nf_ct_l3protos); | ||
34 | |||
35 | #ifdef CONFIG_SYSCTL | ||
36 | static DEFINE_MUTEX(nf_ct_proto_sysctl_mutex); | ||
37 | |||
38 | static int | ||
39 | nf_ct_register_sysctl(struct ctl_table_header **header, struct ctl_table *path, | ||
40 | struct ctl_table *table, unsigned int *users) | ||
41 | { | ||
42 | if (*header == NULL) { | ||
43 | *header = nf_register_sysctl_table(path, table); | ||
44 | if (*header == NULL) | ||
45 | return -ENOMEM; | ||
46 | } | ||
47 | if (users != NULL) | ||
48 | (*users)++; | ||
49 | return 0; | ||
50 | } | ||
51 | |||
52 | static void | ||
53 | nf_ct_unregister_sysctl(struct ctl_table_header **header, | ||
54 | struct ctl_table *table, unsigned int *users) | ||
55 | { | ||
56 | if (users != NULL && --*users > 0) | ||
57 | return; | ||
58 | nf_unregister_sysctl_table(*header, table); | ||
59 | *header = NULL; | ||
60 | } | ||
61 | #endif | ||
62 | |||
63 | struct nf_conntrack_l4proto * | ||
64 | __nf_ct_l4proto_find(u_int16_t l3proto, u_int8_t l4proto) | ||
65 | { | ||
66 | if (unlikely(l3proto >= AF_MAX || nf_ct_protos[l3proto] == NULL)) | ||
67 | return &nf_conntrack_l4proto_generic; | ||
68 | |||
69 | return nf_ct_protos[l3proto][l4proto]; | ||
70 | } | ||
71 | EXPORT_SYMBOL_GPL(__nf_ct_l4proto_find); | ||
72 | |||
73 | /* this is guaranteed to always return a valid protocol helper, since | ||
74 | * it falls back to generic_protocol */ | ||
75 | struct nf_conntrack_l4proto * | ||
76 | nf_ct_l4proto_find_get(u_int16_t l3proto, u_int8_t l4proto) | ||
77 | { | ||
78 | struct nf_conntrack_l4proto *p; | ||
79 | |||
80 | preempt_disable(); | ||
81 | p = __nf_ct_l4proto_find(l3proto, l4proto); | ||
82 | if (!try_module_get(p->me)) | ||
83 | p = &nf_conntrack_l4proto_generic; | ||
84 | preempt_enable(); | ||
85 | |||
86 | return p; | ||
87 | } | ||
88 | EXPORT_SYMBOL_GPL(nf_ct_l4proto_find_get); | ||
89 | |||
90 | void nf_ct_l4proto_put(struct nf_conntrack_l4proto *p) | ||
91 | { | ||
92 | module_put(p->me); | ||
93 | } | ||
94 | EXPORT_SYMBOL_GPL(nf_ct_l4proto_put); | ||
95 | |||
96 | struct nf_conntrack_l3proto * | ||
97 | nf_ct_l3proto_find_get(u_int16_t l3proto) | ||
98 | { | ||
99 | struct nf_conntrack_l3proto *p; | ||
100 | |||
101 | preempt_disable(); | ||
102 | p = __nf_ct_l3proto_find(l3proto); | ||
103 | if (!try_module_get(p->me)) | ||
104 | p = &nf_conntrack_l3proto_generic; | ||
105 | preempt_enable(); | ||
106 | |||
107 | return p; | ||
108 | } | ||
109 | EXPORT_SYMBOL_GPL(nf_ct_l3proto_find_get); | ||
110 | |||
111 | void nf_ct_l3proto_put(struct nf_conntrack_l3proto *p) | ||
112 | { | ||
113 | module_put(p->me); | ||
114 | } | ||
115 | EXPORT_SYMBOL_GPL(nf_ct_l3proto_put); | ||
116 | |||
117 | int | ||
118 | nf_ct_l3proto_try_module_get(unsigned short l3proto) | ||
119 | { | ||
120 | int ret; | ||
121 | struct nf_conntrack_l3proto *p; | ||
122 | |||
123 | retry: p = nf_ct_l3proto_find_get(l3proto); | ||
124 | if (p == &nf_conntrack_l3proto_generic) { | ||
125 | ret = request_module("nf_conntrack-%d", l3proto); | ||
126 | if (!ret) | ||
127 | goto retry; | ||
128 | |||
129 | return -EPROTOTYPE; | ||
130 | } | ||
131 | |||
132 | return 0; | ||
133 | } | ||
134 | EXPORT_SYMBOL_GPL(nf_ct_l3proto_try_module_get); | ||
135 | |||
136 | void nf_ct_l3proto_module_put(unsigned short l3proto) | ||
137 | { | ||
138 | struct nf_conntrack_l3proto *p; | ||
139 | |||
140 | preempt_disable(); | ||
141 | p = __nf_ct_l3proto_find(l3proto); | ||
142 | preempt_enable(); | ||
143 | |||
144 | module_put(p->me); | ||
145 | } | ||
146 | EXPORT_SYMBOL_GPL(nf_ct_l3proto_module_put); | ||
147 | |||
148 | static int kill_l3proto(struct nf_conn *i, void *data) | ||
149 | { | ||
150 | return (i->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num == | ||
151 | ((struct nf_conntrack_l3proto *)data)->l3proto); | ||
152 | } | ||
153 | |||
154 | static int kill_l4proto(struct nf_conn *i, void *data) | ||
155 | { | ||
156 | struct nf_conntrack_l4proto *l4proto; | ||
157 | l4proto = (struct nf_conntrack_l4proto *)data; | ||
158 | return (i->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum == | ||
159 | l4proto->l4proto) && | ||
160 | (i->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num == | ||
161 | l4proto->l3proto); | ||
162 | } | ||
163 | |||
164 | static int nf_ct_l3proto_register_sysctl(struct nf_conntrack_l3proto *l3proto) | ||
165 | { | ||
166 | int err = 0; | ||
167 | |||
168 | #ifdef CONFIG_SYSCTL | ||
169 | mutex_lock(&nf_ct_proto_sysctl_mutex); | ||
170 | if (l3proto->ctl_table != NULL) { | ||
171 | err = nf_ct_register_sysctl(&l3proto->ctl_table_header, | ||
172 | l3proto->ctl_table_path, | ||
173 | l3proto->ctl_table, NULL); | ||
174 | } | ||
175 | mutex_unlock(&nf_ct_proto_sysctl_mutex); | ||
176 | #endif | ||
177 | return err; | ||
178 | } | ||
179 | |||
180 | static void nf_ct_l3proto_unregister_sysctl(struct nf_conntrack_l3proto *l3proto) | ||
181 | { | ||
182 | #ifdef CONFIG_SYSCTL | ||
183 | mutex_lock(&nf_ct_proto_sysctl_mutex); | ||
184 | if (l3proto->ctl_table_header != NULL) | ||
185 | nf_ct_unregister_sysctl(&l3proto->ctl_table_header, | ||
186 | l3proto->ctl_table, NULL); | ||
187 | mutex_unlock(&nf_ct_proto_sysctl_mutex); | ||
188 | #endif | ||
189 | } | ||
190 | |||
191 | int nf_conntrack_l3proto_register(struct nf_conntrack_l3proto *proto) | ||
192 | { | ||
193 | int ret = 0; | ||
194 | |||
195 | if (proto->l3proto >= AF_MAX) { | ||
196 | ret = -EBUSY; | ||
197 | goto out; | ||
198 | } | ||
199 | |||
200 | write_lock_bh(&nf_conntrack_lock); | ||
201 | if (nf_ct_l3protos[proto->l3proto] != &nf_conntrack_l3proto_generic) { | ||
202 | ret = -EBUSY; | ||
203 | goto out_unlock; | ||
204 | } | ||
205 | nf_ct_l3protos[proto->l3proto] = proto; | ||
206 | write_unlock_bh(&nf_conntrack_lock); | ||
207 | |||
208 | ret = nf_ct_l3proto_register_sysctl(proto); | ||
209 | if (ret < 0) | ||
210 | nf_conntrack_l3proto_unregister(proto); | ||
211 | return ret; | ||
212 | |||
213 | out_unlock: | ||
214 | write_unlock_bh(&nf_conntrack_lock); | ||
215 | out: | ||
216 | return ret; | ||
217 | } | ||
218 | EXPORT_SYMBOL_GPL(nf_conntrack_l3proto_register); | ||
219 | |||
220 | int nf_conntrack_l3proto_unregister(struct nf_conntrack_l3proto *proto) | ||
221 | { | ||
222 | int ret = 0; | ||
223 | |||
224 | if (proto->l3proto >= AF_MAX) { | ||
225 | ret = -EBUSY; | ||
226 | goto out; | ||
227 | } | ||
228 | |||
229 | write_lock_bh(&nf_conntrack_lock); | ||
230 | if (nf_ct_l3protos[proto->l3proto] != proto) { | ||
231 | write_unlock_bh(&nf_conntrack_lock); | ||
232 | ret = -EBUSY; | ||
233 | goto out; | ||
234 | } | ||
235 | |||
236 | nf_ct_l3protos[proto->l3proto] = &nf_conntrack_l3proto_generic; | ||
237 | write_unlock_bh(&nf_conntrack_lock); | ||
238 | |||
239 | nf_ct_l3proto_unregister_sysctl(proto); | ||
240 | |||
241 | /* Somebody could be still looking at the proto in bh. */ | ||
242 | synchronize_net(); | ||
243 | |||
244 | /* Remove all conntrack entries for this protocol */ | ||
245 | nf_ct_iterate_cleanup(kill_l3proto, proto); | ||
246 | |||
247 | out: | ||
248 | return ret; | ||
249 | } | ||
250 | EXPORT_SYMBOL_GPL(nf_conntrack_l3proto_unregister); | ||
251 | |||
252 | static int nf_ct_l4proto_register_sysctl(struct nf_conntrack_l4proto *l4proto) | ||
253 | { | ||
254 | int err = 0; | ||
255 | |||
256 | #ifdef CONFIG_SYSCTL | ||
257 | mutex_lock(&nf_ct_proto_sysctl_mutex); | ||
258 | if (l4proto->ctl_table != NULL) { | ||
259 | err = nf_ct_register_sysctl(l4proto->ctl_table_header, | ||
260 | nf_net_netfilter_sysctl_path, | ||
261 | l4proto->ctl_table, | ||
262 | l4proto->ctl_table_users); | ||
263 | if (err < 0) | ||
264 | goto out; | ||
265 | } | ||
266 | #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT | ||
267 | if (l4proto->ctl_compat_table != NULL) { | ||
268 | err = nf_ct_register_sysctl(&l4proto->ctl_compat_table_header, | ||
269 | nf_net_ipv4_netfilter_sysctl_path, | ||
270 | l4proto->ctl_compat_table, NULL); | ||
271 | if (err == 0) | ||
272 | goto out; | ||
273 | nf_ct_unregister_sysctl(l4proto->ctl_table_header, | ||
274 | l4proto->ctl_table, | ||
275 | l4proto->ctl_table_users); | ||
276 | } | ||
277 | #endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */ | ||
278 | out: | ||
279 | mutex_unlock(&nf_ct_proto_sysctl_mutex); | ||
280 | #endif /* CONFIG_SYSCTL */ | ||
281 | return err; | ||
282 | } | ||
283 | |||
284 | static void nf_ct_l4proto_unregister_sysctl(struct nf_conntrack_l4proto *l4proto) | ||
285 | { | ||
286 | #ifdef CONFIG_SYSCTL | ||
287 | mutex_lock(&nf_ct_proto_sysctl_mutex); | ||
288 | if (l4proto->ctl_table_header != NULL && | ||
289 | *l4proto->ctl_table_header != NULL) | ||
290 | nf_ct_unregister_sysctl(l4proto->ctl_table_header, | ||
291 | l4proto->ctl_table, | ||
292 | l4proto->ctl_table_users); | ||
293 | #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT | ||
294 | if (l4proto->ctl_compat_table_header != NULL) | ||
295 | nf_ct_unregister_sysctl(&l4proto->ctl_compat_table_header, | ||
296 | l4proto->ctl_compat_table, NULL); | ||
297 | #endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */ | ||
298 | mutex_unlock(&nf_ct_proto_sysctl_mutex); | ||
299 | #endif /* CONFIG_SYSCTL */ | ||
300 | } | ||
301 | |||
302 | /* FIXME: Allow NULL functions and sub in pointers to generic for | ||
303 | them. --RR */ | ||
304 | int nf_conntrack_l4proto_register(struct nf_conntrack_l4proto *l4proto) | ||
305 | { | ||
306 | int ret = 0; | ||
307 | |||
308 | if (l4proto->l3proto >= PF_MAX) { | ||
309 | ret = -EBUSY; | ||
310 | goto out; | ||
311 | } | ||
312 | |||
313 | if (l4proto == &nf_conntrack_l4proto_generic) | ||
314 | return nf_ct_l4proto_register_sysctl(l4proto); | ||
315 | |||
316 | retry: | ||
317 | write_lock_bh(&nf_conntrack_lock); | ||
318 | if (nf_ct_protos[l4proto->l3proto]) { | ||
319 | if (nf_ct_protos[l4proto->l3proto][l4proto->l4proto] | ||
320 | != &nf_conntrack_l4proto_generic) { | ||
321 | ret = -EBUSY; | ||
322 | goto out_unlock; | ||
323 | } | ||
324 | } else { | ||
325 | /* l3proto may be loaded later. */ | ||
326 | struct nf_conntrack_l4proto **proto_array; | ||
327 | int i; | ||
328 | |||
329 | write_unlock_bh(&nf_conntrack_lock); | ||
330 | |||
331 | proto_array = (struct nf_conntrack_l4proto **) | ||
332 | kmalloc(MAX_NF_CT_PROTO * | ||
333 | sizeof(struct nf_conntrack_l4proto *), | ||
334 | GFP_KERNEL); | ||
335 | if (proto_array == NULL) { | ||
336 | ret = -ENOMEM; | ||
337 | goto out; | ||
338 | } | ||
339 | for (i = 0; i < MAX_NF_CT_PROTO; i++) | ||
340 | proto_array[i] = &nf_conntrack_l4proto_generic; | ||
341 | |||
342 | write_lock_bh(&nf_conntrack_lock); | ||
343 | if (nf_ct_protos[l4proto->l3proto]) { | ||
344 | /* bad timing, but no problem */ | ||
345 | write_unlock_bh(&nf_conntrack_lock); | ||
346 | kfree(proto_array); | ||
347 | } else { | ||
348 | nf_ct_protos[l4proto->l3proto] = proto_array; | ||
349 | write_unlock_bh(&nf_conntrack_lock); | ||
350 | } | ||
351 | |||
352 | /* | ||
353 | * Just once because array is never freed until unloading | ||
354 | * nf_conntrack.ko | ||
355 | */ | ||
356 | goto retry; | ||
357 | } | ||
358 | |||
359 | nf_ct_protos[l4proto->l3proto][l4proto->l4proto] = l4proto; | ||
360 | write_unlock_bh(&nf_conntrack_lock); | ||
361 | |||
362 | ret = nf_ct_l4proto_register_sysctl(l4proto); | ||
363 | if (ret < 0) | ||
364 | nf_conntrack_l4proto_unregister(l4proto); | ||
365 | return ret; | ||
366 | |||
367 | out_unlock: | ||
368 | write_unlock_bh(&nf_conntrack_lock); | ||
369 | out: | ||
370 | return ret; | ||
371 | } | ||
372 | EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_register); | ||
373 | |||
374 | int nf_conntrack_l4proto_unregister(struct nf_conntrack_l4proto *l4proto) | ||
375 | { | ||
376 | int ret = 0; | ||
377 | |||
378 | if (l4proto->l3proto >= PF_MAX) { | ||
379 | ret = -EBUSY; | ||
380 | goto out; | ||
381 | } | ||
382 | |||
383 | if (l4proto == &nf_conntrack_l4proto_generic) { | ||
384 | nf_ct_l4proto_unregister_sysctl(l4proto); | ||
385 | goto out; | ||
386 | } | ||
387 | |||
388 | write_lock_bh(&nf_conntrack_lock); | ||
389 | if (nf_ct_protos[l4proto->l3proto][l4proto->l4proto] | ||
390 | != l4proto) { | ||
391 | write_unlock_bh(&nf_conntrack_lock); | ||
392 | ret = -EBUSY; | ||
393 | goto out; | ||
394 | } | ||
395 | nf_ct_protos[l4proto->l3proto][l4proto->l4proto] | ||
396 | = &nf_conntrack_l4proto_generic; | ||
397 | write_unlock_bh(&nf_conntrack_lock); | ||
398 | |||
399 | nf_ct_l4proto_unregister_sysctl(l4proto); | ||
400 | |||
401 | /* Somebody could be still looking at the proto in bh. */ | ||
402 | synchronize_net(); | ||
403 | |||
404 | /* Remove all conntrack entries for this protocol */ | ||
405 | nf_ct_iterate_cleanup(kill_l4proto, l4proto); | ||
406 | |||
407 | out: | ||
408 | return ret; | ||
409 | } | ||
410 | EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_unregister); | ||
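For reference, a minimal sketch of how an out-of-tree tracker would use the registration API introduced above. This is an assumed, hypothetical module (protocol number 253 is the IANA "experimentation and testing" value), not part of this commit; a real tracker must also fill in the pkt_to_tuple, invert_tuple, packet and new callbacks, as the trackers further down in this patch do.

#include <linux/module.h>
#include <linux/in.h>
#include <net/netfilter/nf_conntrack_l4proto.h>

/* Hypothetical example tracker: only the fields needed for registration
 * are shown here. */
static struct nf_conntrack_l4proto nf_conntrack_l4proto_example = {
	.l3proto	= PF_INET,
	.l4proto	= 253,		/* IANA experimental protocol number */
	.name		= "example",
	.me		= THIS_MODULE,
};

static int __init nf_ct_proto_example_init(void)
{
	/* Fails with -EBUSY if another tracker already owns this slot. */
	return nf_conntrack_l4proto_register(&nf_conntrack_l4proto_example);
}

static void __exit nf_ct_proto_example_fini(void)
{
	/* Resets the slot to the generic tracker and flushes its entries. */
	nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_example);
}

module_init(nf_ct_proto_example_init);
module_exit(nf_ct_proto_example_fini);
MODULE_LICENSE("GPL");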
diff --git a/net/netfilter/nf_conntrack_proto_generic.c b/net/netfilter/nf_conntrack_proto_generic.c index 26408bb0955b..69902531c236 100644 --- a/net/netfilter/nf_conntrack_proto_generic.c +++ b/net/netfilter/nf_conntrack_proto_generic.c | |||
@@ -15,9 +15,9 @@ | |||
15 | #include <linux/sched.h> | 15 | #include <linux/sched.h> |
16 | #include <linux/timer.h> | 16 | #include <linux/timer.h> |
17 | #include <linux/netfilter.h> | 17 | #include <linux/netfilter.h> |
18 | #include <net/netfilter/nf_conntrack_protocol.h> | 18 | #include <net/netfilter/nf_conntrack_l4proto.h> |
19 | 19 | ||
20 | unsigned int nf_ct_generic_timeout __read_mostly = 600*HZ; | 20 | static unsigned int nf_ct_generic_timeout __read_mostly = 600*HZ; |
21 | 21 | ||
22 | static int generic_pkt_to_tuple(const struct sk_buff *skb, | 22 | static int generic_pkt_to_tuple(const struct sk_buff *skb, |
23 | unsigned int dataoff, | 23 | unsigned int dataoff, |
@@ -71,10 +71,42 @@ static int new(struct nf_conn *conntrack, const struct sk_buff *skb, | |||
71 | return 1; | 71 | return 1; |
72 | } | 72 | } |
73 | 73 | ||
74 | struct nf_conntrack_protocol nf_conntrack_generic_protocol = | 74 | #ifdef CONFIG_SYSCTL |
75 | static struct ctl_table_header *generic_sysctl_header; | ||
76 | static struct ctl_table generic_sysctl_table[] = { | ||
77 | { | ||
78 | .ctl_name = NET_NF_CONNTRACK_GENERIC_TIMEOUT, | ||
79 | .procname = "nf_conntrack_generic_timeout", | ||
80 | .data = &nf_ct_generic_timeout, | ||
81 | .maxlen = sizeof(unsigned int), | ||
82 | .mode = 0644, | ||
83 | .proc_handler = &proc_dointvec_jiffies, | ||
84 | }, | ||
85 | { | ||
86 | .ctl_name = 0 | ||
87 | } | ||
88 | }; | ||
89 | #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT | ||
90 | static struct ctl_table generic_compat_sysctl_table[] = { | ||
91 | { | ||
92 | .ctl_name = NET_IPV4_NF_CONNTRACK_GENERIC_TIMEOUT, | ||
93 | .procname = "ip_conntrack_generic_timeout", | ||
94 | .data = &nf_ct_generic_timeout, | ||
95 | .maxlen = sizeof(unsigned int), | ||
96 | .mode = 0644, | ||
97 | .proc_handler = &proc_dointvec_jiffies, | ||
98 | }, | ||
99 | { | ||
100 | .ctl_name = 0 | ||
101 | } | ||
102 | }; | ||
103 | #endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */ | ||
104 | #endif /* CONFIG_SYSCTL */ | ||
105 | |||
106 | struct nf_conntrack_l4proto nf_conntrack_l4proto_generic = | ||
75 | { | 107 | { |
76 | .l3proto = PF_UNSPEC, | 108 | .l3proto = PF_UNSPEC, |
77 | .proto = 0, | 109 | .l4proto = 0, |
78 | .name = "unknown", | 110 | .name = "unknown", |
79 | .pkt_to_tuple = generic_pkt_to_tuple, | 111 | .pkt_to_tuple = generic_pkt_to_tuple, |
80 | .invert_tuple = generic_invert_tuple, | 112 | .invert_tuple = generic_invert_tuple, |
@@ -82,4 +114,11 @@ struct nf_conntrack_protocol nf_conntrack_generic_protocol = | |||
82 | .print_conntrack = generic_print_conntrack, | 114 | .print_conntrack = generic_print_conntrack, |
83 | .packet = packet, | 115 | .packet = packet, |
84 | .new = new, | 116 | .new = new, |
117 | #ifdef CONFIG_SYSCTL | ||
118 | .ctl_table_header = &generic_sysctl_header, | ||
119 | .ctl_table = generic_sysctl_table, | ||
120 | #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT | ||
121 | .ctl_compat_table = generic_compat_sysctl_table, | ||
122 | #endif | ||
123 | #endif | ||
85 | }; | 124 | }; |
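With the tables above wired into the shared sysctl registration, the generic timeout becomes tunable at runtime under net.netfilter (and, via the compat table, under the old net.ipv4.netfilter name). A hedged user-space illustration, not part of this commit; the /proc path is inferred from the table's procname and the netfilter sysctl path used by the registration code earlier in this patch:

#include <stdio.h>

int main(void)
{
	/* Assumed path: procname "nf_conntrack_generic_timeout" registered
	 * under net/netfilter; proc_dointvec_jiffies reports the value in
	 * seconds. */
	FILE *f = fopen("/proc/sys/net/netfilter/nf_conntrack_generic_timeout", "r");
	char buf[64];

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("generic conntrack timeout (s): %s", buf);
	fclose(f);
	return 0;
}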
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c new file mode 100644 index 000000000000..ac193ce70249 --- /dev/null +++ b/net/netfilter/nf_conntrack_proto_gre.c | |||
@@ -0,0 +1,305 @@ | |||
1 | /* | ||
2 | * ip_conntrack_proto_gre.c - Version 3.0 | ||
3 | * | ||
4 | * Connection tracking protocol helper module for GRE. | ||
5 | * | ||
6 | * GRE is a generic encapsulation protocol, which is generally not well | ||
7 | * suited for NAT, as it has no protocol-specific part such as port numbers. | ||
8 | * | ||
9 | * It has an optional key field, which may help us distinguish two | ||
10 | * connections between the same two hosts. | ||
11 | * | ||
12 | * GRE is defined in RFC 1701 and RFC 1702, as well as RFC 2784 | ||
13 | * | ||
14 | * PPTP is built on top of a modified version of GRE, and has a mandatory | ||
15 | * field called "CallID", which serves us for the same purpose as the key | ||
16 | * field in plain GRE. | ||
17 | * | ||
18 | * Documentation about PPTP can be found in RFC 2637 | ||
19 | * | ||
20 | * (C) 2000-2005 by Harald Welte <laforge@gnumonks.org> | ||
21 | * | ||
22 | * Development of this code funded by Astaro AG (http://www.astaro.com/) | ||
23 | * | ||
24 | */ | ||
25 | |||
26 | #include <linux/module.h> | ||
27 | #include <linux/types.h> | ||
28 | #include <linux/timer.h> | ||
29 | #include <linux/list.h> | ||
30 | #include <linux/seq_file.h> | ||
31 | #include <linux/in.h> | ||
32 | #include <linux/skbuff.h> | ||
33 | |||
34 | #include <net/netfilter/nf_conntrack_l4proto.h> | ||
35 | #include <net/netfilter/nf_conntrack_helper.h> | ||
36 | #include <net/netfilter/nf_conntrack_core.h> | ||
37 | #include <linux/netfilter/nf_conntrack_proto_gre.h> | ||
38 | #include <linux/netfilter/nf_conntrack_pptp.h> | ||
39 | |||
40 | #define GRE_TIMEOUT (30 * HZ) | ||
41 | #define GRE_STREAM_TIMEOUT (180 * HZ) | ||
42 | |||
43 | #if 0 | ||
44 | #define DEBUGP(format, args...) printk(KERN_DEBUG "%s:%s: " format, __FILE__, __FUNCTION__, ## args) | ||
45 | #else | ||
46 | #define DEBUGP(x, args...) | ||
47 | #endif | ||
48 | |||
49 | static DEFINE_RWLOCK(nf_ct_gre_lock); | ||
50 | static LIST_HEAD(gre_keymap_list); | ||
51 | |||
52 | void nf_ct_gre_keymap_flush(void) | ||
53 | { | ||
54 | struct list_head *pos, *n; | ||
55 | |||
56 | write_lock_bh(&nf_ct_gre_lock); | ||
57 | list_for_each_safe(pos, n, &gre_keymap_list) { | ||
58 | list_del(pos); | ||
59 | kfree(pos); | ||
60 | } | ||
61 | write_unlock_bh(&nf_ct_gre_lock); | ||
62 | } | ||
63 | EXPORT_SYMBOL(nf_ct_gre_keymap_flush); | ||
64 | |||
65 | static inline int gre_key_cmpfn(const struct nf_ct_gre_keymap *km, | ||
66 | const struct nf_conntrack_tuple *t) | ||
67 | { | ||
68 | return km->tuple.src.l3num == t->src.l3num && | ||
69 | !memcmp(&km->tuple.src.u3, &t->src.u3, sizeof(t->src.u3)) && | ||
70 | !memcmp(&km->tuple.dst.u3, &t->dst.u3, sizeof(t->dst.u3)) && | ||
71 | km->tuple.dst.protonum == t->dst.protonum && | ||
72 | km->tuple.dst.u.all == t->dst.u.all; | ||
73 | } | ||
74 | |||
75 | /* look up the source key for a given tuple */ | ||
76 | static __be16 gre_keymap_lookup(struct nf_conntrack_tuple *t) | ||
77 | { | ||
78 | struct nf_ct_gre_keymap *km; | ||
79 | __be16 key = 0; | ||
80 | |||
81 | read_lock_bh(&nf_ct_gre_lock); | ||
82 | list_for_each_entry(km, &gre_keymap_list, list) { | ||
83 | if (gre_key_cmpfn(km, t)) { | ||
84 | key = km->tuple.src.u.gre.key; | ||
85 | break; | ||
86 | } | ||
87 | } | ||
88 | read_unlock_bh(&nf_ct_gre_lock); | ||
89 | |||
90 | DEBUGP("lookup src key 0x%x for ", key); | ||
91 | NF_CT_DUMP_TUPLE(t); | ||
92 | |||
93 | return key; | ||
94 | } | ||
95 | |||
96 | /* add a single keymap entry, associate with specified master ct */ | ||
97 | int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir, | ||
98 | struct nf_conntrack_tuple *t) | ||
99 | { | ||
100 | struct nf_conn_help *help = nfct_help(ct); | ||
101 | struct nf_ct_gre_keymap **kmp, *km; | ||
102 | |||
103 | BUG_ON(strcmp(help->helper->name, "pptp")); | ||
104 | kmp = &help->help.ct_pptp_info.keymap[dir]; | ||
105 | if (*kmp) { | ||
106 | /* check whether it's a retransmission */ | ||
107 | list_for_each_entry(km, &gre_keymap_list, list) { | ||
108 | if (gre_key_cmpfn(km, t) && km == *kmp) | ||
109 | return 0; | ||
110 | } | ||
111 | DEBUGP("trying to override keymap_%s for ct %p\n", | ||
112 | dir == IP_CT_DIR_REPLY ? "reply" : "orig", ct); | ||
113 | return -EEXIST; | ||
114 | } | ||
115 | |||
116 | km = kmalloc(sizeof(*km), GFP_ATOMIC); | ||
117 | if (!km) | ||
118 | return -ENOMEM; | ||
119 | memcpy(&km->tuple, t, sizeof(*t)); | ||
120 | *kmp = km; | ||
121 | |||
122 | DEBUGP("adding new entry %p: ", km); | ||
123 | NF_CT_DUMP_TUPLE(&km->tuple); | ||
124 | |||
125 | write_lock_bh(&nf_ct_gre_lock); | ||
126 | list_add_tail(&km->list, &gre_keymap_list); | ||
127 | write_unlock_bh(&nf_ct_gre_lock); | ||
128 | |||
129 | return 0; | ||
130 | } | ||
131 | EXPORT_SYMBOL_GPL(nf_ct_gre_keymap_add); | ||
132 | |||
133 | /* destroy the keymap entries associated with specified master ct */ | ||
134 | void nf_ct_gre_keymap_destroy(struct nf_conn *ct) | ||
135 | { | ||
136 | struct nf_conn_help *help = nfct_help(ct); | ||
137 | enum ip_conntrack_dir dir; | ||
138 | |||
139 | DEBUGP("entering for ct %p\n", ct); | ||
140 | BUG_ON(strcmp(help->helper->name, "pptp")); | ||
141 | |||
142 | write_lock_bh(&nf_ct_gre_lock); | ||
143 | for (dir = IP_CT_DIR_ORIGINAL; dir < IP_CT_DIR_MAX; dir++) { | ||
144 | if (help->help.ct_pptp_info.keymap[dir]) { | ||
145 | DEBUGP("removing %p from list\n", | ||
146 | help->help.ct_pptp_info.keymap[dir]); | ||
147 | list_del(&help->help.ct_pptp_info.keymap[dir]->list); | ||
148 | kfree(help->help.ct_pptp_info.keymap[dir]); | ||
149 | help->help.ct_pptp_info.keymap[dir] = NULL; | ||
150 | } | ||
151 | } | ||
152 | write_unlock_bh(&nf_ct_gre_lock); | ||
153 | } | ||
154 | EXPORT_SYMBOL_GPL(nf_ct_gre_keymap_destroy); | ||
155 | |||
156 | /* PUBLIC CONNTRACK PROTO HELPER FUNCTIONS */ | ||
157 | |||
158 | /* invert gre part of tuple */ | ||
159 | static int gre_invert_tuple(struct nf_conntrack_tuple *tuple, | ||
160 | const struct nf_conntrack_tuple *orig) | ||
161 | { | ||
162 | tuple->dst.u.gre.key = orig->src.u.gre.key; | ||
163 | tuple->src.u.gre.key = orig->dst.u.gre.key; | ||
164 | return 1; | ||
165 | } | ||
166 | |||
167 | /* gre hdr info to tuple */ | ||
168 | static int gre_pkt_to_tuple(const struct sk_buff *skb, | ||
169 | unsigned int dataoff, | ||
170 | struct nf_conntrack_tuple *tuple) | ||
171 | { | ||
172 | struct gre_hdr_pptp _pgrehdr, *pgrehdr; | ||
173 | __be16 srckey; | ||
174 | struct gre_hdr _grehdr, *grehdr; | ||
175 | |||
176 | /* first only delinearize old RFC1701 GRE header */ | ||
177 | grehdr = skb_header_pointer(skb, dataoff, sizeof(_grehdr), &_grehdr); | ||
178 | if (!grehdr || grehdr->version != GRE_VERSION_PPTP) { | ||
179 | /* try to behave like "nf_conntrack_proto_generic" */ | ||
180 | tuple->src.u.all = 0; | ||
181 | tuple->dst.u.all = 0; | ||
182 | return 1; | ||
183 | } | ||
184 | |||
185 | /* PPTP header is variable length, only need up to the call_id field */ | ||
186 | pgrehdr = skb_header_pointer(skb, dataoff, 8, &_pgrehdr); | ||
187 | if (!pgrehdr) | ||
188 | return 1; | ||
189 | |||
190 | if (ntohs(grehdr->protocol) != GRE_PROTOCOL_PPTP) { | ||
191 | DEBUGP("GRE_VERSION_PPTP but unknown proto\n"); | ||
192 | return 0; | ||
193 | } | ||
194 | |||
195 | tuple->dst.u.gre.key = pgrehdr->call_id; | ||
196 | srckey = gre_keymap_lookup(tuple); | ||
197 | tuple->src.u.gre.key = srckey; | ||
198 | |||
199 | return 1; | ||
200 | } | ||
201 | |||
202 | /* print gre part of tuple */ | ||
203 | static int gre_print_tuple(struct seq_file *s, | ||
204 | const struct nf_conntrack_tuple *tuple) | ||
205 | { | ||
206 | return seq_printf(s, "srckey=0x%x dstkey=0x%x ", | ||
207 | ntohs(tuple->src.u.gre.key), | ||
208 | ntohs(tuple->dst.u.gre.key)); | ||
209 | } | ||
210 | |||
211 | /* print private data for conntrack */ | ||
212 | static int gre_print_conntrack(struct seq_file *s, | ||
213 | const struct nf_conn *ct) | ||
214 | { | ||
215 | return seq_printf(s, "timeout=%u, stream_timeout=%u ", | ||
216 | (ct->proto.gre.timeout / HZ), | ||
217 | (ct->proto.gre.stream_timeout / HZ)); | ||
218 | } | ||
219 | |||
220 | /* Returns verdict for packet, and may modify conntrack */ | ||
221 | static int gre_packet(struct nf_conn *ct, | ||
222 | const struct sk_buff *skb, | ||
223 | unsigned int dataoff, | ||
224 | enum ip_conntrack_info ctinfo, | ||
225 | int pf, | ||
226 | unsigned int hooknum) | ||
227 | { | ||
228 | /* If we've seen traffic both ways, this is a GRE connection. | ||
229 | * Extend timeout. */ | ||
230 | if (ct->status & IPS_SEEN_REPLY) { | ||
231 | nf_ct_refresh_acct(ct, ctinfo, skb, | ||
232 | ct->proto.gre.stream_timeout); | ||
233 | /* Also, more likely to be important, and not a probe. */ | ||
234 | set_bit(IPS_ASSURED_BIT, &ct->status); | ||
235 | nf_conntrack_event_cache(IPCT_STATUS, skb); | ||
236 | } else | ||
237 | nf_ct_refresh_acct(ct, ctinfo, skb, | ||
238 | ct->proto.gre.timeout); | ||
239 | |||
240 | return NF_ACCEPT; | ||
241 | } | ||
242 | |||
243 | /* Called when a new connection for this protocol is found. */ | ||
244 | static int gre_new(struct nf_conn *ct, const struct sk_buff *skb, | ||
245 | unsigned int dataoff) | ||
246 | { | ||
247 | DEBUGP(": "); | ||
248 | NF_CT_DUMP_TUPLE(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); | ||
249 | |||
250 | /* initialize to sane values. Ideally a conntrack helper | ||
251 | * (e.g. in the case of PPTP) will increase them */ | ||
252 | ct->proto.gre.stream_timeout = GRE_STREAM_TIMEOUT; | ||
253 | ct->proto.gre.timeout = GRE_TIMEOUT; | ||
254 | |||
255 | return 1; | ||
256 | } | ||
257 | |||
258 | /* Called when a conntrack entry has already been removed from the hashes | ||
259 | * and is about to be deleted from memory */ | ||
260 | static void gre_destroy(struct nf_conn *ct) | ||
261 | { | ||
262 | struct nf_conn *master = ct->master; | ||
263 | DEBUGP(" entering\n"); | ||
264 | |||
265 | if (!master) | ||
266 | DEBUGP("no master !?!\n"); | ||
267 | else | ||
268 | nf_ct_gre_keymap_destroy(master); | ||
269 | } | ||
270 | |||
271 | /* protocol helper struct */ | ||
272 | static struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 = { | ||
273 | .l3proto = AF_INET, | ||
274 | .l4proto = IPPROTO_GRE, | ||
275 | .name = "gre", | ||
276 | .pkt_to_tuple = gre_pkt_to_tuple, | ||
277 | .invert_tuple = gre_invert_tuple, | ||
278 | .print_tuple = gre_print_tuple, | ||
279 | .print_conntrack = gre_print_conntrack, | ||
280 | .packet = gre_packet, | ||
281 | .new = gre_new, | ||
282 | .destroy = gre_destroy, | ||
283 | .me = THIS_MODULE, | ||
284 | #if defined(CONFIG_NF_CONNTRACK_NETLINK) || \ | ||
285 | defined(CONFIG_NF_CONNTRACK_NETLINK_MODULE) | ||
286 | .tuple_to_nfattr = nf_ct_port_tuple_to_nfattr, | ||
287 | .nfattr_to_tuple = nf_ct_port_nfattr_to_tuple, | ||
288 | #endif | ||
289 | }; | ||
290 | |||
291 | static int __init nf_ct_proto_gre_init(void) | ||
292 | { | ||
293 | return nf_conntrack_l4proto_register(&nf_conntrack_l4proto_gre4); | ||
294 | } | ||
295 | |||
296 | static void nf_ct_proto_gre_fini(void) | ||
297 | { | ||
298 | nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_gre4); | ||
299 | nf_ct_gre_keymap_flush(); | ||
300 | } | ||
301 | |||
302 | module_init(nf_ct_proto_gre_init); | ||
303 | module_exit(nf_ct_proto_gre_fini); | ||
304 | |||
305 | MODULE_LICENSE("GPL"); | ||
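The keymap API above only does something useful together with a helper that learns the call-IDs from the PPTP control connection. A hedged sketch of how such a helper might populate both directions for the expected GRE flow; the function is hypothetical and not part of this commit, and error handling is trimmed:

#include <linux/in.h>
#include <net/netfilter/nf_conntrack.h>
#include <linux/netfilter/nf_conntrack_proto_gre.h>

/* Hypothetical PPTP-style helper snippet: bind the call-IDs seen on the
 * control connection to the master conntrack, so gre_pkt_to_tuple() can
 * later recover the source key via gre_keymap_lookup(). */
static int example_pptp_keymap_add(struct nf_conn *master,
				   __be16 peer_callid, __be16 own_callid)
{
	struct nf_conntrack_tuple t;
	int ret;

	/* original direction: source key is the peer's call-ID */
	t = master->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
	t.dst.protonum	= IPPROTO_GRE;
	t.src.u.gre.key	= peer_callid;
	t.dst.u.gre.key	= own_callid;
	ret = nf_ct_gre_keymap_add(master, IP_CT_DIR_ORIGINAL, &t);
	if (ret < 0)
		return ret;

	/* reply direction: keys swapped, mirroring gre_invert_tuple() */
	t = master->tuplehash[IP_CT_DIR_REPLY].tuple;
	t.dst.protonum	= IPPROTO_GRE;
	t.src.u.gre.key	= own_callid;
	t.dst.u.gre.key	= peer_callid;
	return nf_ct_gre_keymap_add(master, IP_CT_DIR_REPLY, &t);
}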
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c index af568777372b..76e263668222 100644 --- a/net/netfilter/nf_conntrack_proto_sctp.c +++ b/net/netfilter/nf_conntrack_proto_sctp.c | |||
@@ -32,7 +32,8 @@ | |||
32 | #include <linux/interrupt.h> | 32 | #include <linux/interrupt.h> |
33 | 33 | ||
34 | #include <net/netfilter/nf_conntrack.h> | 34 | #include <net/netfilter/nf_conntrack.h> |
35 | #include <net/netfilter/nf_conntrack_protocol.h> | 35 | #include <net/netfilter/nf_conntrack_l4proto.h> |
36 | #include <net/netfilter/nf_conntrack_ecache.h> | ||
36 | 37 | ||
37 | #if 0 | 38 | #if 0 |
38 | #define DEBUGP(format, ...) printk(format, ## __VA_ARGS__) | 39 | #define DEBUGP(format, ...) printk(format, ## __VA_ARGS__) |
@@ -216,7 +217,7 @@ static int sctp_print_conntrack(struct seq_file *s, | |||
216 | for (offset = dataoff + sizeof(sctp_sctphdr_t), count = 0; \ | 217 | for (offset = dataoff + sizeof(sctp_sctphdr_t), count = 0; \ |
217 | offset < skb->len && \ | 218 | offset < skb->len && \ |
218 | (sch = skb_header_pointer(skb, offset, sizeof(_sch), &_sch)); \ | 219 | (sch = skb_header_pointer(skb, offset, sizeof(_sch), &_sch)); \ |
219 | offset += (htons(sch->length) + 3) & ~3, count++) | 220 | offset += (ntohs(sch->length) + 3) & ~3, count++) |
220 | 221 | ||
221 | /* Some validity checks to make sure the chunks are fine */ | 222 | /* Some validity checks to make sure the chunks are fine */ |
222 | static int do_basic_checks(struct nf_conn *conntrack, | 223 | static int do_basic_checks(struct nf_conn *conntrack, |
@@ -508,36 +509,10 @@ static int sctp_new(struct nf_conn *conntrack, const struct sk_buff *skb, | |||
508 | return 1; | 509 | return 1; |
509 | } | 510 | } |
510 | 511 | ||
511 | struct nf_conntrack_protocol nf_conntrack_protocol_sctp4 = { | ||
512 | .l3proto = PF_INET, | ||
513 | .proto = IPPROTO_SCTP, | ||
514 | .name = "sctp", | ||
515 | .pkt_to_tuple = sctp_pkt_to_tuple, | ||
516 | .invert_tuple = sctp_invert_tuple, | ||
517 | .print_tuple = sctp_print_tuple, | ||
518 | .print_conntrack = sctp_print_conntrack, | ||
519 | .packet = sctp_packet, | ||
520 | .new = sctp_new, | ||
521 | .destroy = NULL, | ||
522 | .me = THIS_MODULE | ||
523 | }; | ||
524 | |||
525 | struct nf_conntrack_protocol nf_conntrack_protocol_sctp6 = { | ||
526 | .l3proto = PF_INET6, | ||
527 | .proto = IPPROTO_SCTP, | ||
528 | .name = "sctp", | ||
529 | .pkt_to_tuple = sctp_pkt_to_tuple, | ||
530 | .invert_tuple = sctp_invert_tuple, | ||
531 | .print_tuple = sctp_print_tuple, | ||
532 | .print_conntrack = sctp_print_conntrack, | ||
533 | .packet = sctp_packet, | ||
534 | .new = sctp_new, | ||
535 | .destroy = NULL, | ||
536 | .me = THIS_MODULE | ||
537 | }; | ||
538 | |||
539 | #ifdef CONFIG_SYSCTL | 512 | #ifdef CONFIG_SYSCTL |
540 | static ctl_table nf_ct_sysctl_table[] = { | 513 | static unsigned int sctp_sysctl_table_users; |
514 | static struct ctl_table_header *sctp_sysctl_header; | ||
515 | static struct ctl_table sctp_sysctl_table[] = { | ||
541 | { | 516 | { |
542 | .ctl_name = NET_NF_CONNTRACK_SCTP_TIMEOUT_CLOSED, | 517 | .ctl_name = NET_NF_CONNTRACK_SCTP_TIMEOUT_CLOSED, |
543 | .procname = "nf_conntrack_sctp_timeout_closed", | 518 | .procname = "nf_conntrack_sctp_timeout_closed", |
@@ -594,63 +569,134 @@ static ctl_table nf_ct_sysctl_table[] = { | |||
594 | .mode = 0644, | 569 | .mode = 0644, |
595 | .proc_handler = &proc_dointvec_jiffies, | 570 | .proc_handler = &proc_dointvec_jiffies, |
596 | }, | 571 | }, |
597 | { .ctl_name = 0 } | 572 | { |
573 | .ctl_name = 0 | ||
574 | } | ||
598 | }; | 575 | }; |
599 | 576 | ||
600 | static ctl_table nf_ct_netfilter_table[] = { | 577 | #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT |
578 | static struct ctl_table sctp_compat_sysctl_table[] = { | ||
601 | { | 579 | { |
602 | .ctl_name = NET_NETFILTER, | 580 | .ctl_name = NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_CLOSED, |
603 | .procname = "netfilter", | 581 | .procname = "ip_conntrack_sctp_timeout_closed", |
604 | .mode = 0555, | 582 | .data = &nf_ct_sctp_timeout_closed, |
605 | .child = nf_ct_sysctl_table, | 583 | .maxlen = sizeof(unsigned int), |
584 | .mode = 0644, | ||
585 | .proc_handler = &proc_dointvec_jiffies, | ||
586 | }, | ||
587 | { | ||
588 | .ctl_name = NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_WAIT, | ||
589 | .procname = "ip_conntrack_sctp_timeout_cookie_wait", | ||
590 | .data = &nf_ct_sctp_timeout_cookie_wait, | ||
591 | .maxlen = sizeof(unsigned int), | ||
592 | .mode = 0644, | ||
593 | .proc_handler = &proc_dointvec_jiffies, | ||
594 | }, | ||
595 | { | ||
596 | .ctl_name = NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_ECHOED, | ||
597 | .procname = "ip_conntrack_sctp_timeout_cookie_echoed", | ||
598 | .data = &nf_ct_sctp_timeout_cookie_echoed, | ||
599 | .maxlen = sizeof(unsigned int), | ||
600 | .mode = 0644, | ||
601 | .proc_handler = &proc_dointvec_jiffies, | ||
602 | }, | ||
603 | { | ||
604 | .ctl_name = NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_ESTABLISHED, | ||
605 | .procname = "ip_conntrack_sctp_timeout_established", | ||
606 | .data = &nf_ct_sctp_timeout_established, | ||
607 | .maxlen = sizeof(unsigned int), | ||
608 | .mode = 0644, | ||
609 | .proc_handler = &proc_dointvec_jiffies, | ||
610 | }, | ||
611 | { | ||
612 | .ctl_name = NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_SENT, | ||
613 | .procname = "ip_conntrack_sctp_timeout_shutdown_sent", | ||
614 | .data = &nf_ct_sctp_timeout_shutdown_sent, | ||
615 | .maxlen = sizeof(unsigned int), | ||
616 | .mode = 0644, | ||
617 | .proc_handler = &proc_dointvec_jiffies, | ||
618 | }, | ||
619 | { | ||
620 | .ctl_name = NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_RECD, | ||
621 | .procname = "ip_conntrack_sctp_timeout_shutdown_recd", | ||
622 | .data = &nf_ct_sctp_timeout_shutdown_recd, | ||
623 | .maxlen = sizeof(unsigned int), | ||
624 | .mode = 0644, | ||
625 | .proc_handler = &proc_dointvec_jiffies, | ||
606 | }, | 626 | }, |
607 | { .ctl_name = 0 } | ||
608 | }; | ||
609 | |||
610 | static ctl_table nf_ct_net_table[] = { | ||
611 | { | 627 | { |
612 | .ctl_name = CTL_NET, | 628 | .ctl_name = NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_ACK_SENT, |
613 | .procname = "net", | 629 | .procname = "ip_conntrack_sctp_timeout_shutdown_ack_sent", |
614 | .mode = 0555, | 630 | .data = &nf_ct_sctp_timeout_shutdown_ack_sent, |
615 | .child = nf_ct_netfilter_table, | 631 | .maxlen = sizeof(unsigned int), |
632 | .mode = 0644, | ||
633 | .proc_handler = &proc_dointvec_jiffies, | ||
616 | }, | 634 | }, |
617 | { .ctl_name = 0 } | 635 | { |
636 | .ctl_name = 0 | ||
637 | } | ||
638 | }; | ||
639 | #endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */ | ||
640 | #endif | ||
641 | |||
642 | struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 = { | ||
643 | .l3proto = PF_INET, | ||
644 | .l4proto = IPPROTO_SCTP, | ||
645 | .name = "sctp", | ||
646 | .pkt_to_tuple = sctp_pkt_to_tuple, | ||
647 | .invert_tuple = sctp_invert_tuple, | ||
648 | .print_tuple = sctp_print_tuple, | ||
649 | .print_conntrack = sctp_print_conntrack, | ||
650 | .packet = sctp_packet, | ||
651 | .new = sctp_new, | ||
652 | .me = THIS_MODULE, | ||
653 | #ifdef CONFIG_SYSCTL | ||
654 | .ctl_table_users = &sctp_sysctl_table_users, | ||
655 | .ctl_table_header = &sctp_sysctl_header, | ||
656 | .ctl_table = sctp_sysctl_table, | ||
657 | #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT | ||
658 | .ctl_compat_table = sctp_compat_sysctl_table, | ||
659 | #endif | ||
660 | #endif | ||
618 | }; | 661 | }; |
619 | 662 | ||
620 | static struct ctl_table_header *nf_ct_sysctl_header; | 663 | struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 = { |
664 | .l3proto = PF_INET6, | ||
665 | .l4proto = IPPROTO_SCTP, | ||
666 | .name = "sctp", | ||
667 | .pkt_to_tuple = sctp_pkt_to_tuple, | ||
668 | .invert_tuple = sctp_invert_tuple, | ||
669 | .print_tuple = sctp_print_tuple, | ||
670 | .print_conntrack = sctp_print_conntrack, | ||
671 | .packet = sctp_packet, | ||
672 | .new = sctp_new, | ||
673 | .me = THIS_MODULE, | ||
674 | #ifdef CONFIG_SYSCTL | ||
675 | .ctl_table_users = &sctp_sysctl_table_users, | ||
676 | .ctl_table_header = &sctp_sysctl_header, | ||
677 | .ctl_table = sctp_sysctl_table, | ||
621 | #endif | 678 | #endif |
679 | }; | ||
622 | 680 | ||
623 | int __init nf_conntrack_proto_sctp_init(void) | 681 | int __init nf_conntrack_proto_sctp_init(void) |
624 | { | 682 | { |
625 | int ret; | 683 | int ret; |
626 | 684 | ||
627 | ret = nf_conntrack_protocol_register(&nf_conntrack_protocol_sctp4); | 685 | ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_sctp4); |
628 | if (ret) { | 686 | if (ret) { |
629 | printk("nf_conntrack_proto_sctp4: protocol register failed\n"); | 687 | printk("nf_conntrack_l4proto_sctp4: protocol register failed\n"); |
630 | goto out; | 688 | goto out; |
631 | } | 689 | } |
632 | ret = nf_conntrack_protocol_register(&nf_conntrack_protocol_sctp6); | 690 | ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_sctp6); |
633 | if (ret) { | 691 | if (ret) { |
634 | printk("nf_conntrack_proto_sctp6: protocol register failed\n"); | 692 | printk("nf_conntrack_l4proto_sctp6: protocol register failed\n"); |
635 | goto cleanup_sctp4; | 693 | goto cleanup_sctp4; |
636 | } | 694 | } |
637 | 695 | ||
638 | #ifdef CONFIG_SYSCTL | ||
639 | nf_ct_sysctl_header = register_sysctl_table(nf_ct_net_table, 0); | ||
640 | if (nf_ct_sysctl_header == NULL) { | ||
641 | printk("nf_conntrack_proto_sctp: can't register to sysctl.\n"); | ||
642 | goto cleanup; | ||
643 | } | ||
644 | #endif | ||
645 | |||
646 | return ret; | 696 | return ret; |
647 | 697 | ||
648 | #ifdef CONFIG_SYSCTL | ||
649 | cleanup: | ||
650 | nf_conntrack_protocol_unregister(&nf_conntrack_protocol_sctp6); | ||
651 | #endif | ||
652 | cleanup_sctp4: | 698 | cleanup_sctp4: |
653 | nf_conntrack_protocol_unregister(&nf_conntrack_protocol_sctp4); | 699 | nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_sctp4); |
654 | out: | 700 | out: |
655 | DEBUGP("SCTP conntrack module loading %s\n", | 701 | DEBUGP("SCTP conntrack module loading %s\n", |
656 | ret ? "failed": "succeeded"); | 702 | ret ? "failed": "succeeded"); |
@@ -659,11 +705,8 @@ int __init nf_conntrack_proto_sctp_init(void) | |||
659 | 705 | ||
660 | void __exit nf_conntrack_proto_sctp_fini(void) | 706 | void __exit nf_conntrack_proto_sctp_fini(void) |
661 | { | 707 | { |
662 | nf_conntrack_protocol_unregister(&nf_conntrack_protocol_sctp6); | 708 | nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_sctp6); |
663 | nf_conntrack_protocol_unregister(&nf_conntrack_protocol_sctp4); | 709 | nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_sctp4); |
664 | #ifdef CONFIG_SYSCTL | ||
665 | unregister_sysctl_table(nf_ct_sysctl_header); | ||
666 | #endif | ||
667 | DEBUGP("SCTP conntrack module unloaded\n"); | 710 | DEBUGP("SCTP conntrack module unloaded\n"); |
668 | } | 711 | } |
669 | 712 | ||
@@ -673,3 +716,4 @@ module_exit(nf_conntrack_proto_sctp_fini); | |||
673 | MODULE_LICENSE("GPL"); | 716 | MODULE_LICENSE("GPL"); |
674 | MODULE_AUTHOR("Kiran Kumar Immidi"); | 717 | MODULE_AUTHOR("Kiran Kumar Immidi"); |
675 | MODULE_DESCRIPTION("Netfilter connection tracking protocol helper for SCTP"); | 718 | MODULE_DESCRIPTION("Netfilter connection tracking protocol helper for SCTP"); |
719 | MODULE_ALIAS("ip_conntrack_proto_sctp"); | ||
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c index 238bbb5b72ef..626b0011dd89 100644 --- a/net/netfilter/nf_conntrack_proto_tcp.c +++ b/net/netfilter/nf_conntrack_proto_tcp.c | |||
@@ -42,7 +42,8 @@ | |||
42 | #include <linux/netfilter_ipv4.h> | 42 | #include <linux/netfilter_ipv4.h> |
43 | #include <linux/netfilter_ipv6.h> | 43 | #include <linux/netfilter_ipv6.h> |
44 | #include <net/netfilter/nf_conntrack.h> | 44 | #include <net/netfilter/nf_conntrack.h> |
45 | #include <net/netfilter/nf_conntrack_protocol.h> | 45 | #include <net/netfilter/nf_conntrack_l4proto.h> |
46 | #include <net/netfilter/nf_conntrack_ecache.h> | ||
46 | 47 | ||
47 | #if 0 | 48 | #if 0 |
48 | #define DEBUGP printk | 49 | #define DEBUGP printk |
@@ -92,22 +93,22 @@ static const char *tcp_conntrack_names[] = { | |||
92 | #define HOURS * 60 MINS | 93 | #define HOURS * 60 MINS |
93 | #define DAYS * 24 HOURS | 94 | #define DAYS * 24 HOURS |
94 | 95 | ||
95 | unsigned int nf_ct_tcp_timeout_syn_sent __read_mostly = 2 MINS; | 96 | static unsigned int nf_ct_tcp_timeout_syn_sent __read_mostly = 2 MINS; |
96 | unsigned int nf_ct_tcp_timeout_syn_recv __read_mostly = 60 SECS; | 97 | static unsigned int nf_ct_tcp_timeout_syn_recv __read_mostly = 60 SECS; |
97 | unsigned int nf_ct_tcp_timeout_established __read_mostly = 5 DAYS; | 98 | static unsigned int nf_ct_tcp_timeout_established __read_mostly = 5 DAYS; |
98 | unsigned int nf_ct_tcp_timeout_fin_wait __read_mostly = 2 MINS; | 99 | static unsigned int nf_ct_tcp_timeout_fin_wait __read_mostly = 2 MINS; |
99 | unsigned int nf_ct_tcp_timeout_close_wait __read_mostly = 60 SECS; | 100 | static unsigned int nf_ct_tcp_timeout_close_wait __read_mostly = 60 SECS; |
100 | unsigned int nf_ct_tcp_timeout_last_ack __read_mostly = 30 SECS; | 101 | static unsigned int nf_ct_tcp_timeout_last_ack __read_mostly = 30 SECS; |
101 | unsigned int nf_ct_tcp_timeout_time_wait __read_mostly = 2 MINS; | 102 | static unsigned int nf_ct_tcp_timeout_time_wait __read_mostly = 2 MINS; |
102 | unsigned int nf_ct_tcp_timeout_close __read_mostly = 10 SECS; | 103 | static unsigned int nf_ct_tcp_timeout_close __read_mostly = 10 SECS; |
103 | 104 | ||
104 | /* RFC1122 says the R2 limit should be at least 100 seconds. | 105 | /* RFC1122 says the R2 limit should be at least 100 seconds. |
105 | Linux uses 15 packets as limit, which corresponds | 106 | Linux uses 15 packets as limit, which corresponds |
106 | to ~13-30min depending on RTO. */ | 107 | to ~13-30min depending on RTO. */ |
107 | unsigned int nf_ct_tcp_timeout_max_retrans __read_mostly = 5 MINS; | 108 | static unsigned int nf_ct_tcp_timeout_max_retrans __read_mostly = 5 MINS; |
108 | 109 | ||
109 | static unsigned int * tcp_timeouts[] | 110 | static unsigned int * tcp_timeouts[] = { |
110 | = { NULL, /* TCP_CONNTRACK_NONE */ | 111 | NULL, /* TCP_CONNTRACK_NONE */ |
111 | &nf_ct_tcp_timeout_syn_sent, /* TCP_CONNTRACK_SYN_SENT, */ | 112 | &nf_ct_tcp_timeout_syn_sent, /* TCP_CONNTRACK_SYN_SENT, */ |
112 | &nf_ct_tcp_timeout_syn_recv, /* TCP_CONNTRACK_SYN_RECV, */ | 113 | &nf_ct_tcp_timeout_syn_recv, /* TCP_CONNTRACK_SYN_RECV, */ |
113 | &nf_ct_tcp_timeout_established, /* TCP_CONNTRACK_ESTABLISHED, */ | 114 | &nf_ct_tcp_timeout_established, /* TCP_CONNTRACK_ESTABLISHED, */ |
@@ -473,8 +474,8 @@ static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff, | |||
473 | 474 | ||
474 | /* Fast path for timestamp-only option */ | 475 | /* Fast path for timestamp-only option */ |
475 | if (length == TCPOLEN_TSTAMP_ALIGNED*4 | 476 | if (length == TCPOLEN_TSTAMP_ALIGNED*4 |
476 | && *(__u32 *)ptr == | 477 | && *(__be32 *)ptr == |
477 | __constant_ntohl((TCPOPT_NOP << 24) | 478 | __constant_htonl((TCPOPT_NOP << 24) |
478 | | (TCPOPT_NOP << 16) | 479 | | (TCPOPT_NOP << 16) |
479 | | (TCPOPT_TIMESTAMP << 8) | 480 | | (TCPOPT_TIMESTAMP << 8) |
480 | | TCPOLEN_TIMESTAMP)) | 481 | | TCPOLEN_TIMESTAMP)) |
@@ -505,9 +506,7 @@ static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff, | |||
505 | for (i = 0; | 506 | for (i = 0; |
506 | i < (opsize - TCPOLEN_SACK_BASE); | 507 | i < (opsize - TCPOLEN_SACK_BASE); |
507 | i += TCPOLEN_SACK_PERBLOCK) { | 508 | i += TCPOLEN_SACK_PERBLOCK) { |
508 | memcpy(&tmp, (__u32 *)(ptr + i) + 1, | 509 | tmp = ntohl(*((__be32 *)(ptr+i)+1)); |
509 | sizeof(__u32)); | ||
510 | tmp = ntohl(tmp); | ||
511 | 510 | ||
512 | if (after(tmp, *sack)) | 511 | if (after(tmp, *sack)) |
513 | *sack = tmp; | 512 | *sack = tmp; |
@@ -731,7 +730,7 @@ static int tcp_in_window(struct ip_ct_tcp *state, | |||
731 | return res; | 730 | return res; |
732 | } | 731 | } |
733 | 732 | ||
734 | #ifdef CONFIG_IP_NF_NAT_NEEDED | 733 | #ifdef CONFIG_NF_NAT_NEEDED |
735 | /* Update sender->td_end after NAT successfully mangled the packet */ | 734 | /* Update sender->td_end after NAT successfully mangled the packet */ |
736 | /* Caller must linearize skb at tcp header. */ | 735 | /* Caller must linearize skb at tcp header. */ |
737 | void nf_conntrack_tcp_update(struct sk_buff *skb, | 736 | void nf_conntrack_tcp_update(struct sk_buff *skb, |
@@ -763,7 +762,7 @@ void nf_conntrack_tcp_update(struct sk_buff *skb, | |||
763 | receiver->td_end, receiver->td_maxend, receiver->td_maxwin, | 762 | receiver->td_end, receiver->td_maxend, receiver->td_maxwin, |
764 | receiver->td_scale); | 763 | receiver->td_scale); |
765 | } | 764 | } |
766 | 765 | EXPORT_SYMBOL_GPL(nf_conntrack_tcp_update); | |
767 | #endif | 766 | #endif |
768 | 767 | ||
769 | #define TH_FIN 0x01 | 768 | #define TH_FIN 0x01 |
@@ -1167,11 +1166,221 @@ static int nfattr_to_tcp(struct nfattr *cda[], struct nf_conn *ct) | |||
1167 | return 0; | 1166 | return 0; |
1168 | } | 1167 | } |
1169 | #endif | 1168 | #endif |
1170 | 1169 | ||
1171 | struct nf_conntrack_protocol nf_conntrack_protocol_tcp4 = | 1170 | #ifdef CONFIG_SYSCTL |
1171 | static unsigned int tcp_sysctl_table_users; | ||
1172 | static struct ctl_table_header *tcp_sysctl_header; | ||
1173 | static struct ctl_table tcp_sysctl_table[] = { | ||
1174 | { | ||
1175 | .ctl_name = NET_NF_CONNTRACK_TCP_TIMEOUT_SYN_SENT, | ||
1176 | .procname = "nf_conntrack_tcp_timeout_syn_sent", | ||
1177 | .data = &nf_ct_tcp_timeout_syn_sent, | ||
1178 | .maxlen = sizeof(unsigned int), | ||
1179 | .mode = 0644, | ||
1180 | .proc_handler = &proc_dointvec_jiffies, | ||
1181 | }, | ||
1182 | { | ||
1183 | .ctl_name = NET_NF_CONNTRACK_TCP_TIMEOUT_SYN_RECV, | ||
1184 | .procname = "nf_conntrack_tcp_timeout_syn_recv", | ||
1185 | .data = &nf_ct_tcp_timeout_syn_recv, | ||
1186 | .maxlen = sizeof(unsigned int), | ||
1187 | .mode = 0644, | ||
1188 | .proc_handler = &proc_dointvec_jiffies, | ||
1189 | }, | ||
1190 | { | ||
1191 | .ctl_name = NET_NF_CONNTRACK_TCP_TIMEOUT_ESTABLISHED, | ||
1192 | .procname = "nf_conntrack_tcp_timeout_established", | ||
1193 | .data = &nf_ct_tcp_timeout_established, | ||
1194 | .maxlen = sizeof(unsigned int), | ||
1195 | .mode = 0644, | ||
1196 | .proc_handler = &proc_dointvec_jiffies, | ||
1197 | }, | ||
1198 | { | ||
1199 | .ctl_name = NET_NF_CONNTRACK_TCP_TIMEOUT_FIN_WAIT, | ||
1200 | .procname = "nf_conntrack_tcp_timeout_fin_wait", | ||
1201 | .data = &nf_ct_tcp_timeout_fin_wait, | ||
1202 | .maxlen = sizeof(unsigned int), | ||
1203 | .mode = 0644, | ||
1204 | .proc_handler = &proc_dointvec_jiffies, | ||
1205 | }, | ||
1206 | { | ||
1207 | .ctl_name = NET_NF_CONNTRACK_TCP_TIMEOUT_CLOSE_WAIT, | ||
1208 | .procname = "nf_conntrack_tcp_timeout_close_wait", | ||
1209 | .data = &nf_ct_tcp_timeout_close_wait, | ||
1210 | .maxlen = sizeof(unsigned int), | ||
1211 | .mode = 0644, | ||
1212 | .proc_handler = &proc_dointvec_jiffies, | ||
1213 | }, | ||
1214 | { | ||
1215 | .ctl_name = NET_NF_CONNTRACK_TCP_TIMEOUT_LAST_ACK, | ||
1216 | .procname = "nf_conntrack_tcp_timeout_last_ack", | ||
1217 | .data = &nf_ct_tcp_timeout_last_ack, | ||
1218 | .maxlen = sizeof(unsigned int), | ||
1219 | .mode = 0644, | ||
1220 | .proc_handler = &proc_dointvec_jiffies, | ||
1221 | }, | ||
1222 | { | ||
1223 | .ctl_name = NET_NF_CONNTRACK_TCP_TIMEOUT_TIME_WAIT, | ||
1224 | .procname = "nf_conntrack_tcp_timeout_time_wait", | ||
1225 | .data = &nf_ct_tcp_timeout_time_wait, | ||
1226 | .maxlen = sizeof(unsigned int), | ||
1227 | .mode = 0644, | ||
1228 | .proc_handler = &proc_dointvec_jiffies, | ||
1229 | }, | ||
1230 | { | ||
1231 | .ctl_name = NET_NF_CONNTRACK_TCP_TIMEOUT_CLOSE, | ||
1232 | .procname = "nf_conntrack_tcp_timeout_close", | ||
1233 | .data = &nf_ct_tcp_timeout_close, | ||
1234 | .maxlen = sizeof(unsigned int), | ||
1235 | .mode = 0644, | ||
1236 | .proc_handler = &proc_dointvec_jiffies, | ||
1237 | }, | ||
1238 | { | ||
1239 | .ctl_name = NET_NF_CONNTRACK_TCP_TIMEOUT_MAX_RETRANS, | ||
1240 | .procname = "nf_conntrack_tcp_timeout_max_retrans", | ||
1241 | .data = &nf_ct_tcp_timeout_max_retrans, | ||
1242 | .maxlen = sizeof(unsigned int), | ||
1243 | .mode = 0644, | ||
1244 | .proc_handler = &proc_dointvec_jiffies, | ||
1245 | }, | ||
1246 | { | ||
1247 | .ctl_name = NET_NF_CONNTRACK_TCP_LOOSE, | ||
1248 | .procname = "nf_conntrack_tcp_loose", | ||
1249 | .data = &nf_ct_tcp_loose, | ||
1250 | .maxlen = sizeof(unsigned int), | ||
1251 | .mode = 0644, | ||
1252 | .proc_handler = &proc_dointvec, | ||
1253 | }, | ||
1254 | { | ||
1255 | .ctl_name = NET_NF_CONNTRACK_TCP_BE_LIBERAL, | ||
1256 | .procname = "nf_conntrack_tcp_be_liberal", | ||
1257 | .data = &nf_ct_tcp_be_liberal, | ||
1258 | .maxlen = sizeof(unsigned int), | ||
1259 | .mode = 0644, | ||
1260 | .proc_handler = &proc_dointvec, | ||
1261 | }, | ||
1262 | { | ||
1263 | .ctl_name = NET_NF_CONNTRACK_TCP_MAX_RETRANS, | ||
1264 | .procname = "nf_conntrack_tcp_max_retrans", | ||
1265 | .data = &nf_ct_tcp_max_retrans, | ||
1266 | .maxlen = sizeof(unsigned int), | ||
1267 | .mode = 0644, | ||
1268 | .proc_handler = &proc_dointvec, | ||
1269 | }, | ||
1270 | { | ||
1271 | .ctl_name = 0 | ||
1272 | } | ||
1273 | }; | ||
1274 | |||
1275 | #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT | ||
1276 | static struct ctl_table tcp_compat_sysctl_table[] = { | ||
1277 | { | ||
1278 | .ctl_name = NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_SYN_SENT, | ||
1279 | .procname = "ip_conntrack_tcp_timeout_syn_sent", | ||
1280 | .data = &nf_ct_tcp_timeout_syn_sent, | ||
1281 | .maxlen = sizeof(unsigned int), | ||
1282 | .mode = 0644, | ||
1283 | .proc_handler = &proc_dointvec_jiffies, | ||
1284 | }, | ||
1285 | { | ||
1286 | .ctl_name = NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_SYN_RECV, | ||
1287 | .procname = "ip_conntrack_tcp_timeout_syn_recv", | ||
1288 | .data = &nf_ct_tcp_timeout_syn_recv, | ||
1289 | .maxlen = sizeof(unsigned int), | ||
1290 | .mode = 0644, | ||
1291 | .proc_handler = &proc_dointvec_jiffies, | ||
1292 | }, | ||
1293 | { | ||
1294 | .ctl_name = NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_ESTABLISHED, | ||
1295 | .procname = "ip_conntrack_tcp_timeout_established", | ||
1296 | .data = &nf_ct_tcp_timeout_established, | ||
1297 | .maxlen = sizeof(unsigned int), | ||
1298 | .mode = 0644, | ||
1299 | .proc_handler = &proc_dointvec_jiffies, | ||
1300 | }, | ||
1301 | { | ||
1302 | .ctl_name = NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_FIN_WAIT, | ||
1303 | .procname = "ip_conntrack_tcp_timeout_fin_wait", | ||
1304 | .data = &nf_ct_tcp_timeout_fin_wait, | ||
1305 | .maxlen = sizeof(unsigned int), | ||
1306 | .mode = 0644, | ||
1307 | .proc_handler = &proc_dointvec_jiffies, | ||
1308 | }, | ||
1309 | { | ||
1310 | .ctl_name = NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_CLOSE_WAIT, | ||
1311 | .procname = "ip_conntrack_tcp_timeout_close_wait", | ||
1312 | .data = &nf_ct_tcp_timeout_close_wait, | ||
1313 | .maxlen = sizeof(unsigned int), | ||
1314 | .mode = 0644, | ||
1315 | .proc_handler = &proc_dointvec_jiffies, | ||
1316 | }, | ||
1317 | { | ||
1318 | .ctl_name = NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_LAST_ACK, | ||
1319 | .procname = "ip_conntrack_tcp_timeout_last_ack", | ||
1320 | .data = &nf_ct_tcp_timeout_last_ack, | ||
1321 | .maxlen = sizeof(unsigned int), | ||
1322 | .mode = 0644, | ||
1323 | .proc_handler = &proc_dointvec_jiffies, | ||
1324 | }, | ||
1325 | { | ||
1326 | .ctl_name = NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_TIME_WAIT, | ||
1327 | .procname = "ip_conntrack_tcp_timeout_time_wait", | ||
1328 | .data = &nf_ct_tcp_timeout_time_wait, | ||
1329 | .maxlen = sizeof(unsigned int), | ||
1330 | .mode = 0644, | ||
1331 | .proc_handler = &proc_dointvec_jiffies, | ||
1332 | }, | ||
1333 | { | ||
1334 | .ctl_name = NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_CLOSE, | ||
1335 | .procname = "ip_conntrack_tcp_timeout_close", | ||
1336 | .data = &nf_ct_tcp_timeout_close, | ||
1337 | .maxlen = sizeof(unsigned int), | ||
1338 | .mode = 0644, | ||
1339 | .proc_handler = &proc_dointvec_jiffies, | ||
1340 | }, | ||
1341 | { | ||
1342 | .ctl_name = NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_MAX_RETRANS, | ||
1343 | .procname = "ip_conntrack_tcp_timeout_max_retrans", | ||
1344 | .data = &nf_ct_tcp_timeout_max_retrans, | ||
1345 | .maxlen = sizeof(unsigned int), | ||
1346 | .mode = 0644, | ||
1347 | .proc_handler = &proc_dointvec_jiffies, | ||
1348 | }, | ||
1349 | { | ||
1350 | .ctl_name = NET_IPV4_NF_CONNTRACK_TCP_LOOSE, | ||
1351 | .procname = "ip_conntrack_tcp_loose", | ||
1352 | .data = &nf_ct_tcp_loose, | ||
1353 | .maxlen = sizeof(unsigned int), | ||
1354 | .mode = 0644, | ||
1355 | .proc_handler = &proc_dointvec, | ||
1356 | }, | ||
1357 | { | ||
1358 | .ctl_name = NET_IPV4_NF_CONNTRACK_TCP_BE_LIBERAL, | ||
1359 | .procname = "ip_conntrack_tcp_be_liberal", | ||
1360 | .data = &nf_ct_tcp_be_liberal, | ||
1361 | .maxlen = sizeof(unsigned int), | ||
1362 | .mode = 0644, | ||
1363 | .proc_handler = &proc_dointvec, | ||
1364 | }, | ||
1365 | { | ||
1366 | .ctl_name = NET_IPV4_NF_CONNTRACK_TCP_MAX_RETRANS, | ||
1367 | .procname = "ip_conntrack_tcp_max_retrans", | ||
1368 | .data = &nf_ct_tcp_max_retrans, | ||
1369 | .maxlen = sizeof(unsigned int), | ||
1370 | .mode = 0644, | ||
1371 | .proc_handler = &proc_dointvec, | ||
1372 | }, | ||
1373 | { | ||
1374 | .ctl_name = 0 | ||
1375 | } | ||
1376 | }; | ||
1377 | #endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */ | ||
1378 | #endif /* CONFIG_SYSCTL */ | ||
1379 | |||
1380 | struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 = | ||
1172 | { | 1381 | { |
1173 | .l3proto = PF_INET, | 1382 | .l3proto = PF_INET, |
1174 | .proto = IPPROTO_TCP, | 1383 | .l4proto = IPPROTO_TCP, |
1175 | .name = "tcp", | 1384 | .name = "tcp", |
1176 | .pkt_to_tuple = tcp_pkt_to_tuple, | 1385 | .pkt_to_tuple = tcp_pkt_to_tuple, |
1177 | .invert_tuple = tcp_invert_tuple, | 1386 | .invert_tuple = tcp_invert_tuple, |
@@ -1187,12 +1396,21 @@ struct nf_conntrack_protocol nf_conntrack_protocol_tcp4 = | |||
1187 | .tuple_to_nfattr = nf_ct_port_tuple_to_nfattr, | 1396 | .tuple_to_nfattr = nf_ct_port_tuple_to_nfattr, |
1188 | .nfattr_to_tuple = nf_ct_port_nfattr_to_tuple, | 1397 | .nfattr_to_tuple = nf_ct_port_nfattr_to_tuple, |
1189 | #endif | 1398 | #endif |
1399 | #ifdef CONFIG_SYSCTL | ||
1400 | .ctl_table_users = &tcp_sysctl_table_users, | ||
1401 | .ctl_table_header = &tcp_sysctl_header, | ||
1402 | .ctl_table = tcp_sysctl_table, | ||
1403 | #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT | ||
1404 | .ctl_compat_table = tcp_compat_sysctl_table, | ||
1405 | #endif | ||
1406 | #endif | ||
1190 | }; | 1407 | }; |
1408 | EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_tcp4); | ||
1191 | 1409 | ||
1192 | struct nf_conntrack_protocol nf_conntrack_protocol_tcp6 = | 1410 | struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6 = |
1193 | { | 1411 | { |
1194 | .l3proto = PF_INET6, | 1412 | .l3proto = PF_INET6, |
1195 | .proto = IPPROTO_TCP, | 1413 | .l4proto = IPPROTO_TCP, |
1196 | .name = "tcp", | 1414 | .name = "tcp", |
1197 | .pkt_to_tuple = tcp_pkt_to_tuple, | 1415 | .pkt_to_tuple = tcp_pkt_to_tuple, |
1198 | .invert_tuple = tcp_invert_tuple, | 1416 | .invert_tuple = tcp_invert_tuple, |
@@ -1208,7 +1426,10 @@ struct nf_conntrack_protocol nf_conntrack_protocol_tcp6 = | |||
1208 | .tuple_to_nfattr = nf_ct_port_tuple_to_nfattr, | 1426 | .tuple_to_nfattr = nf_ct_port_tuple_to_nfattr, |
1209 | .nfattr_to_tuple = nf_ct_port_nfattr_to_tuple, | 1427 | .nfattr_to_tuple = nf_ct_port_nfattr_to_tuple, |
1210 | #endif | 1428 | #endif |
1429 | #ifdef CONFIG_SYSCTL | ||
1430 | .ctl_table_users = &tcp_sysctl_table_users, | ||
1431 | .ctl_table_header = &tcp_sysctl_header, | ||
1432 | .ctl_table = tcp_sysctl_table, | ||
1433 | #endif | ||
1211 | }; | 1434 | }; |
1212 | 1435 | EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_tcp6); | |
1213 | EXPORT_SYMBOL(nf_conntrack_protocol_tcp4); | ||
1214 | EXPORT_SYMBOL(nf_conntrack_protocol_tcp6); | ||
diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c index d28981cf9af5..e49cd25998c4 100644 --- a/net/netfilter/nf_conntrack_proto_udp.c +++ b/net/netfilter/nf_conntrack_proto_udp.c | |||
@@ -22,13 +22,15 @@ | |||
22 | #include <linux/ipv6.h> | 22 | #include <linux/ipv6.h> |
23 | #include <net/ip6_checksum.h> | 23 | #include <net/ip6_checksum.h> |
24 | #include <net/checksum.h> | 24 | #include <net/checksum.h> |
25 | |||
25 | #include <linux/netfilter.h> | 26 | #include <linux/netfilter.h> |
26 | #include <linux/netfilter_ipv4.h> | 27 | #include <linux/netfilter_ipv4.h> |
27 | #include <linux/netfilter_ipv6.h> | 28 | #include <linux/netfilter_ipv6.h> |
28 | #include <net/netfilter/nf_conntrack_protocol.h> | 29 | #include <net/netfilter/nf_conntrack_l4proto.h> |
30 | #include <net/netfilter/nf_conntrack_ecache.h> | ||
29 | 31 | ||
30 | unsigned int nf_ct_udp_timeout __read_mostly = 30*HZ; | 32 | static unsigned int nf_ct_udp_timeout __read_mostly = 30*HZ; |
31 | unsigned int nf_ct_udp_timeout_stream __read_mostly = 180*HZ; | 33 | static unsigned int nf_ct_udp_timeout_stream __read_mostly = 180*HZ; |
32 | 34 | ||
33 | static int udp_pkt_to_tuple(const struct sk_buff *skb, | 35 | static int udp_pkt_to_tuple(const struct sk_buff *skb, |
34 | unsigned int dataoff, | 36 | unsigned int dataoff, |
@@ -146,10 +148,59 @@ static int udp_error(struct sk_buff *skb, unsigned int dataoff, | |||
146 | return NF_ACCEPT; | 148 | return NF_ACCEPT; |
147 | } | 149 | } |
148 | 150 | ||
149 | struct nf_conntrack_protocol nf_conntrack_protocol_udp4 = | 151 | #ifdef CONFIG_SYSCTL |
152 | static unsigned int udp_sysctl_table_users; | ||
153 | static struct ctl_table_header *udp_sysctl_header; | ||
154 | static struct ctl_table udp_sysctl_table[] = { | ||
155 | { | ||
156 | .ctl_name = NET_NF_CONNTRACK_UDP_TIMEOUT, | ||
157 | .procname = "nf_conntrack_udp_timeout", | ||
158 | .data = &nf_ct_udp_timeout, | ||
159 | .maxlen = sizeof(unsigned int), | ||
160 | .mode = 0644, | ||
161 | .proc_handler = &proc_dointvec_jiffies, | ||
162 | }, | ||
163 | { | ||
164 | .ctl_name = NET_NF_CONNTRACK_UDP_TIMEOUT_STREAM, | ||
165 | .procname = "nf_conntrack_udp_timeout_stream", | ||
166 | .data = &nf_ct_udp_timeout_stream, | ||
167 | .maxlen = sizeof(unsigned int), | ||
168 | .mode = 0644, | ||
169 | .proc_handler = &proc_dointvec_jiffies, | ||
170 | }, | ||
171 | { | ||
172 | .ctl_name = 0 | ||
173 | } | ||
174 | }; | ||
175 | #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT | ||
176 | static struct ctl_table udp_compat_sysctl_table[] = { | ||
177 | { | ||
178 | .ctl_name = NET_IPV4_NF_CONNTRACK_UDP_TIMEOUT, | ||
179 | .procname = "ip_conntrack_udp_timeout", | ||
180 | .data = &nf_ct_udp_timeout, | ||
181 | .maxlen = sizeof(unsigned int), | ||
182 | .mode = 0644, | ||
183 | .proc_handler = &proc_dointvec_jiffies, | ||
184 | }, | ||
185 | { | ||
186 | .ctl_name = NET_IPV4_NF_CONNTRACK_UDP_TIMEOUT_STREAM, | ||
187 | .procname = "ip_conntrack_udp_timeout_stream", | ||
188 | .data = &nf_ct_udp_timeout_stream, | ||
189 | .maxlen = sizeof(unsigned int), | ||
190 | .mode = 0644, | ||
191 | .proc_handler = &proc_dointvec_jiffies, | ||
192 | }, | ||
193 | { | ||
194 | .ctl_name = 0 | ||
195 | } | ||
196 | }; | ||
197 | #endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */ | ||
198 | #endif /* CONFIG_SYSCTL */ | ||
199 | |||
200 | struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 = | ||
150 | { | 201 | { |
151 | .l3proto = PF_INET, | 202 | .l3proto = PF_INET, |
152 | .proto = IPPROTO_UDP, | 203 | .l4proto = IPPROTO_UDP, |
153 | .name = "udp", | 204 | .name = "udp", |
154 | .pkt_to_tuple = udp_pkt_to_tuple, | 205 | .pkt_to_tuple = udp_pkt_to_tuple, |
155 | .invert_tuple = udp_invert_tuple, | 206 | .invert_tuple = udp_invert_tuple, |
@@ -163,12 +214,21 @@ struct nf_conntrack_protocol nf_conntrack_protocol_udp4 = | |||
163 | .tuple_to_nfattr = nf_ct_port_tuple_to_nfattr, | 214 | .tuple_to_nfattr = nf_ct_port_tuple_to_nfattr, |
164 | .nfattr_to_tuple = nf_ct_port_nfattr_to_tuple, | 215 | .nfattr_to_tuple = nf_ct_port_nfattr_to_tuple, |
165 | #endif | 216 | #endif |
217 | #ifdef CONFIG_SYSCTL | ||
218 | .ctl_table_users = &udp_sysctl_table_users, | ||
219 | .ctl_table_header = &udp_sysctl_header, | ||
220 | .ctl_table = udp_sysctl_table, | ||
221 | #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT | ||
222 | .ctl_compat_table = udp_compat_sysctl_table, | ||
223 | #endif | ||
224 | #endif | ||
166 | }; | 225 | }; |
226 | EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udp4); | ||
167 | 227 | ||
168 | struct nf_conntrack_protocol nf_conntrack_protocol_udp6 = | 228 | struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 = |
169 | { | 229 | { |
170 | .l3proto = PF_INET6, | 230 | .l3proto = PF_INET6, |
171 | .proto = IPPROTO_UDP, | 231 | .l4proto = IPPROTO_UDP, |
172 | .name = "udp", | 232 | .name = "udp", |
173 | .pkt_to_tuple = udp_pkt_to_tuple, | 233 | .pkt_to_tuple = udp_pkt_to_tuple, |
174 | .invert_tuple = udp_invert_tuple, | 234 | .invert_tuple = udp_invert_tuple, |
@@ -182,7 +242,10 @@ struct nf_conntrack_protocol nf_conntrack_protocol_udp6 = | |||
182 | .tuple_to_nfattr = nf_ct_port_tuple_to_nfattr, | 242 | .tuple_to_nfattr = nf_ct_port_tuple_to_nfattr, |
183 | .nfattr_to_tuple = nf_ct_port_nfattr_to_tuple, | 243 | .nfattr_to_tuple = nf_ct_port_nfattr_to_tuple, |
184 | #endif | 244 | #endif |
245 | #ifdef CONFIG_SYSCTL | ||
246 | .ctl_table_users = &udp_sysctl_table_users, | ||
247 | .ctl_table_header = &udp_sysctl_header, | ||
248 | .ctl_table = udp_sysctl_table, | ||
249 | #endif | ||
185 | }; | 250 | }; |
186 | 251 | EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udp6); | |
187 | EXPORT_SYMBOL(nf_conntrack_protocol_udp4); | ||
188 | EXPORT_SYMBOL(nf_conntrack_protocol_udp6); | ||
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c new file mode 100644 index 000000000000..eb2a2411f97b --- /dev/null +++ b/net/netfilter/nf_conntrack_sip.c | |||
@@ -0,0 +1,531 @@ | |||
1 | /* SIP extension for IP connection tracking. | ||
2 | * | ||
3 | * (C) 2005 by Christian Hentschel <chentschel@arnet.com.ar> | ||
4 | * based on RR's ip_conntrack_ftp.c and other modules. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | |||
11 | #include <linux/module.h> | ||
12 | #include <linux/ctype.h> | ||
13 | #include <linux/skbuff.h> | ||
14 | #include <linux/inet.h> | ||
15 | #include <linux/in.h> | ||
16 | #include <linux/udp.h> | ||
17 | #include <linux/netfilter.h> | ||
18 | |||
19 | #include <net/netfilter/nf_conntrack.h> | ||
20 | #include <net/netfilter/nf_conntrack_expect.h> | ||
21 | #include <net/netfilter/nf_conntrack_helper.h> | ||
22 | #include <linux/netfilter/nf_conntrack_sip.h> | ||
23 | |||
24 | #if 0 | ||
25 | #define DEBUGP printk | ||
26 | #else | ||
27 | #define DEBUGP(format, args...) | ||
28 | #endif | ||
29 | |||
30 | MODULE_LICENSE("GPL"); | ||
31 | MODULE_AUTHOR("Christian Hentschel <chentschel@arnet.com.ar>"); | ||
32 | MODULE_DESCRIPTION("SIP connection tracking helper"); | ||
33 | MODULE_ALIAS("ip_conntrack_sip"); | ||
34 | |||
35 | #define MAX_PORTS 8 | ||
36 | static unsigned short ports[MAX_PORTS]; | ||
37 | static int ports_c; | ||
38 | module_param_array(ports, ushort, &ports_c, 0400); | ||
39 | MODULE_PARM_DESC(ports, "port numbers of SIP servers"); | ||
40 | |||
41 | static unsigned int sip_timeout __read_mostly = SIP_TIMEOUT; | ||
42 | module_param(sip_timeout, uint, 0600); | ||
43 | MODULE_PARM_DESC(sip_timeout, "timeout for the master SIP session"); | ||
44 | |||
45 | unsigned int (*nf_nat_sip_hook)(struct sk_buff **pskb, | ||
46 | enum ip_conntrack_info ctinfo, | ||
47 | struct nf_conn *ct, | ||
48 | const char **dptr) __read_mostly; | ||
49 | EXPORT_SYMBOL_GPL(nf_nat_sip_hook); | ||
50 | |||
51 | unsigned int (*nf_nat_sdp_hook)(struct sk_buff **pskb, | ||
52 | enum ip_conntrack_info ctinfo, | ||
53 | struct nf_conntrack_expect *exp, | ||
54 | const char *dptr) __read_mostly; | ||
55 | EXPORT_SYMBOL_GPL(nf_nat_sdp_hook); | ||
56 | |||
57 | static int digits_len(struct nf_conn *, const char *, const char *, int *); | ||
58 | static int epaddr_len(struct nf_conn *, const char *, const char *, int *); | ||
59 | static int skp_digits_len(struct nf_conn *, const char *, const char *, int *); | ||
60 | static int skp_epaddr_len(struct nf_conn *, const char *, const char *, int *); | ||
61 | |||
62 | struct sip_header_nfo { | ||
63 | const char *lname; | ||
64 | const char *sname; | ||
65 | const char *ln_str; | ||
66 | size_t lnlen; | ||
67 | size_t snlen; | ||
68 | size_t ln_strlen; | ||
69 | int case_sensitive; | ||
70 | int (*match_len)(struct nf_conn *, const char *, | ||
71 | const char *, int *); | ||
72 | }; | ||
73 | |||
74 | static const struct sip_header_nfo ct_sip_hdrs[] = { | ||
75 | [POS_REG_REQ_URI] = { /* SIP REGISTER request URI */ | ||
76 | .lname = "sip:", | ||
77 | .lnlen = sizeof("sip:") - 1, | ||
78 | .ln_str = ":", | ||
79 | .ln_strlen = sizeof(":") - 1, | ||
80 | .match_len = epaddr_len, | ||
81 | }, | ||
82 | [POS_REQ_URI] = { /* SIP request URI */ | ||
83 | .lname = "sip:", | ||
84 | .lnlen = sizeof("sip:") - 1, | ||
85 | .ln_str = "@", | ||
86 | .ln_strlen = sizeof("@") - 1, | ||
87 | .match_len = epaddr_len, | ||
88 | }, | ||
89 | [POS_FROM] = { /* SIP From header */ | ||
90 | .lname = "From:", | ||
91 | .lnlen = sizeof("From:") - 1, | ||
92 | .sname = "\r\nf:", | ||
93 | .snlen = sizeof("\r\nf:") - 1, | ||
94 | .ln_str = "sip:", | ||
95 | .ln_strlen = sizeof("sip:") - 1, | ||
96 | .match_len = skp_epaddr_len, | ||
97 | }, | ||
98 | [POS_TO] = { /* SIP To header */ | ||
99 | .lname = "To:", | ||
100 | .lnlen = sizeof("To:") - 1, | ||
101 | .sname = "\r\nt:", | ||
102 | .snlen = sizeof("\r\nt:") - 1, | ||
103 | .ln_str = "sip:", | ||
104 | .ln_strlen = sizeof("sip:") - 1, | ||
105 | .match_len = skp_epaddr_len | ||
106 | }, | ||
107 | [POS_VIA] = { /* SIP Via header */ | ||
108 | .lname = "Via:", | ||
109 | .lnlen = sizeof("Via:") - 1, | ||
110 | .sname = "\r\nv:", | ||
111 | .snlen = sizeof("\r\nv:") - 1, /* rfc3261 "\r\n" */ | ||
112 | .ln_str = "UDP ", | ||
113 | .ln_strlen = sizeof("UDP ") - 1, | ||
114 | .match_len = epaddr_len, | ||
115 | }, | ||
116 | [POS_CONTACT] = { /* SIP Contact header */ | ||
117 | .lname = "Contact:", | ||
118 | .lnlen = sizeof("Contact:") - 1, | ||
119 | .sname = "\r\nm:", | ||
120 | .snlen = sizeof("\r\nm:") - 1, | ||
121 | .ln_str = "sip:", | ||
122 | .ln_strlen = sizeof("sip:") - 1, | ||
123 | .match_len = skp_epaddr_len | ||
124 | }, | ||
125 | [POS_CONTENT] = { /* SIP Content length header */ | ||
126 | .lname = "Content-Length:", | ||
127 | .lnlen = sizeof("Content-Length:") - 1, | ||
128 | .sname = "\r\nl:", | ||
129 | .snlen = sizeof("\r\nl:") - 1, | ||
130 | .ln_str = ":", | ||
131 | .ln_strlen = sizeof(":") - 1, | ||
132 | .match_len = skp_digits_len | ||
133 | }, | ||
134 | [POS_MEDIA] = { /* SDP media info */ | ||
135 | .case_sensitive = 1, | ||
136 | .lname = "\nm=", | ||
137 | .lnlen = sizeof("\nm=") - 1, | ||
138 | .sname = "\rm=", | ||
139 | .snlen = sizeof("\rm=") - 1, | ||
140 | .ln_str = "audio ", | ||
141 | .ln_strlen = sizeof("audio ") - 1, | ||
142 | .match_len = digits_len | ||
143 | }, | ||
144 | [POS_OWNER_IP4] = { /* SDP owner address*/ | ||
145 | .case_sensitive = 1, | ||
146 | .lname = "\no=", | ||
147 | .lnlen = sizeof("\no=") - 1, | ||
148 | .sname = "\ro=", | ||
149 | .snlen = sizeof("\ro=") - 1, | ||
150 | .ln_str = "IN IP4 ", | ||
151 | .ln_strlen = sizeof("IN IP4 ") - 1, | ||
152 | .match_len = epaddr_len | ||
153 | }, | ||
154 | [POS_CONNECTION_IP4] = {/* SDP connection info */ | ||
155 | .case_sensitive = 1, | ||
156 | .lname = "\nc=", | ||
157 | .lnlen = sizeof("\nc=") - 1, | ||
158 | .sname = "\rc=", | ||
159 | .snlen = sizeof("\rc=") - 1, | ||
160 | .ln_str = "IN IP4 ", | ||
161 | .ln_strlen = sizeof("IN IP4 ") - 1, | ||
162 | .match_len = epaddr_len | ||
163 | }, | ||
164 | [POS_OWNER_IP6] = { /* SDP owner address*/ | ||
165 | .case_sensitive = 1, | ||
166 | .lname = "\no=", | ||
167 | .lnlen = sizeof("\no=") - 1, | ||
168 | .sname = "\ro=", | ||
169 | .snlen = sizeof("\ro=") - 1, | ||
170 | .ln_str = "IN IP6 ", | ||
171 | .ln_strlen = sizeof("IN IP6 ") - 1, | ||
172 | .match_len = epaddr_len | ||
173 | }, | ||
174 | [POS_CONNECTION_IP6] = {/* SDP connection info */ | ||
175 | .case_sensitive = 1, | ||
176 | .lname = "\nc=", | ||
177 | .lnlen = sizeof("\nc=") - 1, | ||
178 | .sname = "\rc=", | ||
179 | .snlen = sizeof("\rc=") - 1, | ||
180 | .ln_str = "IN IP6 ", | ||
181 | .ln_strlen = sizeof("IN IP6 ") - 1, | ||
182 | .match_len = epaddr_len | ||
183 | }, | ||
184 | [POS_SDP_HEADER] = { /* SDP version header */ | ||
185 | .case_sensitive = 1, | ||
186 | .lname = "\nv=", | ||
187 | .lnlen = sizeof("\nv=") - 1, | ||
188 | .sname = "\rv=", | ||
189 | .snlen = sizeof("\rv=") - 1, | ||
190 | .ln_str = "=", | ||
191 | .ln_strlen = sizeof("=") - 1, | ||
192 | .match_len = digits_len | ||
193 | } | ||
194 | }; | ||
195 | |||
196 | /* get line length up to the first CR or LF seen. */ | ||
197 | int ct_sip_lnlen(const char *line, const char *limit) | ||
198 | { | ||
199 | const char *k = line; | ||
200 | |||
201 | while ((line <= limit) && (*line == '\r' || *line == '\n')) | ||
202 | line++; | ||
203 | |||
204 | while (line <= limit) { | ||
205 | if (*line == '\r' || *line == '\n') | ||
206 | break; | ||
207 | line++; | ||
208 | } | ||
209 | return line - k; | ||
210 | } | ||
211 | EXPORT_SYMBOL_GPL(ct_sip_lnlen); | ||
212 | |||
213 | /* Linear string search, optionally case sensitive. */ | ||
214 | const char *ct_sip_search(const char *needle, const char *haystack, | ||
215 | size_t needle_len, size_t haystack_len, | ||
216 | int case_sensitive) | ||
217 | { | ||
218 | const char *limit = haystack + (haystack_len - needle_len); | ||
219 | |||
220 | while (haystack <= limit) { | ||
221 | if (case_sensitive) { | ||
222 | if (strncmp(haystack, needle, needle_len) == 0) | ||
223 | return haystack; | ||
224 | } else { | ||
225 | if (strnicmp(haystack, needle, needle_len) == 0) | ||
226 | return haystack; | ||
227 | } | ||
228 | haystack++; | ||
229 | } | ||
230 | return NULL; | ||
231 | } | ||
232 | EXPORT_SYMBOL_GPL(ct_sip_search); | ||
233 | |||
234 | static int digits_len(struct nf_conn *ct, const char *dptr, | ||
235 | const char *limit, int *shift) | ||
236 | { | ||
237 | int len = 0; | ||
238 | while (dptr <= limit && isdigit(*dptr)) { | ||
239 | dptr++; | ||
240 | len++; | ||
241 | } | ||
242 | return len; | ||
243 | } | ||
244 | |||
245 | /* get digits length, skipping blank spaces. */ | ||
246 | static int skp_digits_len(struct nf_conn *ct, const char *dptr, | ||
247 | const char *limit, int *shift) | ||
248 | { | ||
249 | for (; dptr <= limit && *dptr == ' '; dptr++) | ||
250 | (*shift)++; | ||
251 | |||
252 | return digits_len(ct, dptr, limit, shift); | ||
253 | } | ||
254 | |||
255 | static int parse_addr(struct nf_conn *ct, const char *cp, const char **endp, | ||
256 | union nf_conntrack_address *addr, const char *limit) | ||
257 | { | ||
258 | const char *end; | ||
259 | int family = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num; | ||
260 | int ret = 0; | ||
261 | |||
262 | switch (family) { | ||
263 | case AF_INET: | ||
264 | ret = in4_pton(cp, limit - cp, (u8 *)&addr->ip, -1, &end); | ||
265 | break; | ||
266 | case AF_INET6: | ||
267 | ret = in6_pton(cp, limit - cp, (u8 *)&addr->ip6, -1, &end); | ||
268 | break; | ||
269 | default: | ||
270 | BUG(); | ||
271 | } | ||
272 | |||
273 | if (ret == 0 || end == cp) | ||
274 | return 0; | ||
275 | if (endp) | ||
276 | *endp = end; | ||
277 | return 1; | ||
278 | } | ||
279 | |||
280 | /* skip ip address. returns its length. */ | ||
281 | static int epaddr_len(struct nf_conn *ct, const char *dptr, | ||
282 | const char *limit, int *shift) | ||
283 | { | ||
284 | union nf_conntrack_address addr; | ||
285 | const char *aux = dptr; | ||
286 | |||
287 | if (!parse_addr(ct, dptr, &dptr, &addr, limit)) { | ||
288 | DEBUGP("ip: %s parse failed.!\n", dptr); | ||
289 | return 0; | ||
290 | } | ||
291 | |||
292 | /* Port number */ | ||
293 | if (*dptr == ':') { | ||
294 | dptr++; | ||
295 | dptr += digits_len(ct, dptr, limit, shift); | ||
296 | } | ||
297 | return dptr - aux; | ||
298 | } | ||
299 | |||
300 | /* get address length, skipping user info. */ | ||
301 | static int skp_epaddr_len(struct nf_conn *ct, const char *dptr, | ||
302 | const char *limit, int *shift) | ||
303 | { | ||
304 | int s = *shift; | ||
305 | |||
306 | for (; dptr <= limit && *dptr != '@'; dptr++) | ||
307 | (*shift)++; | ||
308 | |||
309 | if (*dptr == '@') { | ||
310 | dptr++; | ||
311 | (*shift)++; | ||
312 | } else | ||
313 | *shift = s; | ||
314 | |||
315 | return epaddr_len(ct, dptr, limit, shift); | ||
316 | } | ||
317 | |||
318 | /* Returns 0 if not found, -1 error parsing. */ | ||
319 | int ct_sip_get_info(struct nf_conn *ct, | ||
320 | const char *dptr, size_t dlen, | ||
321 | unsigned int *matchoff, | ||
322 | unsigned int *matchlen, | ||
323 | enum sip_header_pos pos) | ||
324 | { | ||
325 | const struct sip_header_nfo *hnfo = &ct_sip_hdrs[pos]; | ||
326 | const char *limit, *aux, *k = dptr; | ||
327 | int shift = 0; | ||
328 | |||
329 | limit = dptr + (dlen - hnfo->lnlen); | ||
330 | |||
331 | while (dptr <= limit) { | ||
332 | if ((strncmp(dptr, hnfo->lname, hnfo->lnlen) != 0) && | ||
333 | (strncmp(dptr, hnfo->sname, hnfo->snlen) != 0)) { | ||
334 | dptr++; | ||
335 | continue; | ||
336 | } | ||
337 | aux = ct_sip_search(hnfo->ln_str, dptr, hnfo->ln_strlen, | ||
338 | ct_sip_lnlen(dptr, limit), | ||
339 | hnfo->case_sensitive); | ||
340 | if (!aux) { | ||
341 | DEBUGP("'%s' not found in '%s'.\n", hnfo->ln_str, | ||
342 | hnfo->lname); | ||
343 | return -1; | ||
344 | } | ||
345 | aux += hnfo->ln_strlen; | ||
346 | |||
347 | *matchlen = hnfo->match_len(ct, aux, limit, &shift); | ||
348 | if (!*matchlen) | ||
349 | return -1; | ||
350 | |||
351 | *matchoff = (aux - k) + shift; | ||
352 | |||
353 | DEBUGP("%s match succeeded! - len: %u\n", hnfo->lname, | ||
354 | *matchlen); | ||
355 | return 1; | ||
356 | } | ||
357 | DEBUGP("%s header not found.\n", hnfo->lname); | ||
358 | return 0; | ||
359 | } | ||
360 | EXPORT_SYMBOL_GPL(ct_sip_get_info); | ||
361 | |||
362 | static int set_expected_rtp(struct sk_buff **pskb, | ||
363 | struct nf_conn *ct, | ||
364 | enum ip_conntrack_info ctinfo, | ||
365 | union nf_conntrack_address *addr, | ||
366 | __be16 port, | ||
367 | const char *dptr) | ||
368 | { | ||
369 | struct nf_conntrack_expect *exp; | ||
370 | enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); | ||
371 | int family = ct->tuplehash[!dir].tuple.src.l3num; | ||
372 | int ret; | ||
373 | typeof(nf_nat_sdp_hook) nf_nat_sdp; | ||
374 | |||
375 | exp = nf_conntrack_expect_alloc(ct); | ||
376 | if (exp == NULL) | ||
377 | return NF_DROP; | ||
378 | nf_conntrack_expect_init(exp, family, | ||
379 | &ct->tuplehash[!dir].tuple.src.u3, addr, | ||
380 | IPPROTO_UDP, NULL, &port); | ||
381 | |||
382 | nf_nat_sdp = rcu_dereference(nf_nat_sdp_hook); | ||
383 | if (nf_nat_sdp && ct->status & IPS_NAT_MASK) | ||
384 | ret = nf_nat_sdp(pskb, ctinfo, exp, dptr); | ||
385 | else { | ||
386 | if (nf_conntrack_expect_related(exp) != 0) | ||
387 | ret = NF_DROP; | ||
388 | else | ||
389 | ret = NF_ACCEPT; | ||
390 | } | ||
391 | nf_conntrack_expect_put(exp); | ||
392 | |||
393 | return ret; | ||
394 | } | ||
395 | |||
396 | static int sip_help(struct sk_buff **pskb, | ||
397 | unsigned int protoff, | ||
398 | struct nf_conn *ct, | ||
399 | enum ip_conntrack_info ctinfo) | ||
400 | { | ||
401 | int family = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num; | ||
402 | union nf_conntrack_address addr; | ||
403 | unsigned int dataoff, datalen; | ||
404 | const char *dptr; | ||
405 | int ret = NF_ACCEPT; | ||
406 | int matchoff, matchlen; | ||
407 | u_int16_t port; | ||
408 | enum sip_header_pos pos; | ||
409 | typeof(nf_nat_sip_hook) nf_nat_sip; | ||
410 | |||
411 | /* No Data ? */ | ||
412 | dataoff = protoff + sizeof(struct udphdr); | ||
413 | if (dataoff >= (*pskb)->len) | ||
414 | return NF_ACCEPT; | ||
415 | |||
416 | nf_ct_refresh(ct, *pskb, sip_timeout * HZ); | ||
417 | |||
418 | if (!skb_is_nonlinear(*pskb)) | ||
419 | dptr = (*pskb)->data + dataoff; | ||
420 | else { | ||
421 | DEBUGP("Copy of skbuff not supported yet.\n"); | ||
422 | goto out; | ||
423 | } | ||
424 | |||
425 | nf_nat_sip = rcu_dereference(nf_nat_sip_hook); | ||
426 | if (nf_nat_sip && ct->status & IPS_NAT_MASK) { | ||
427 | if (!nf_nat_sip(pskb, ctinfo, ct, &dptr)) { | ||
428 | ret = NF_DROP; | ||
429 | goto out; | ||
430 | } | ||
431 | } | ||
432 | |||
433 | datalen = (*pskb)->len - dataoff; | ||
434 | if (datalen < sizeof("SIP/2.0 200") - 1) | ||
435 | goto out; | ||
436 | |||
437 | /* RTP info only in some SDP pkts */ | ||
438 | if (memcmp(dptr, "INVITE", sizeof("INVITE") - 1) != 0 && | ||
439 | memcmp(dptr, "SIP/2.0 200", sizeof("SIP/2.0 200") - 1) != 0) { | ||
440 | goto out; | ||
441 | } | ||
442 | /* Get address and port from SDP packet. */ | ||
443 | pos = family == AF_INET ? POS_CONNECTION_IP4 : POS_CONNECTION_IP6; | ||
444 | if (ct_sip_get_info(ct, dptr, datalen, &matchoff, &matchlen, pos) > 0) { | ||
445 | |||
446 | /* We'll drop only if there are parse problems. */ | ||
447 | if (!parse_addr(ct, dptr + matchoff, NULL, &addr, | ||
448 | dptr + datalen)) { | ||
449 | ret = NF_DROP; | ||
450 | goto out; | ||
451 | } | ||
452 | if (ct_sip_get_info(ct, dptr, datalen, &matchoff, &matchlen, | ||
453 | POS_MEDIA) > 0) { | ||
454 | |||
455 | port = simple_strtoul(dptr + matchoff, NULL, 10); | ||
456 | if (port < 1024) { | ||
457 | ret = NF_DROP; | ||
458 | goto out; | ||
459 | } | ||
460 | ret = set_expected_rtp(pskb, ct, ctinfo, &addr, | ||
461 | htons(port), dptr); | ||
462 | } | ||
463 | } | ||
464 | out: | ||
465 | return ret; | ||
466 | } | ||
467 | |||
468 | static struct nf_conntrack_helper sip[MAX_PORTS][2] __read_mostly; | ||
469 | static char sip_names[MAX_PORTS][2][sizeof("sip-65535")] __read_mostly; | ||
470 | |||
471 | static void nf_conntrack_sip_fini(void) | ||
472 | { | ||
473 | int i, j; | ||
474 | |||
475 | for (i = 0; i < ports_c; i++) { | ||
476 | for (j = 0; j < 2; j++) { | ||
477 | if (sip[i][j].me == NULL) | ||
478 | continue; | ||
479 | nf_conntrack_helper_unregister(&sip[i][j]); | ||
480 | } | ||
481 | } | ||
482 | } | ||
483 | |||
484 | static int __init nf_conntrack_sip_init(void) | ||
485 | { | ||
486 | int i, j, ret; | ||
487 | char *tmpname; | ||
488 | |||
489 | if (ports_c == 0) | ||
490 | ports[ports_c++] = SIP_PORT; | ||
491 | |||
492 | for (i = 0; i < ports_c; i++) { | ||
493 | memset(&sip[i], 0, sizeof(sip[i])); | ||
494 | |||
495 | sip[i][0].tuple.src.l3num = AF_INET; | ||
496 | sip[i][1].tuple.src.l3num = AF_INET6; | ||
497 | for (j = 0; j < 2; j++) { | ||
498 | sip[i][j].tuple.dst.protonum = IPPROTO_UDP; | ||
499 | sip[i][j].tuple.src.u.udp.port = htons(ports[i]); | ||
500 | sip[i][j].mask.src.l3num = 0xFFFF; | ||
501 | sip[i][j].mask.src.u.udp.port = htons(0xFFFF); | ||
502 | sip[i][j].mask.dst.protonum = 0xFF; | ||
503 | sip[i][j].max_expected = 2; | ||
504 | sip[i][j].timeout = 3 * 60; /* 3 minutes */ | ||
505 | sip[i][j].me = THIS_MODULE; | ||
506 | sip[i][j].help = sip_help; | ||
507 | |||
508 | tmpname = &sip_names[i][j][0]; | ||
509 | if (ports[i] == SIP_PORT) | ||
510 | sprintf(tmpname, "sip"); | ||
511 | else | ||
512 | sprintf(tmpname, "sip-%u", i); | ||
513 | sip[i][j].name = tmpname; | ||
514 | |||
515 | DEBUGP("port #%u: %u\n", i, ports[i]); | ||
516 | |||
517 | ret = nf_conntrack_helper_register(&sip[i][j]); | ||
518 | if (ret) { | ||
519 | printk("nf_ct_sip: failed to register helper " | ||
520 | "for pf: %u port: %u\n", | ||
521 | sip[i][j].tuple.src.l3num, ports[i]); | ||
522 | nf_conntrack_sip_fini(); | ||
523 | return ret; | ||
524 | } | ||
525 | } | ||
526 | } | ||
527 | return 0; | ||
528 | } | ||
529 | |||
530 | module_init(nf_conntrack_sip_init); | ||
531 | module_exit(nf_conntrack_sip_fini); | ||
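For orientation, ct_sip_get_info() above returns 1 on a hit and fills matchoff/matchlen with the position of the value that follows the header's ln_str marker; callers then interpret those bytes themselves (parse_addr() for addresses, simple_strtoul() for digits, as sip_help() does for the media port). A minimal sketch that uses only symbols defined in this file; the wrapper name is invented for illustration:

/* Hypothetical wrapper, for illustration only. */
static int sketch_content_length(struct nf_conn *ct, const char *dptr,
				 size_t datalen)
{
	unsigned int matchoff, matchlen;

	if (ct_sip_get_info(ct, dptr, datalen, &matchoff, &matchlen,
			    POS_CONTENT) <= 0)
		return -1;	/* header absent or message malformed */
	/* matchoff/matchlen delimit the digits found by skp_digits_len() */
	return simple_strtoul(dptr + matchoff, NULL, 10);
}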
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c index 5954f6773810..f1cb60ff9319 100644 --- a/net/netfilter/nf_conntrack_standalone.c +++ b/net/netfilter/nf_conntrack_standalone.c | |||
@@ -29,13 +29,11 @@ | |||
29 | #include <linux/sysctl.h> | 29 | #include <linux/sysctl.h> |
30 | #endif | 30 | #endif |
31 | 31 | ||
32 | #define ASSERT_READ_LOCK(x) | ||
33 | #define ASSERT_WRITE_LOCK(x) | ||
34 | |||
35 | #include <net/netfilter/nf_conntrack.h> | 32 | #include <net/netfilter/nf_conntrack.h> |
36 | #include <net/netfilter/nf_conntrack_l3proto.h> | ||
37 | #include <net/netfilter/nf_conntrack_protocol.h> | ||
38 | #include <net/netfilter/nf_conntrack_core.h> | 33 | #include <net/netfilter/nf_conntrack_core.h> |
34 | #include <net/netfilter/nf_conntrack_l3proto.h> | ||
35 | #include <net/netfilter/nf_conntrack_l4proto.h> | ||
36 | #include <net/netfilter/nf_conntrack_expect.h> | ||
39 | #include <net/netfilter/nf_conntrack_helper.h> | 37 | #include <net/netfilter/nf_conntrack_helper.h> |
40 | 38 | ||
41 | #if 0 | 39 | #if 0 |
@@ -46,33 +44,15 @@ | |||
46 | 44 | ||
47 | MODULE_LICENSE("GPL"); | 45 | MODULE_LICENSE("GPL"); |
48 | 46 | ||
49 | extern atomic_t nf_conntrack_count; | ||
50 | DECLARE_PER_CPU(struct ip_conntrack_stat, nf_conntrack_stat); | ||
51 | |||
52 | static int kill_l3proto(struct nf_conn *i, void *data) | ||
53 | { | ||
54 | return (i->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num == | ||
55 | ((struct nf_conntrack_l3proto *)data)->l3proto); | ||
56 | } | ||
57 | |||
58 | static int kill_proto(struct nf_conn *i, void *data) | ||
59 | { | ||
60 | struct nf_conntrack_protocol *proto; | ||
61 | proto = (struct nf_conntrack_protocol *)data; | ||
62 | return (i->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum == | ||
63 | proto->proto) && | ||
64 | (i->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num == | ||
65 | proto->l3proto); | ||
66 | } | ||
67 | |||
68 | #ifdef CONFIG_PROC_FS | 47 | #ifdef CONFIG_PROC_FS |
69 | static int | 48 | int |
70 | print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple, | 49 | print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple, |
71 | struct nf_conntrack_l3proto *l3proto, | 50 | struct nf_conntrack_l3proto *l3proto, |
72 | struct nf_conntrack_protocol *proto) | 51 | struct nf_conntrack_l4proto *l4proto) |
73 | { | 52 | { |
74 | return l3proto->print_tuple(s, tuple) || proto->print_tuple(s, tuple); | 53 | return l3proto->print_tuple(s, tuple) || l4proto->print_tuple(s, tuple); |
75 | } | 54 | } |
55 | EXPORT_SYMBOL_GPL(print_tuple); | ||
76 | 56 | ||
77 | #ifdef CONFIG_NF_CT_ACCT | 57 | #ifdef CONFIG_NF_CT_ACCT |
78 | static unsigned int | 58 | static unsigned int |
@@ -150,9 +130,8 @@ static int ct_seq_show(struct seq_file *s, void *v) | |||
150 | const struct nf_conntrack_tuple_hash *hash = v; | 130 | const struct nf_conntrack_tuple_hash *hash = v; |
151 | const struct nf_conn *conntrack = nf_ct_tuplehash_to_ctrack(hash); | 131 | const struct nf_conn *conntrack = nf_ct_tuplehash_to_ctrack(hash); |
152 | struct nf_conntrack_l3proto *l3proto; | 132 | struct nf_conntrack_l3proto *l3proto; |
153 | struct nf_conntrack_protocol *proto; | 133 | struct nf_conntrack_l4proto *l4proto; |
154 | 134 | ||
155 | ASSERT_READ_LOCK(&nf_conntrack_lock); | ||
156 | NF_CT_ASSERT(conntrack); | 135 | NF_CT_ASSERT(conntrack); |
157 | 136 | ||
158 | /* we only want to print DIR_ORIGINAL */ | 137 | /* we only want to print DIR_ORIGINAL */ |
@@ -163,16 +142,16 @@ static int ct_seq_show(struct seq_file *s, void *v) | |||
163 | .tuple.src.l3num); | 142 | .tuple.src.l3num); |
164 | 143 | ||
165 | NF_CT_ASSERT(l3proto); | 144 | NF_CT_ASSERT(l3proto); |
166 | proto = __nf_ct_proto_find(conntrack->tuplehash[IP_CT_DIR_ORIGINAL] | 145 | l4proto = __nf_ct_l4proto_find(conntrack->tuplehash[IP_CT_DIR_ORIGINAL] |
167 | .tuple.src.l3num, | 146 | .tuple.src.l3num, |
168 | conntrack->tuplehash[IP_CT_DIR_ORIGINAL] | 147 | conntrack->tuplehash[IP_CT_DIR_ORIGINAL] |
169 | .tuple.dst.protonum); | 148 | .tuple.dst.protonum); |
170 | NF_CT_ASSERT(proto); | 149 | NF_CT_ASSERT(l4proto); |
171 | 150 | ||
172 | if (seq_printf(s, "%-8s %u %-8s %u %ld ", | 151 | if (seq_printf(s, "%-8s %u %-8s %u %ld ", |
173 | l3proto->name, | 152 | l3proto->name, |
174 | conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num, | 153 | conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num, |
175 | proto->name, | 154 | l4proto->name, |
176 | conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum, | 155 | conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum, |
177 | timer_pending(&conntrack->timeout) | 156 | timer_pending(&conntrack->timeout) |
178 | ? (long)(conntrack->timeout.expires - jiffies)/HZ : 0) != 0) | 157 | ? (long)(conntrack->timeout.expires - jiffies)/HZ : 0) != 0) |
@@ -181,11 +160,11 @@ static int ct_seq_show(struct seq_file *s, void *v) | |||
181 | if (l3proto->print_conntrack(s, conntrack)) | 160 | if (l3proto->print_conntrack(s, conntrack)) |
182 | return -ENOSPC; | 161 | return -ENOSPC; |
183 | 162 | ||
184 | if (proto->print_conntrack(s, conntrack)) | 163 | if (l4proto->print_conntrack(s, conntrack)) |
185 | return -ENOSPC; | 164 | return -ENOSPC; |
186 | 165 | ||
187 | if (print_tuple(s, &conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple, | 166 | if (print_tuple(s, &conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple, |
188 | l3proto, proto)) | 167 | l3proto, l4proto)) |
189 | return -ENOSPC; | 168 | return -ENOSPC; |
190 | 169 | ||
191 | if (seq_print_counters(s, &conntrack->counters[IP_CT_DIR_ORIGINAL])) | 170 | if (seq_print_counters(s, &conntrack->counters[IP_CT_DIR_ORIGINAL])) |
@@ -196,7 +175,7 @@ static int ct_seq_show(struct seq_file *s, void *v) | |||
196 | return -ENOSPC; | 175 | return -ENOSPC; |
197 | 176 | ||
198 | if (print_tuple(s, &conntrack->tuplehash[IP_CT_DIR_REPLY].tuple, | 177 | if (print_tuple(s, &conntrack->tuplehash[IP_CT_DIR_REPLY].tuple, |
199 | l3proto, proto)) | 178 | l3proto, l4proto)) |
200 | return -ENOSPC; | 179 | return -ENOSPC; |
201 | 180 | ||
202 | if (seq_print_counters(s, &conntrack->counters[IP_CT_DIR_REPLY])) | 181 | if (seq_print_counters(s, &conntrack->counters[IP_CT_DIR_REPLY])) |
@@ -258,84 +237,6 @@ static struct file_operations ct_file_ops = { | |||
258 | .release = seq_release_private, | 237 | .release = seq_release_private, |
259 | }; | 238 | }; |
260 | 239 | ||
261 | /* expects */ | ||
262 | static void *exp_seq_start(struct seq_file *s, loff_t *pos) | ||
263 | { | ||
264 | struct list_head *e = &nf_conntrack_expect_list; | ||
265 | loff_t i; | ||
266 | |||
267 | /* strange seq_file api calls stop even if we fail, | ||
268 | * thus we need to grab lock since stop unlocks */ | ||
269 | read_lock_bh(&nf_conntrack_lock); | ||
270 | |||
271 | if (list_empty(e)) | ||
272 | return NULL; | ||
273 | |||
274 | for (i = 0; i <= *pos; i++) { | ||
275 | e = e->next; | ||
276 | if (e == &nf_conntrack_expect_list) | ||
277 | return NULL; | ||
278 | } | ||
279 | return e; | ||
280 | } | ||
281 | |||
282 | static void *exp_seq_next(struct seq_file *s, void *v, loff_t *pos) | ||
283 | { | ||
284 | struct list_head *e = v; | ||
285 | |||
286 | ++*pos; | ||
287 | e = e->next; | ||
288 | |||
289 | if (e == &nf_conntrack_expect_list) | ||
290 | return NULL; | ||
291 | |||
292 | return e; | ||
293 | } | ||
294 | |||
295 | static void exp_seq_stop(struct seq_file *s, void *v) | ||
296 | { | ||
297 | read_unlock_bh(&nf_conntrack_lock); | ||
298 | } | ||
299 | |||
300 | static int exp_seq_show(struct seq_file *s, void *v) | ||
301 | { | ||
302 | struct nf_conntrack_expect *expect = v; | ||
303 | |||
304 | if (expect->timeout.function) | ||
305 | seq_printf(s, "%ld ", timer_pending(&expect->timeout) | ||
306 | ? (long)(expect->timeout.expires - jiffies)/HZ : 0); | ||
307 | else | ||
308 | seq_printf(s, "- "); | ||
309 | seq_printf(s, "l3proto = %u proto=%u ", | ||
310 | expect->tuple.src.l3num, | ||
311 | expect->tuple.dst.protonum); | ||
312 | print_tuple(s, &expect->tuple, | ||
313 | __nf_ct_l3proto_find(expect->tuple.src.l3num), | ||
314 | __nf_ct_proto_find(expect->tuple.src.l3num, | ||
315 | expect->tuple.dst.protonum)); | ||
316 | return seq_putc(s, '\n'); | ||
317 | } | ||
318 | |||
319 | static struct seq_operations exp_seq_ops = { | ||
320 | .start = exp_seq_start, | ||
321 | .next = exp_seq_next, | ||
322 | .stop = exp_seq_stop, | ||
323 | .show = exp_seq_show | ||
324 | }; | ||
325 | |||
326 | static int exp_open(struct inode *inode, struct file *file) | ||
327 | { | ||
328 | return seq_open(file, &exp_seq_ops); | ||
329 | } | ||
330 | |||
331 | static struct file_operations exp_file_ops = { | ||
332 | .owner = THIS_MODULE, | ||
333 | .open = exp_open, | ||
334 | .read = seq_read, | ||
335 | .llseek = seq_lseek, | ||
336 | .release = seq_release | ||
337 | }; | ||
338 | |||
339 | static void *ct_cpu_seq_start(struct seq_file *seq, loff_t *pos) | 240 | static void *ct_cpu_seq_start(struct seq_file *seq, loff_t *pos) |
340 | { | 241 | { |
341 | int cpu; | 242 | int cpu; |
@@ -428,34 +329,9 @@ static struct file_operations ct_cpu_seq_fops = { | |||
428 | /* Sysctl support */ | 329 | /* Sysctl support */ |
429 | 330 | ||
430 | int nf_conntrack_checksum __read_mostly = 1; | 331 | int nf_conntrack_checksum __read_mostly = 1; |
332 | EXPORT_SYMBOL_GPL(nf_conntrack_checksum); | ||
431 | 333 | ||
432 | #ifdef CONFIG_SYSCTL | 334 | #ifdef CONFIG_SYSCTL |
433 | |||
434 | /* From nf_conntrack_core.c */ | ||
435 | extern int nf_conntrack_max; | ||
436 | extern unsigned int nf_conntrack_htable_size; | ||
437 | |||
438 | /* From nf_conntrack_proto_tcp.c */ | ||
439 | extern unsigned int nf_ct_tcp_timeout_syn_sent; | ||
440 | extern unsigned int nf_ct_tcp_timeout_syn_recv; | ||
441 | extern unsigned int nf_ct_tcp_timeout_established; | ||
442 | extern unsigned int nf_ct_tcp_timeout_fin_wait; | ||
443 | extern unsigned int nf_ct_tcp_timeout_close_wait; | ||
444 | extern unsigned int nf_ct_tcp_timeout_last_ack; | ||
445 | extern unsigned int nf_ct_tcp_timeout_time_wait; | ||
446 | extern unsigned int nf_ct_tcp_timeout_close; | ||
447 | extern unsigned int nf_ct_tcp_timeout_max_retrans; | ||
448 | extern int nf_ct_tcp_loose; | ||
449 | extern int nf_ct_tcp_be_liberal; | ||
450 | extern int nf_ct_tcp_max_retrans; | ||
451 | |||
452 | /* From nf_conntrack_proto_udp.c */ | ||
453 | extern unsigned int nf_ct_udp_timeout; | ||
454 | extern unsigned int nf_ct_udp_timeout_stream; | ||
455 | |||
456 | /* From nf_conntrack_proto_generic.c */ | ||
457 | extern unsigned int nf_ct_generic_timeout; | ||
458 | |||
459 | /* Log invalid packets of a given protocol */ | 335 | /* Log invalid packets of a given protocol */ |
460 | static int log_invalid_proto_min = 0; | 336 | static int log_invalid_proto_min = 0; |
461 | static int log_invalid_proto_max = 255; | 337 | static int log_invalid_proto_max = 255; |
@@ -496,94 +372,6 @@ static ctl_table nf_ct_sysctl_table[] = { | |||
496 | .proc_handler = &proc_dointvec, | 372 | .proc_handler = &proc_dointvec, |
497 | }, | 373 | }, |
498 | { | 374 | { |
499 | .ctl_name = NET_NF_CONNTRACK_TCP_TIMEOUT_SYN_SENT, | ||
500 | .procname = "nf_conntrack_tcp_timeout_syn_sent", | ||
501 | .data = &nf_ct_tcp_timeout_syn_sent, | ||
502 | .maxlen = sizeof(unsigned int), | ||
503 | .mode = 0644, | ||
504 | .proc_handler = &proc_dointvec_jiffies, | ||
505 | }, | ||
506 | { | ||
507 | .ctl_name = NET_NF_CONNTRACK_TCP_TIMEOUT_SYN_RECV, | ||
508 | .procname = "nf_conntrack_tcp_timeout_syn_recv", | ||
509 | .data = &nf_ct_tcp_timeout_syn_recv, | ||
510 | .maxlen = sizeof(unsigned int), | ||
511 | .mode = 0644, | ||
512 | .proc_handler = &proc_dointvec_jiffies, | ||
513 | }, | ||
514 | { | ||
515 | .ctl_name = NET_NF_CONNTRACK_TCP_TIMEOUT_ESTABLISHED, | ||
516 | .procname = "nf_conntrack_tcp_timeout_established", | ||
517 | .data = &nf_ct_tcp_timeout_established, | ||
518 | .maxlen = sizeof(unsigned int), | ||
519 | .mode = 0644, | ||
520 | .proc_handler = &proc_dointvec_jiffies, | ||
521 | }, | ||
522 | { | ||
523 | .ctl_name = NET_NF_CONNTRACK_TCP_TIMEOUT_FIN_WAIT, | ||
524 | .procname = "nf_conntrack_tcp_timeout_fin_wait", | ||
525 | .data = &nf_ct_tcp_timeout_fin_wait, | ||
526 | .maxlen = sizeof(unsigned int), | ||
527 | .mode = 0644, | ||
528 | .proc_handler = &proc_dointvec_jiffies, | ||
529 | }, | ||
530 | { | ||
531 | .ctl_name = NET_NF_CONNTRACK_TCP_TIMEOUT_CLOSE_WAIT, | ||
532 | .procname = "nf_conntrack_tcp_timeout_close_wait", | ||
533 | .data = &nf_ct_tcp_timeout_close_wait, | ||
534 | .maxlen = sizeof(unsigned int), | ||
535 | .mode = 0644, | ||
536 | .proc_handler = &proc_dointvec_jiffies, | ||
537 | }, | ||
538 | { | ||
539 | .ctl_name = NET_NF_CONNTRACK_TCP_TIMEOUT_LAST_ACK, | ||
540 | .procname = "nf_conntrack_tcp_timeout_last_ack", | ||
541 | .data = &nf_ct_tcp_timeout_last_ack, | ||
542 | .maxlen = sizeof(unsigned int), | ||
543 | .mode = 0644, | ||
544 | .proc_handler = &proc_dointvec_jiffies, | ||
545 | }, | ||
546 | { | ||
547 | .ctl_name = NET_NF_CONNTRACK_TCP_TIMEOUT_TIME_WAIT, | ||
548 | .procname = "nf_conntrack_tcp_timeout_time_wait", | ||
549 | .data = &nf_ct_tcp_timeout_time_wait, | ||
550 | .maxlen = sizeof(unsigned int), | ||
551 | .mode = 0644, | ||
552 | .proc_handler = &proc_dointvec_jiffies, | ||
553 | }, | ||
554 | { | ||
555 | .ctl_name = NET_NF_CONNTRACK_TCP_TIMEOUT_CLOSE, | ||
556 | .procname = "nf_conntrack_tcp_timeout_close", | ||
557 | .data = &nf_ct_tcp_timeout_close, | ||
558 | .maxlen = sizeof(unsigned int), | ||
559 | .mode = 0644, | ||
560 | .proc_handler = &proc_dointvec_jiffies, | ||
561 | }, | ||
562 | { | ||
563 | .ctl_name = NET_NF_CONNTRACK_UDP_TIMEOUT, | ||
564 | .procname = "nf_conntrack_udp_timeout", | ||
565 | .data = &nf_ct_udp_timeout, | ||
566 | .maxlen = sizeof(unsigned int), | ||
567 | .mode = 0644, | ||
568 | .proc_handler = &proc_dointvec_jiffies, | ||
569 | }, | ||
570 | { | ||
571 | .ctl_name = NET_NF_CONNTRACK_UDP_TIMEOUT_STREAM, | ||
572 | .procname = "nf_conntrack_udp_timeout_stream", | ||
573 | .data = &nf_ct_udp_timeout_stream, | ||
574 | .maxlen = sizeof(unsigned int), | ||
575 | .mode = 0644, | ||
576 | .proc_handler = &proc_dointvec_jiffies, | ||
577 | }, | ||
578 | { | ||
579 | .ctl_name = NET_NF_CONNTRACK_GENERIC_TIMEOUT, | ||
580 | .procname = "nf_conntrack_generic_timeout", | ||
581 | .data = &nf_ct_generic_timeout, | ||
582 | .maxlen = sizeof(unsigned int), | ||
583 | .mode = 0644, | ||
584 | .proc_handler = &proc_dointvec_jiffies, | ||
585 | }, | ||
586 | { | ||
587 | .ctl_name = NET_NF_CONNTRACK_LOG_INVALID, | 375 | .ctl_name = NET_NF_CONNTRACK_LOG_INVALID, |
588 | .procname = "nf_conntrack_log_invalid", | 376 | .procname = "nf_conntrack_log_invalid", |
589 | .data = &nf_ct_log_invalid, | 377 | .data = &nf_ct_log_invalid, |
@@ -594,38 +382,6 @@ static ctl_table nf_ct_sysctl_table[] = { | |||
594 | .extra1 = &log_invalid_proto_min, | 382 | .extra1 = &log_invalid_proto_min, |
595 | .extra2 = &log_invalid_proto_max, | 383 | .extra2 = &log_invalid_proto_max, |
596 | }, | 384 | }, |
597 | { | ||
598 | .ctl_name = NET_NF_CONNTRACK_TCP_TIMEOUT_MAX_RETRANS, | ||
599 | .procname = "nf_conntrack_tcp_timeout_max_retrans", | ||
600 | .data = &nf_ct_tcp_timeout_max_retrans, | ||
601 | .maxlen = sizeof(unsigned int), | ||
602 | .mode = 0644, | ||
603 | .proc_handler = &proc_dointvec_jiffies, | ||
604 | }, | ||
605 | { | ||
606 | .ctl_name = NET_NF_CONNTRACK_TCP_LOOSE, | ||
607 | .procname = "nf_conntrack_tcp_loose", | ||
608 | .data = &nf_ct_tcp_loose, | ||
609 | .maxlen = sizeof(unsigned int), | ||
610 | .mode = 0644, | ||
611 | .proc_handler = &proc_dointvec, | ||
612 | }, | ||
613 | { | ||
614 | .ctl_name = NET_NF_CONNTRACK_TCP_BE_LIBERAL, | ||
615 | .procname = "nf_conntrack_tcp_be_liberal", | ||
616 | .data = &nf_ct_tcp_be_liberal, | ||
617 | .maxlen = sizeof(unsigned int), | ||
618 | .mode = 0644, | ||
619 | .proc_handler = &proc_dointvec, | ||
620 | }, | ||
621 | { | ||
622 | .ctl_name = NET_NF_CONNTRACK_TCP_MAX_RETRANS, | ||
623 | .procname = "nf_conntrack_tcp_max_retrans", | ||
624 | .data = &nf_ct_tcp_max_retrans, | ||
625 | .maxlen = sizeof(unsigned int), | ||
626 | .mode = 0644, | ||
627 | .proc_handler = &proc_dointvec, | ||
628 | }, | ||
629 | 385 | ||
630 | { .ctl_name = 0 } | 386 | { .ctl_name = 0 } |
631 | }; | 387 | }; |
@@ -659,109 +415,9 @@ static ctl_table nf_ct_net_table[] = { | |||
659 | }, | 415 | }, |
660 | { .ctl_name = 0 } | 416 | { .ctl_name = 0 } |
661 | }; | 417 | }; |
662 | EXPORT_SYMBOL(nf_ct_log_invalid); | 418 | EXPORT_SYMBOL_GPL(nf_ct_log_invalid); |
663 | #endif /* CONFIG_SYSCTL */ | 419 | #endif /* CONFIG_SYSCTL */ |
664 | 420 | ||
665 | int nf_conntrack_l3proto_register(struct nf_conntrack_l3proto *proto) | ||
666 | { | ||
667 | int ret = 0; | ||
668 | |||
669 | write_lock_bh(&nf_conntrack_lock); | ||
670 | if (nf_ct_l3protos[proto->l3proto] != &nf_conntrack_generic_l3proto) { | ||
671 | ret = -EBUSY; | ||
672 | goto out; | ||
673 | } | ||
674 | nf_ct_l3protos[proto->l3proto] = proto; | ||
675 | out: | ||
676 | write_unlock_bh(&nf_conntrack_lock); | ||
677 | |||
678 | return ret; | ||
679 | } | ||
680 | |||
681 | void nf_conntrack_l3proto_unregister(struct nf_conntrack_l3proto *proto) | ||
682 | { | ||
683 | write_lock_bh(&nf_conntrack_lock); | ||
684 | nf_ct_l3protos[proto->l3proto] = &nf_conntrack_generic_l3proto; | ||
685 | write_unlock_bh(&nf_conntrack_lock); | ||
686 | |||
687 | /* Somebody could be still looking at the proto in bh. */ | ||
688 | synchronize_net(); | ||
689 | |||
690 | /* Remove all contrack entries for this protocol */ | ||
691 | nf_ct_iterate_cleanup(kill_l3proto, proto); | ||
692 | } | ||
693 | |||
694 | /* FIXME: Allow NULL functions and sub in pointers to generic for | ||
695 | them. --RR */ | ||
696 | int nf_conntrack_protocol_register(struct nf_conntrack_protocol *proto) | ||
697 | { | ||
698 | int ret = 0; | ||
699 | |||
700 | retry: | ||
701 | write_lock_bh(&nf_conntrack_lock); | ||
702 | if (nf_ct_protos[proto->l3proto]) { | ||
703 | if (nf_ct_protos[proto->l3proto][proto->proto] | ||
704 | != &nf_conntrack_generic_protocol) { | ||
705 | ret = -EBUSY; | ||
706 | goto out_unlock; | ||
707 | } | ||
708 | } else { | ||
709 | /* l3proto may be loaded latter. */ | ||
710 | struct nf_conntrack_protocol **proto_array; | ||
711 | int i; | ||
712 | |||
713 | write_unlock_bh(&nf_conntrack_lock); | ||
714 | |||
715 | proto_array = (struct nf_conntrack_protocol **) | ||
716 | kmalloc(MAX_NF_CT_PROTO * | ||
717 | sizeof(struct nf_conntrack_protocol *), | ||
718 | GFP_KERNEL); | ||
719 | if (proto_array == NULL) { | ||
720 | ret = -ENOMEM; | ||
721 | goto out; | ||
722 | } | ||
723 | for (i = 0; i < MAX_NF_CT_PROTO; i++) | ||
724 | proto_array[i] = &nf_conntrack_generic_protocol; | ||
725 | |||
726 | write_lock_bh(&nf_conntrack_lock); | ||
727 | if (nf_ct_protos[proto->l3proto]) { | ||
728 | /* bad timing, but no problem */ | ||
729 | write_unlock_bh(&nf_conntrack_lock); | ||
730 | kfree(proto_array); | ||
731 | } else { | ||
732 | nf_ct_protos[proto->l3proto] = proto_array; | ||
733 | write_unlock_bh(&nf_conntrack_lock); | ||
734 | } | ||
735 | |||
736 | /* | ||
737 | * Just once because array is never freed until unloading | ||
738 | * nf_conntrack.ko | ||
739 | */ | ||
740 | goto retry; | ||
741 | } | ||
742 | |||
743 | nf_ct_protos[proto->l3proto][proto->proto] = proto; | ||
744 | |||
745 | out_unlock: | ||
746 | write_unlock_bh(&nf_conntrack_lock); | ||
747 | out: | ||
748 | return ret; | ||
749 | } | ||
750 | |||
751 | void nf_conntrack_protocol_unregister(struct nf_conntrack_protocol *proto) | ||
752 | { | ||
753 | write_lock_bh(&nf_conntrack_lock); | ||
754 | nf_ct_protos[proto->l3proto][proto->proto] | ||
755 | = &nf_conntrack_generic_protocol; | ||
756 | write_unlock_bh(&nf_conntrack_lock); | ||
757 | |||
758 | /* Somebody could be still looking at the proto in bh. */ | ||
759 | synchronize_net(); | ||
760 | |||
761 | /* Remove all contrack entries for this protocol */ | ||
762 | nf_ct_iterate_cleanup(kill_proto, proto); | ||
763 | } | ||
764 | |||
765 | static int __init nf_conntrack_standalone_init(void) | 421 | static int __init nf_conntrack_standalone_init(void) |
766 | { | 422 | { |
767 | #ifdef CONFIG_PROC_FS | 423 | #ifdef CONFIG_PROC_FS |
@@ -834,70 +490,4 @@ module_exit(nf_conntrack_standalone_fini); | |||
834 | void need_conntrack(void) | 490 | void need_conntrack(void) |
835 | { | 491 | { |
836 | } | 492 | } |
837 | 493 | EXPORT_SYMBOL_GPL(need_conntrack); | |
838 | #ifdef CONFIG_NF_CONNTRACK_EVENTS | ||
839 | EXPORT_SYMBOL_GPL(nf_conntrack_chain); | ||
840 | EXPORT_SYMBOL_GPL(nf_conntrack_expect_chain); | ||
841 | EXPORT_SYMBOL_GPL(nf_conntrack_register_notifier); | ||
842 | EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier); | ||
843 | EXPORT_SYMBOL_GPL(__nf_ct_event_cache_init); | ||
844 | EXPORT_PER_CPU_SYMBOL_GPL(nf_conntrack_ecache); | ||
845 | EXPORT_SYMBOL_GPL(nf_ct_deliver_cached_events); | ||
846 | #endif | ||
847 | EXPORT_SYMBOL(nf_ct_l3proto_try_module_get); | ||
848 | EXPORT_SYMBOL(nf_ct_l3proto_module_put); | ||
849 | EXPORT_SYMBOL(nf_conntrack_l3proto_register); | ||
850 | EXPORT_SYMBOL(nf_conntrack_l3proto_unregister); | ||
851 | EXPORT_SYMBOL(nf_conntrack_protocol_register); | ||
852 | EXPORT_SYMBOL(nf_conntrack_protocol_unregister); | ||
853 | EXPORT_SYMBOL(nf_ct_invert_tuplepr); | ||
854 | EXPORT_SYMBOL(nf_conntrack_destroyed); | ||
855 | EXPORT_SYMBOL(need_conntrack); | ||
856 | EXPORT_SYMBOL(nf_conntrack_helper_register); | ||
857 | EXPORT_SYMBOL(nf_conntrack_helper_unregister); | ||
858 | EXPORT_SYMBOL(nf_ct_iterate_cleanup); | ||
859 | EXPORT_SYMBOL(__nf_ct_refresh_acct); | ||
860 | EXPORT_SYMBOL(nf_ct_protos); | ||
861 | EXPORT_SYMBOL(__nf_ct_proto_find); | ||
862 | EXPORT_SYMBOL(nf_ct_proto_find_get); | ||
863 | EXPORT_SYMBOL(nf_ct_proto_put); | ||
864 | EXPORT_SYMBOL(nf_ct_l3proto_find_get); | ||
865 | EXPORT_SYMBOL(nf_ct_l3proto_put); | ||
866 | EXPORT_SYMBOL(nf_ct_l3protos); | ||
867 | EXPORT_SYMBOL_GPL(nf_conntrack_checksum); | ||
868 | EXPORT_SYMBOL(nf_conntrack_expect_alloc); | ||
869 | EXPORT_SYMBOL(nf_conntrack_expect_put); | ||
870 | EXPORT_SYMBOL(nf_conntrack_expect_related); | ||
871 | EXPORT_SYMBOL(nf_conntrack_unexpect_related); | ||
872 | EXPORT_SYMBOL(nf_conntrack_tuple_taken); | ||
873 | EXPORT_SYMBOL(nf_conntrack_htable_size); | ||
874 | EXPORT_SYMBOL(nf_conntrack_lock); | ||
875 | EXPORT_SYMBOL(nf_conntrack_hash); | ||
876 | EXPORT_SYMBOL(nf_conntrack_untracked); | ||
877 | EXPORT_SYMBOL_GPL(nf_conntrack_find_get); | ||
878 | #ifdef CONFIG_IP_NF_NAT_NEEDED | ||
879 | EXPORT_SYMBOL(nf_conntrack_tcp_update); | ||
880 | #endif | ||
881 | EXPORT_SYMBOL(__nf_conntrack_confirm); | ||
882 | EXPORT_SYMBOL(nf_ct_get_tuple); | ||
883 | EXPORT_SYMBOL(nf_ct_invert_tuple); | ||
884 | EXPORT_SYMBOL(nf_conntrack_in); | ||
885 | EXPORT_SYMBOL(__nf_conntrack_attach); | ||
886 | EXPORT_SYMBOL(nf_conntrack_alloc); | ||
887 | EXPORT_SYMBOL(nf_conntrack_free); | ||
888 | EXPORT_SYMBOL(nf_conntrack_flush); | ||
889 | EXPORT_SYMBOL(nf_ct_remove_expectations); | ||
890 | EXPORT_SYMBOL(nf_ct_helper_find_get); | ||
891 | EXPORT_SYMBOL(nf_ct_helper_put); | ||
892 | EXPORT_SYMBOL(__nf_conntrack_helper_find_byname); | ||
893 | EXPORT_SYMBOL(__nf_conntrack_find); | ||
894 | EXPORT_SYMBOL(nf_ct_unlink_expect); | ||
895 | EXPORT_SYMBOL(nf_conntrack_hash_insert); | ||
896 | EXPORT_SYMBOL(__nf_conntrack_expect_find); | ||
897 | EXPORT_SYMBOL(nf_conntrack_expect_find); | ||
898 | EXPORT_SYMBOL(nf_conntrack_expect_list); | ||
899 | #if defined(CONFIG_NF_CT_NETLINK) || \ | ||
900 | defined(CONFIG_NF_CT_NETLINK_MODULE) | ||
901 | EXPORT_SYMBOL(nf_ct_port_tuple_to_nfattr); | ||
902 | EXPORT_SYMBOL(nf_ct_port_nfattr_to_tuple); | ||
903 | #endif | ||
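print_tuple() loses its static above and gains an EXPORT_SYMBOL_GPL because the expectation /proc code removed from this file now lives with the rest of the expectation handling. A sketch of how that relocated code can keep using it, assuming the renamed __nf_ct_l4proto_find() from the hunk above; the function name here is illustrative only:

/* Illustrative only: mirrors the exp_seq_show() body removed above. */
static int sketch_show_expect(struct seq_file *s,
			      const struct nf_conntrack_expect *exp)
{
	return print_tuple(s, &exp->tuple,
			   __nf_ct_l3proto_find(exp->tuple.src.l3num),
			   __nf_ct_l4proto_find(exp->tuple.src.l3num,
						exp->tuple.dst.protonum));
}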
diff --git a/net/netfilter/nf_conntrack_tftp.c b/net/netfilter/nf_conntrack_tftp.c new file mode 100644 index 000000000000..f5bffe24b0a5 --- /dev/null +++ b/net/netfilter/nf_conntrack_tftp.c | |||
@@ -0,0 +1,160 @@ | |||
1 | /* (C) 2001-2002 Magnus Boden <mb@ozaba.mine.nu> | ||
2 | * | ||
3 | * This program is free software; you can redistribute it and/or modify | ||
4 | * it under the terms of the GNU General Public License version 2 as | ||
5 | * published by the Free Software Foundation. | ||
6 | */ | ||
7 | |||
8 | #include <linux/module.h> | ||
9 | #include <linux/moduleparam.h> | ||
10 | #include <linux/in.h> | ||
11 | #include <linux/udp.h> | ||
12 | #include <linux/netfilter.h> | ||
13 | |||
14 | #include <net/netfilter/nf_conntrack.h> | ||
15 | #include <net/netfilter/nf_conntrack_tuple.h> | ||
16 | #include <net/netfilter/nf_conntrack_expect.h> | ||
17 | #include <net/netfilter/nf_conntrack_ecache.h> | ||
18 | #include <net/netfilter/nf_conntrack_helper.h> | ||
19 | #include <linux/netfilter/nf_conntrack_tftp.h> | ||
20 | |||
21 | MODULE_AUTHOR("Magnus Boden <mb@ozaba.mine.nu>"); | ||
22 | MODULE_DESCRIPTION("TFTP connection tracking helper"); | ||
23 | MODULE_LICENSE("GPL"); | ||
24 | MODULE_ALIAS("ip_conntrack_tftp"); | ||
25 | |||
26 | #define MAX_PORTS 8 | ||
27 | static unsigned short ports[MAX_PORTS]; | ||
28 | static int ports_c; | ||
29 | module_param_array(ports, ushort, &ports_c, 0400); | ||
30 | MODULE_PARM_DESC(ports, "Port numbers of TFTP servers"); | ||
31 | |||
32 | #if 0 | ||
33 | #define DEBUGP(format, args...) printk("%s:%s:" format, \ | ||
34 | __FILE__, __FUNCTION__ , ## args) | ||
35 | #else | ||
36 | #define DEBUGP(format, args...) | ||
37 | #endif | ||
38 | |||
39 | unsigned int (*nf_nat_tftp_hook)(struct sk_buff **pskb, | ||
40 | enum ip_conntrack_info ctinfo, | ||
41 | struct nf_conntrack_expect *exp) __read_mostly; | ||
42 | EXPORT_SYMBOL_GPL(nf_nat_tftp_hook); | ||
43 | |||
44 | static int tftp_help(struct sk_buff **pskb, | ||
45 | unsigned int protoff, | ||
46 | struct nf_conn *ct, | ||
47 | enum ip_conntrack_info ctinfo) | ||
48 | { | ||
49 | struct tftphdr _tftph, *tfh; | ||
50 | struct nf_conntrack_expect *exp; | ||
51 | struct nf_conntrack_tuple *tuple; | ||
52 | unsigned int ret = NF_ACCEPT; | ||
53 | int family = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num; | ||
54 | typeof(nf_nat_tftp_hook) nf_nat_tftp; | ||
55 | |||
56 | tfh = skb_header_pointer(*pskb, protoff + sizeof(struct udphdr), | ||
57 | sizeof(_tftph), &_tftph); | ||
58 | if (tfh == NULL) | ||
59 | return NF_ACCEPT; | ||
60 | |||
61 | switch (ntohs(tfh->opcode)) { | ||
62 | case TFTP_OPCODE_READ: | ||
63 | case TFTP_OPCODE_WRITE: | ||
64 | /* RRQ and WRQ work the same way */ | ||
65 | DEBUGP(""); | ||
66 | NF_CT_DUMP_TUPLE(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); | ||
67 | NF_CT_DUMP_TUPLE(&ct->tuplehash[IP_CT_DIR_REPLY].tuple); | ||
68 | |||
69 | exp = nf_conntrack_expect_alloc(ct); | ||
70 | if (exp == NULL) | ||
71 | return NF_DROP; | ||
72 | tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple; | ||
73 | nf_conntrack_expect_init(exp, family, | ||
74 | &tuple->src.u3, &tuple->dst.u3, | ||
75 | IPPROTO_UDP, | ||
76 | NULL, &tuple->dst.u.udp.port); | ||
77 | |||
78 | DEBUGP("expect: "); | ||
79 | NF_CT_DUMP_TUPLE(&exp->tuple); | ||
80 | NF_CT_DUMP_TUPLE(&exp->mask); | ||
81 | |||
82 | nf_nat_tftp = rcu_dereference(nf_nat_tftp_hook); | ||
83 | if (nf_nat_tftp && ct->status & IPS_NAT_MASK) | ||
84 | ret = nf_nat_tftp(pskb, ctinfo, exp); | ||
85 | else if (nf_conntrack_expect_related(exp) != 0) | ||
86 | ret = NF_DROP; | ||
87 | nf_conntrack_expect_put(exp); | ||
88 | break; | ||
89 | case TFTP_OPCODE_DATA: | ||
90 | case TFTP_OPCODE_ACK: | ||
91 | DEBUGP("Data/ACK opcode\n"); | ||
92 | break; | ||
93 | case TFTP_OPCODE_ERROR: | ||
94 | DEBUGP("Error opcode\n"); | ||
95 | break; | ||
96 | default: | ||
97 | DEBUGP("Unknown opcode\n"); | ||
98 | } | ||
99 | return ret; | ||
100 | } | ||
101 | |||
102 | static struct nf_conntrack_helper tftp[MAX_PORTS][2] __read_mostly; | ||
103 | static char tftp_names[MAX_PORTS][2][sizeof("tftp-65535")] __read_mostly; | ||
104 | |||
105 | static void nf_conntrack_tftp_fini(void) | ||
106 | { | ||
107 | int i, j; | ||
108 | |||
109 | for (i = 0; i < ports_c; i++) { | ||
110 | for (j = 0; j < 2; j++) | ||
111 | nf_conntrack_helper_unregister(&tftp[i][j]); | ||
112 | } | ||
113 | } | ||
114 | |||
115 | static int __init nf_conntrack_tftp_init(void) | ||
116 | { | ||
117 | int i, j, ret; | ||
118 | char *tmpname; | ||
119 | |||
120 | if (ports_c == 0) | ||
121 | ports[ports_c++] = TFTP_PORT; | ||
122 | |||
123 | for (i = 0; i < ports_c; i++) { | ||
124 | memset(&tftp[i], 0, sizeof(tftp[i])); | ||
125 | |||
126 | tftp[i][0].tuple.src.l3num = AF_INET; | ||
127 | tftp[i][1].tuple.src.l3num = AF_INET6; | ||
128 | for (j = 0; j < 2; j++) { | ||
129 | tftp[i][j].tuple.dst.protonum = IPPROTO_UDP; | ||
130 | tftp[i][j].tuple.src.u.udp.port = htons(ports[i]); | ||
131 | tftp[i][j].mask.src.l3num = 0xFFFF; | ||
132 | tftp[i][j].mask.dst.protonum = 0xFF; | ||
133 | tftp[i][j].mask.src.u.udp.port = htons(0xFFFF); | ||
134 | tftp[i][j].max_expected = 1; | ||
135 | tftp[i][j].timeout = 5 * 60; /* 5 minutes */ | ||
136 | tftp[i][j].me = THIS_MODULE; | ||
137 | tftp[i][j].help = tftp_help; | ||
138 | |||
139 | tmpname = &tftp_names[i][j][0]; | ||
140 | if (ports[i] == TFTP_PORT) | ||
141 | sprintf(tmpname, "tftp"); | ||
142 | else | ||
143 | sprintf(tmpname, "tftp-%u", i); | ||
144 | tftp[i][j].name = tmpname; | ||
145 | |||
146 | ret = nf_conntrack_helper_register(&tftp[i][j]); | ||
147 | if (ret) { | ||
148 | printk("nf_ct_tftp: failed to register helper " | ||
149 | "for pf: %u port: %u\n", | ||
150 | tftp[i][j].tuple.src.l3num, ports[i]); | ||
151 | nf_conntrack_tftp_fini(); | ||
152 | return ret; | ||
153 | } | ||
154 | } | ||
155 | } | ||
156 | return 0; | ||
157 | } | ||
158 | |||
159 | module_init(nf_conntrack_tftp_init); | ||
160 | module_exit(nf_conntrack_tftp_fini); | ||
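For context on why tftp_help() only builds an expectation for RRQ and WRQ: per RFC 1350 the server answers a read/write request from a freshly chosen source port, so the data traffic cannot be matched by the original tuple and has to be expected with the source port wildcarded (the NULL argument to nf_conntrack_expect_init() above). A small userspace-style sketch of the opcode test; the struct and function names are made up:

/* Sketch only: the TFTP opcode is the first two octets, network byte order. */
#include <arpa/inet.h>
#include <stdint.h>

struct tftphdr_sketch {
	uint16_t opcode;	/* 1=RRQ 2=WRQ 3=DATA 4=ACK 5=ERROR (RFC 1350) */
};

static int tftp_is_request(const struct tftphdr_sketch *tfh)
{
	uint16_t op = ntohs(tfh->opcode);

	return op == 1 || op == 2;	/* only RRQ/WRQ set up an expectation */
}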
diff --git a/net/netfilter/nf_sysctl.c b/net/netfilter/nf_sysctl.c new file mode 100644 index 000000000000..06ddddb2911f --- /dev/null +++ b/net/netfilter/nf_sysctl.c | |||
@@ -0,0 +1,134 @@ | |||
1 | /* nf_sysctl.c netfilter sysctl registration/unregistration | ||
2 | * | ||
3 | * Copyright (c) 2006 Patrick McHardy <kaber@trash.net> | ||
4 | */ | ||
5 | #include <linux/module.h> | ||
6 | #include <linux/sysctl.h> | ||
7 | #include <linux/string.h> | ||
8 | #include <linux/slab.h> | ||
9 | |||
10 | static void | ||
11 | path_free(struct ctl_table *path, struct ctl_table *table) | ||
12 | { | ||
13 | struct ctl_table *t, *next; | ||
14 | |||
15 | for (t = path; t != NULL && t != table; t = next) { | ||
16 | next = t->child; | ||
17 | kfree(t); | ||
18 | } | ||
19 | } | ||
20 | |||
21 | static struct ctl_table * | ||
22 | path_dup(struct ctl_table *path, struct ctl_table *table) | ||
23 | { | ||
24 | struct ctl_table *t, *last = NULL, *tmp; | ||
25 | |||
26 | for (t = path; t != NULL; t = t->child) { | ||
27 | /* twice the size since path elements are terminated by an | ||
28 | * empty element */ | ||
29 | tmp = kmemdup(t, 2 * sizeof(*t), GFP_KERNEL); | ||
30 | if (tmp == NULL) { | ||
31 | if (last != NULL) | ||
32 | path_free(path, table); | ||
33 | return NULL; | ||
34 | } | ||
35 | |||
36 | if (last != NULL) | ||
37 | last->child = tmp; | ||
38 | else | ||
39 | path = tmp; | ||
40 | last = tmp; | ||
41 | } | ||
42 | |||
43 | if (last != NULL) | ||
44 | last->child = table; | ||
45 | else | ||
46 | path = table; | ||
47 | |||
48 | return path; | ||
49 | } | ||
50 | |||
51 | struct ctl_table_header * | ||
52 | nf_register_sysctl_table(struct ctl_table *path, struct ctl_table *table) | ||
53 | { | ||
54 | struct ctl_table_header *header; | ||
55 | |||
56 | path = path_dup(path, table); | ||
57 | if (path == NULL) | ||
58 | return NULL; | ||
59 | header = register_sysctl_table(path, 0); | ||
60 | if (header == NULL) | ||
61 | path_free(path, table); | ||
62 | return header; | ||
63 | } | ||
64 | EXPORT_SYMBOL_GPL(nf_register_sysctl_table); | ||
65 | |||
66 | void | ||
67 | nf_unregister_sysctl_table(struct ctl_table_header *header, | ||
68 | struct ctl_table *table) | ||
69 | { | ||
70 | struct ctl_table *path = header->ctl_table; | ||
71 | |||
72 | unregister_sysctl_table(header); | ||
73 | path_free(path, table); | ||
74 | } | ||
75 | EXPORT_SYMBOL_GPL(nf_unregister_sysctl_table); | ||
76 | |||
77 | /* net/netfilter */ | ||
78 | static struct ctl_table nf_net_netfilter_table[] = { | ||
79 | { | ||
80 | .ctl_name = NET_NETFILTER, | ||
81 | .procname = "netfilter", | ||
82 | .mode = 0555, | ||
83 | }, | ||
84 | { | ||
85 | .ctl_name = 0 | ||
86 | } | ||
87 | }; | ||
88 | struct ctl_table nf_net_netfilter_sysctl_path[] = { | ||
89 | { | ||
90 | .ctl_name = CTL_NET, | ||
91 | .procname = "net", | ||
92 | .mode = 0555, | ||
93 | .child = nf_net_netfilter_table, | ||
94 | }, | ||
95 | { | ||
96 | .ctl_name = 0 | ||
97 | } | ||
98 | }; | ||
99 | EXPORT_SYMBOL_GPL(nf_net_netfilter_sysctl_path); | ||
100 | |||
101 | /* net/ipv4/netfilter */ | ||
102 | static struct ctl_table nf_net_ipv4_netfilter_table[] = { | ||
103 | { | ||
104 | .ctl_name = NET_IPV4_NETFILTER, | ||
105 | .procname = "netfilter", | ||
106 | .mode = 0555, | ||
107 | }, | ||
108 | { | ||
109 | .ctl_name = 0 | ||
110 | } | ||
111 | }; | ||
112 | static struct ctl_table nf_net_ipv4_table[] = { | ||
113 | { | ||
114 | .ctl_name = NET_IPV4, | ||
115 | .procname = "ipv4", | ||
116 | .mode = 0555, | ||
117 | .child = nf_net_ipv4_netfilter_table, | ||
118 | }, | ||
119 | { | ||
120 | .ctl_name = 0 | ||
121 | } | ||
122 | }; | ||
123 | struct ctl_table nf_net_ipv4_netfilter_sysctl_path[] = { | ||
124 | { | ||
125 | .ctl_name = CTL_NET, | ||
126 | .procname = "net", | ||
127 | .mode = 0555, | ||
128 | .child = nf_net_ipv4_table, | ||
129 | }, | ||
130 | { | ||
131 | .ctl_name = 0 | ||
132 | } | ||
133 | }; | ||
134 | EXPORT_SYMBOL_GPL(nf_net_ipv4_netfilter_sysctl_path); | ||
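Usage sketch for the helpers above: a module passes one of the exported path arrays plus its own table to nf_register_sysctl_table(), keeps the returned header, and hands both back to nf_unregister_sysctl_table() on unload. The table, variable and function names below are invented for illustration; a real entry would also pick a .ctl_name:

/* Illustrative only: publish net/netfilter/nf_example as an integer sysctl. */
static int nf_example __read_mostly = 1;

static struct ctl_table nf_example_table[] = {
	{
		.procname	= "nf_example",
		.data		= &nf_example,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{ .ctl_name = 0 }
};

static struct ctl_table_header *nf_example_header;

static int __init nf_example_init(void)
{
	nf_example_header = nf_register_sysctl_table(nf_net_netfilter_sysctl_path,
						     nf_example_table);
	return nf_example_header ? 0 : -ENOMEM;
}

static void __exit nf_example_exit(void)
{
	nf_unregister_sysctl_table(nf_example_header, nf_example_table);
}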
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c index 1e5207b80fe5..d1505dd25c66 100644 --- a/net/netfilter/nfnetlink_log.c +++ b/net/netfilter/nfnetlink_log.c | |||
@@ -408,13 +408,13 @@ __build_packet_message(struct nfulnl_instance *inst, | |||
408 | const struct net_device *indev, | 408 | const struct net_device *indev, |
409 | const struct net_device *outdev, | 409 | const struct net_device *outdev, |
410 | const struct nf_loginfo *li, | 410 | const struct nf_loginfo *li, |
411 | const char *prefix) | 411 | const char *prefix, unsigned int plen) |
412 | { | 412 | { |
413 | unsigned char *old_tail; | 413 | unsigned char *old_tail; |
414 | struct nfulnl_msg_packet_hdr pmsg; | 414 | struct nfulnl_msg_packet_hdr pmsg; |
415 | struct nlmsghdr *nlh; | 415 | struct nlmsghdr *nlh; |
416 | struct nfgenmsg *nfmsg; | 416 | struct nfgenmsg *nfmsg; |
417 | u_int32_t tmp_uint; | 417 | __be32 tmp_uint; |
418 | 418 | ||
419 | UDEBUG("entered\n"); | 419 | UDEBUG("entered\n"); |
420 | 420 | ||
@@ -432,12 +432,8 @@ __build_packet_message(struct nfulnl_instance *inst, | |||
432 | 432 | ||
433 | NFA_PUT(inst->skb, NFULA_PACKET_HDR, sizeof(pmsg), &pmsg); | 433 | NFA_PUT(inst->skb, NFULA_PACKET_HDR, sizeof(pmsg), &pmsg); |
434 | 434 | ||
435 | if (prefix) { | 435 | if (prefix) |
436 | int slen = strlen(prefix); | 436 | NFA_PUT(inst->skb, NFULA_PREFIX, plen, prefix); |
437 | if (slen > NFULNL_PREFIXLEN) | ||
438 | slen = NFULNL_PREFIXLEN; | ||
439 | NFA_PUT(inst->skb, NFULA_PREFIX, slen, prefix); | ||
440 | } | ||
441 | 437 | ||
442 | if (indev) { | 438 | if (indev) { |
443 | tmp_uint = htonl(indev->ifindex); | 439 | tmp_uint = htonl(indev->ifindex); |
@@ -501,18 +497,16 @@ __build_packet_message(struct nfulnl_instance *inst, | |||
501 | #endif | 497 | #endif |
502 | } | 498 | } |
503 | 499 | ||
504 | if (skb->nfmark) { | 500 | if (skb->mark) { |
505 | tmp_uint = htonl(skb->nfmark); | 501 | tmp_uint = htonl(skb->mark); |
506 | NFA_PUT(inst->skb, NFULA_MARK, sizeof(tmp_uint), &tmp_uint); | 502 | NFA_PUT(inst->skb, NFULA_MARK, sizeof(tmp_uint), &tmp_uint); |
507 | } | 503 | } |
508 | 504 | ||
509 | if (indev && skb->dev && skb->dev->hard_header_parse) { | 505 | if (indev && skb->dev && skb->dev->hard_header_parse) { |
510 | struct nfulnl_msg_packet_hw phw; | 506 | struct nfulnl_msg_packet_hw phw; |
511 | 507 | int len = skb->dev->hard_header_parse((struct sk_buff *)skb, | |
512 | phw.hw_addrlen = | ||
513 | skb->dev->hard_header_parse((struct sk_buff *)skb, | ||
514 | phw.hw_addr); | 508 | phw.hw_addr); |
515 | phw.hw_addrlen = htons(phw.hw_addrlen); | 509 | phw.hw_addrlen = htons(len); |
516 | NFA_PUT(inst->skb, NFULA_HWADDR, sizeof(phw), &phw); | 510 | NFA_PUT(inst->skb, NFULA_HWADDR, sizeof(phw), &phw); |
517 | } | 511 | } |
518 | 512 | ||
@@ -529,7 +523,7 @@ __build_packet_message(struct nfulnl_instance *inst, | |||
529 | if (skb->sk) { | 523 | if (skb->sk) { |
530 | read_lock_bh(&skb->sk->sk_callback_lock); | 524 | read_lock_bh(&skb->sk->sk_callback_lock); |
531 | if (skb->sk->sk_socket && skb->sk->sk_socket->file) { | 525 | if (skb->sk->sk_socket && skb->sk->sk_socket->file) { |
532 | u_int32_t uid = htonl(skb->sk->sk_socket->file->f_uid); | 526 | __be32 uid = htonl(skb->sk->sk_socket->file->f_uid); |
533 | /* need to unlock here since NFA_PUT may goto */ | 527 | /* need to unlock here since NFA_PUT may goto */ |
534 | read_unlock_bh(&skb->sk->sk_callback_lock); | 528 | read_unlock_bh(&skb->sk->sk_callback_lock); |
535 | NFA_PUT(inst->skb, NFULA_UID, sizeof(uid), &uid); | 529 | NFA_PUT(inst->skb, NFULA_UID, sizeof(uid), &uid); |
@@ -603,6 +597,7 @@ nfulnl_log_packet(unsigned int pf, | |||
603 | const struct nf_loginfo *li; | 597 | const struct nf_loginfo *li; |
604 | unsigned int qthreshold; | 598 | unsigned int qthreshold; |
605 | unsigned int nlbufsiz; | 599 | unsigned int nlbufsiz; |
600 | unsigned int plen; | ||
606 | 601 | ||
607 | if (li_user && li_user->type == NF_LOG_TYPE_ULOG) | 602 | if (li_user && li_user->type == NF_LOG_TYPE_ULOG) |
608 | li = li_user; | 603 | li = li_user; |
@@ -618,6 +613,10 @@ nfulnl_log_packet(unsigned int pf, | |||
618 | return; | 613 | return; |
619 | } | 614 | } |
620 | 615 | ||
616 | plen = 0; | ||
617 | if (prefix) | ||
618 | plen = strlen(prefix); | ||
619 | |||
621 | /* all macros expand to constant values at compile time */ | 620 | /* all macros expand to constant values at compile time */ |
622 | /* FIXME: do we want to make the size calculation conditional based on | 621 | /* FIXME: do we want to make the size calculation conditional based on |
623 | * what is actually present? way more branches and checks, but more | 622 | * what is actually present? way more branches and checks, but more |
@@ -632,7 +631,7 @@ nfulnl_log_packet(unsigned int pf, | |||
632 | #endif | 631 | #endif |
633 | + NFA_SPACE(sizeof(u_int32_t)) /* mark */ | 632 | + NFA_SPACE(sizeof(u_int32_t)) /* mark */ |
634 | + NFA_SPACE(sizeof(u_int32_t)) /* uid */ | 633 | + NFA_SPACE(sizeof(u_int32_t)) /* uid */ |
635 | + NFA_SPACE(NFULNL_PREFIXLEN) /* prefix */ | 634 | + NFA_SPACE(plen) /* prefix */ |
636 | + NFA_SPACE(sizeof(struct nfulnl_msg_packet_hw)) | 635 | + NFA_SPACE(sizeof(struct nfulnl_msg_packet_hw)) |
637 | + NFA_SPACE(sizeof(struct nfulnl_msg_packet_timestamp)); | 636 | + NFA_SPACE(sizeof(struct nfulnl_msg_packet_timestamp)); |
638 | 637 | ||
@@ -703,7 +702,7 @@ nfulnl_log_packet(unsigned int pf, | |||
703 | inst->qlen++; | 702 | inst->qlen++; |
704 | 703 | ||
705 | __build_packet_message(inst, skb, data_len, pf, | 704 | __build_packet_message(inst, skb, data_len, pf, |
706 | hooknum, in, out, li, prefix); | 705 | hooknum, in, out, li, prefix, plen); |
707 | 706 | ||
708 | /* timer_pending always called within inst->lock, so there | 707 | /* timer_pending always called within inst->lock, so there |
709 | * is no chance of a race here */ | 708 | * is no chance of a race here */ |
@@ -882,15 +881,15 @@ nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb, | |||
882 | } | 881 | } |
883 | 882 | ||
884 | if (nfula[NFULA_CFG_TIMEOUT-1]) { | 883 | if (nfula[NFULA_CFG_TIMEOUT-1]) { |
885 | u_int32_t timeout = | 884 | __be32 timeout = |
886 | *(u_int32_t *)NFA_DATA(nfula[NFULA_CFG_TIMEOUT-1]); | 885 | *(__be32 *)NFA_DATA(nfula[NFULA_CFG_TIMEOUT-1]); |
887 | 886 | ||
888 | nfulnl_set_timeout(inst, ntohl(timeout)); | 887 | nfulnl_set_timeout(inst, ntohl(timeout)); |
889 | } | 888 | } |
890 | 889 | ||
891 | if (nfula[NFULA_CFG_NLBUFSIZ-1]) { | 890 | if (nfula[NFULA_CFG_NLBUFSIZ-1]) { |
892 | u_int32_t nlbufsiz = | 891 | __be32 nlbufsiz = |
893 | *(u_int32_t *)NFA_DATA(nfula[NFULA_CFG_NLBUFSIZ-1]); | 892 | *(__be32 *)NFA_DATA(nfula[NFULA_CFG_NLBUFSIZ-1]); |
894 | 893 | ||
895 | nfulnl_set_nlbufsiz(inst, ntohl(nlbufsiz)); | 894 | nfulnl_set_nlbufsiz(inst, ntohl(nlbufsiz)); |
896 | } | 895 | } |
@@ -903,8 +902,8 @@ nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb, | |||
903 | } | 902 | } |
904 | 903 | ||
905 | if (nfula[NFULA_CFG_FLAGS-1]) { | 904 | if (nfula[NFULA_CFG_FLAGS-1]) { |
906 | u_int16_t flags = | 905 | __be16 flags = |
907 | *(u_int16_t *)NFA_DATA(nfula[NFULA_CFG_FLAGS-1]); | 906 | *(__be16 *)NFA_DATA(nfula[NFULA_CFG_FLAGS-1]); |
908 | nfulnl_set_flags(inst, ntohs(flags)); | 907 | nfulnl_set_flags(inst, ntohs(flags)); |
909 | } | 908 | } |
910 | 909 | ||
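The nfnetlink_log hunks above follow a pattern that recurs throughout this merge: values carried inside netlink attributes stay in network byte order, so they are now declared __be32/__be16 and converted with ntohl()/ntohs() exactly once at the point of use, and the log prefix is accounted for with its real length (plen) instead of the fixed NFULNL_PREFIXLEN. Below is a minimal userspace sketch of the byte-order half of that pattern; struct my_attr and the timeout value are invented for illustration and are not the real NFULA_CFG_* layout.

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>	/* htonl(), ntohl() */

/* Stand-in for a netlink attribute payload: the value stays big-endian. */
struct my_attr {
	uint32_t be_timeout;	/* network byte order, like an NFULA_CFG_* field */
};

int main(void)
{
	struct my_attr a;
	uint32_t timeout_host = 2500;	/* arbitrary example value, host order */

	/* Sender: convert to network order before placing it in the message. */
	a.be_timeout = htonl(timeout_host);

	/* Receiver: convert back exactly once, at the point of use. */
	uint32_t timeout = ntohl(a.be_timeout);
	printf("timeout = %u (raw attribute word: 0x%08x)\n",
	       (unsigned)timeout, (unsigned)a.be_timeout);
	return 0;
}

The prefix change is the same idea applied to sizing: plen is computed with strlen() before the space estimate, so NFA_SPACE(plen) reserves only what the message really needs.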
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index e815a9aa6e95..a88a017da22c 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c | |||
@@ -349,7 +349,7 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue, | |||
349 | struct sk_buff *entskb = entry->skb; | 349 | struct sk_buff *entskb = entry->skb; |
350 | struct net_device *indev; | 350 | struct net_device *indev; |
351 | struct net_device *outdev; | 351 | struct net_device *outdev; |
352 | unsigned int tmp_uint; | 352 | __be32 tmp_uint; |
353 | 353 | ||
354 | QDEBUG("entered\n"); | 354 | QDEBUG("entered\n"); |
355 | 355 | ||
@@ -480,8 +480,8 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue, | |||
480 | #endif | 480 | #endif |
481 | } | 481 | } |
482 | 482 | ||
483 | if (entskb->nfmark) { | 483 | if (entskb->mark) { |
484 | tmp_uint = htonl(entskb->nfmark); | 484 | tmp_uint = htonl(entskb->mark); |
485 | NFA_PUT(skb, NFQA_MARK, sizeof(u_int32_t), &tmp_uint); | 485 | NFA_PUT(skb, NFQA_MARK, sizeof(u_int32_t), &tmp_uint); |
486 | } | 486 | } |
487 | 487 | ||
@@ -489,10 +489,9 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue, | |||
489 | && entskb->dev->hard_header_parse) { | 489 | && entskb->dev->hard_header_parse) { |
490 | struct nfqnl_msg_packet_hw phw; | 490 | struct nfqnl_msg_packet_hw phw; |
491 | 491 | ||
492 | phw.hw_addrlen = | 492 | int len = entskb->dev->hard_header_parse(entskb, |
493 | entskb->dev->hard_header_parse(entskb, | ||
494 | phw.hw_addr); | 493 | phw.hw_addr); |
495 | phw.hw_addrlen = htons(phw.hw_addrlen); | 494 | phw.hw_addrlen = htons(len); |
496 | NFA_PUT(skb, NFQA_HWADDR, sizeof(phw), &phw); | 495 | NFA_PUT(skb, NFQA_HWADDR, sizeof(phw), &phw); |
497 | } | 496 | } |
498 | 497 | ||
@@ -835,8 +834,8 @@ nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb, | |||
835 | } | 834 | } |
836 | 835 | ||
837 | if (nfqa[NFQA_MARK-1]) | 836 | if (nfqa[NFQA_MARK-1]) |
838 | entry->skb->nfmark = ntohl(*(u_int32_t *) | 837 | entry->skb->mark = ntohl(*(__be32 *) |
839 | NFA_DATA(nfqa[NFQA_MARK-1])); | 838 | NFA_DATA(nfqa[NFQA_MARK-1])); |
840 | 839 | ||
841 | issue_verdict(entry, verdict); | 840 | issue_verdict(entry, verdict); |
842 | instance_put(queue); | 841 | instance_put(queue); |
@@ -948,6 +947,14 @@ nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb, | |||
948 | ntohl(params->copy_range)); | 947 | ntohl(params->copy_range)); |
949 | } | 948 | } |
950 | 949 | ||
950 | if (nfqa[NFQA_CFG_QUEUE_MAXLEN-1]) { | ||
951 | __be32 *queue_maxlen; | ||
952 | queue_maxlen = NFA_DATA(nfqa[NFQA_CFG_QUEUE_MAXLEN-1]); | ||
953 | spin_lock_bh(&queue->lock); | ||
954 | queue->queue_maxlen = ntohl(*queue_maxlen); | ||
955 | spin_unlock_bh(&queue->lock); | ||
956 | } | ||
957 | |||
951 | out_put: | 958 | out_put: |
952 | instance_put(queue); | 959 | instance_put(queue); |
953 | return ret; | 960 | return ret; |
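The new NFQA_CFG_QUEUE_MAXLEN handling reads a big-endian attribute and updates queue->queue_maxlen with the instance lock held. A userspace analogue of that read-convert-store-under-lock step is sketched below, with a pthread mutex standing in for spin_lock_bh(); struct queue_instance and set_queue_maxlen() are illustrative names, not kernel API.

#include <stdio.h>
#include <stdint.h>
#include <pthread.h>
#include <arpa/inet.h>

/* Only the fields the sketch needs from the queue instance. */
struct queue_instance {
	pthread_mutex_t lock;	/* plays the role of queue->lock */
	uint32_t queue_maxlen;
};

/* Apply a big-endian queue-maxlen attribute to the instance. */
static void set_queue_maxlen(struct queue_instance *q, const uint32_t *be_maxlen)
{
	pthread_mutex_lock(&q->lock);	/* kernel side: spin_lock_bh(&queue->lock) */
	q->queue_maxlen = ntohl(*be_maxlen);
	pthread_mutex_unlock(&q->lock);
}

int main(void)
{
	struct queue_instance q = { PTHREAD_MUTEX_INITIALIZER, 1024 };
	uint32_t wire_value = htonl(4096);	/* as it would arrive in the attribute */

	set_queue_maxlen(&q, &wire_value);
	printf("queue_maxlen = %u\n", (unsigned)q.queue_maxlen);
	return 0;
}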
diff --git a/net/netfilter/xt_CONNMARK.c b/net/netfilter/xt_CONNMARK.c index c01524f817f0..b5548239d412 100644 --- a/net/netfilter/xt_CONNMARK.c +++ b/net/netfilter/xt_CONNMARK.c | |||
@@ -31,6 +31,9 @@ MODULE_ALIAS("ipt_CONNMARK"); | |||
31 | #include <linux/netfilter/x_tables.h> | 31 | #include <linux/netfilter/x_tables.h> |
32 | #include <linux/netfilter/xt_CONNMARK.h> | 32 | #include <linux/netfilter/xt_CONNMARK.h> |
33 | #include <net/netfilter/nf_conntrack_compat.h> | 33 | #include <net/netfilter/nf_conntrack_compat.h> |
34 | #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) | ||
35 | #include <net/netfilter/nf_conntrack_ecache.h> | ||
36 | #endif | ||
34 | 37 | ||
35 | static unsigned int | 38 | static unsigned int |
36 | target(struct sk_buff **pskb, | 39 | target(struct sk_buff **pskb, |
@@ -42,7 +45,7 @@ target(struct sk_buff **pskb, | |||
42 | { | 45 | { |
43 | const struct xt_connmark_target_info *markinfo = targinfo; | 46 | const struct xt_connmark_target_info *markinfo = targinfo; |
44 | u_int32_t diff; | 47 | u_int32_t diff; |
45 | u_int32_t nfmark; | 48 | u_int32_t mark; |
46 | u_int32_t newmark; | 49 | u_int32_t newmark; |
47 | u_int32_t ctinfo; | 50 | u_int32_t ctinfo; |
48 | u_int32_t *ctmark = nf_ct_get_mark(*pskb, &ctinfo); | 51 | u_int32_t *ctmark = nf_ct_get_mark(*pskb, &ctinfo); |
@@ -62,7 +65,7 @@ target(struct sk_buff **pskb, | |||
62 | break; | 65 | break; |
63 | case XT_CONNMARK_SAVE: | 66 | case XT_CONNMARK_SAVE: |
64 | newmark = (*ctmark & ~markinfo->mask) | | 67 | newmark = (*ctmark & ~markinfo->mask) | |
65 | ((*pskb)->nfmark & markinfo->mask); | 68 | ((*pskb)->mark & markinfo->mask); |
66 | if (*ctmark != newmark) { | 69 | if (*ctmark != newmark) { |
67 | *ctmark = newmark; | 70 | *ctmark = newmark; |
68 | #if defined(CONFIG_IP_NF_CONNTRACK) || defined(CONFIG_IP_NF_CONNTRACK_MODULE) | 71 | #if defined(CONFIG_IP_NF_CONNTRACK) || defined(CONFIG_IP_NF_CONNTRACK_MODULE) |
@@ -73,10 +76,10 @@ target(struct sk_buff **pskb, | |||
73 | } | 76 | } |
74 | break; | 77 | break; |
75 | case XT_CONNMARK_RESTORE: | 78 | case XT_CONNMARK_RESTORE: |
76 | nfmark = (*pskb)->nfmark; | 79 | mark = (*pskb)->mark; |
77 | diff = (*ctmark ^ nfmark) & markinfo->mask; | 80 | diff = (*ctmark ^ mark) & markinfo->mask; |
78 | if (diff != 0) | 81 | if (diff != 0) |
79 | (*pskb)->nfmark = nfmark ^ diff; | 82 | (*pskb)->mark = mark ^ diff; |
80 | break; | 83 | break; |
81 | } | 84 | } |
82 | } | 85 | } |
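The CONNMARK SAVE/RESTORE arithmetic above is compact enough to misread, so here is a standalone worked example of the same bit operations. The mark and mask values are invented; only the expressions mirror the target code. It shows that RESTORE rewrites only the bits selected by markinfo->mask and leaves the rest of skb->mark alone.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t mask    = 0x0000ffff;	/* markinfo->mask: only the low 16 bits are managed */
	uint32_t ctmark  = 0x00001234;	/* mark stored on the conntrack entry */
	uint32_t skbmark = 0xabcd5678;	/* skb->mark with unrelated high bits set */

	/* XT_CONNMARK_SAVE: copy the masked packet bits into the conntrack mark. */
	uint32_t saved = (ctmark & ~mask) | (skbmark & mask);

	/* XT_CONNMARK_RESTORE: copy the masked conntrack bits back into the
	 * packet mark, leaving everything outside the mask untouched. */
	uint32_t diff = (ctmark ^ skbmark) & mask;
	uint32_t restored = skbmark ^ diff;

	printf("saved    = 0x%08x\n", (unsigned)saved);	/* 0x00005678 */
	printf("restored = 0x%08x\n", (unsigned)restored);	/* 0xabcd1234 */
	return 0;
}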
diff --git a/net/netfilter/xt_MARK.c b/net/netfilter/xt_MARK.c index c6e860a7114f..0b48547e8d64 100644 --- a/net/netfilter/xt_MARK.c +++ b/net/netfilter/xt_MARK.c | |||
@@ -31,8 +31,8 @@ target_v0(struct sk_buff **pskb, | |||
31 | { | 31 | { |
32 | const struct xt_mark_target_info *markinfo = targinfo; | 32 | const struct xt_mark_target_info *markinfo = targinfo; |
33 | 33 | ||
34 | if((*pskb)->nfmark != markinfo->mark) | 34 | if((*pskb)->mark != markinfo->mark) |
35 | (*pskb)->nfmark = markinfo->mark; | 35 | (*pskb)->mark = markinfo->mark; |
36 | 36 | ||
37 | return XT_CONTINUE; | 37 | return XT_CONTINUE; |
38 | } | 38 | } |
@@ -54,16 +54,16 @@ target_v1(struct sk_buff **pskb, | |||
54 | break; | 54 | break; |
55 | 55 | ||
56 | case XT_MARK_AND: | 56 | case XT_MARK_AND: |
57 | mark = (*pskb)->nfmark & markinfo->mark; | 57 | mark = (*pskb)->mark & markinfo->mark; |
58 | break; | 58 | break; |
59 | 59 | ||
60 | case XT_MARK_OR: | 60 | case XT_MARK_OR: |
61 | mark = (*pskb)->nfmark | markinfo->mark; | 61 | mark = (*pskb)->mark | markinfo->mark; |
62 | break; | 62 | break; |
63 | } | 63 | } |
64 | 64 | ||
65 | if((*pskb)->nfmark != mark) | 65 | if((*pskb)->mark != mark) |
66 | (*pskb)->nfmark = mark; | 66 | (*pskb)->mark = mark; |
67 | 67 | ||
68 | return XT_CONTINUE; | 68 | return XT_CONTINUE; |
69 | } | 69 | } |
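For the MARK target_v1 above, the three modes reduce to plain bit operations on skb->mark. The short sketch below replays them on an example value; the enum names are local stand-ins for XT_MARK_SET/AND/OR, not the kernel definitions.

#include <stdio.h>
#include <stdint.h>

enum { MARK_SET, MARK_AND, MARK_OR };	/* local stand-ins for XT_MARK_* */

static uint32_t apply_mark(uint32_t skb_mark, int mode, uint32_t value)
{
	switch (mode) {
	case MARK_SET:
		return value;
	case MARK_AND:
		return skb_mark & value;
	case MARK_OR:
		return skb_mark | value;
	}
	return skb_mark;
}

int main(void)
{
	uint32_t mark = 0x00000f0f;

	printf("SET 0x10 -> 0x%08x\n", (unsigned)apply_mark(mark, MARK_SET, 0x10));	/* 0x00000010 */
	printf("AND 0xff -> 0x%08x\n", (unsigned)apply_mark(mark, MARK_AND, 0xff));	/* 0x0000000f */
	printf("OR  0xf0 -> 0x%08x\n", (unsigned)apply_mark(mark, MARK_OR, 0xf0));	/* 0x00000fff */
	return 0;
}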
diff --git a/net/netfilter/xt_NFLOG.c b/net/netfilter/xt_NFLOG.c new file mode 100644 index 000000000000..901ed7abaa1b --- /dev/null +++ b/net/netfilter/xt_NFLOG.c | |||
@@ -0,0 +1,86 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2006 Patrick McHardy <kaber@trash.net> | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 as | ||
6 | * published by the Free Software Foundation. | ||
7 | */ | ||
8 | |||
9 | #include <linux/module.h> | ||
10 | #include <linux/init.h> | ||
11 | #include <linux/skbuff.h> | ||
12 | |||
13 | #include <linux/netfilter/x_tables.h> | ||
14 | #include <linux/netfilter/xt_NFLOG.h> | ||
15 | |||
16 | MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>"); | ||
17 | MODULE_DESCRIPTION("x_tables NFLOG target"); | ||
18 | MODULE_LICENSE("GPL"); | ||
19 | MODULE_ALIAS("ipt_NFLOG"); | ||
20 | MODULE_ALIAS("ip6t_NFLOG"); | ||
21 | |||
22 | static unsigned int | ||
23 | nflog_target(struct sk_buff **pskb, | ||
24 | const struct net_device *in, const struct net_device *out, | ||
25 | unsigned int hooknum, const struct xt_target *target, | ||
26 | const void *targinfo) | ||
27 | { | ||
28 | const struct xt_nflog_info *info = targinfo; | ||
29 | struct nf_loginfo li; | ||
30 | |||
31 | li.type = NF_LOG_TYPE_ULOG; | ||
32 | li.u.ulog.copy_len = info->len; | ||
33 | li.u.ulog.group = info->group; | ||
34 | li.u.ulog.qthreshold = info->threshold; | ||
35 | |||
36 | nf_log_packet(target->family, hooknum, *pskb, in, out, &li, | ||
37 | "%s", info->prefix); | ||
38 | return XT_CONTINUE; | ||
39 | } | ||
40 | |||
41 | static int | ||
42 | nflog_checkentry(const char *tablename, const void *entry, | ||
43 | const struct xt_target *target, void *targetinfo, | ||
44 | unsigned int hookmask) | ||
45 | { | ||
46 | struct xt_nflog_info *info = targetinfo; | ||
47 | |||
48 | if (info->flags & ~XT_NFLOG_MASK) | ||
49 | return 0; | ||
50 | if (info->prefix[sizeof(info->prefix) - 1] != '\0') | ||
51 | return 0; | ||
52 | return 1; | ||
53 | } | ||
54 | |||
55 | static struct xt_target xt_nflog_target[] = { | ||
56 | { | ||
57 | .name = "NFLOG", | ||
58 | .family = AF_INET, | ||
59 | .checkentry = nflog_checkentry, | ||
60 | .target = nflog_target, | ||
61 | .targetsize = sizeof(struct xt_nflog_info), | ||
62 | .me = THIS_MODULE, | ||
63 | }, | ||
64 | { | ||
65 | .name = "NFLOG", | ||
66 | .family = AF_INET6, | ||
67 | .checkentry = nflog_checkentry, | ||
68 | .target = nflog_target, | ||
69 | .targetsize = sizeof(struct xt_nflog_info), | ||
70 | .me = THIS_MODULE, | ||
71 | }, | ||
72 | }; | ||
73 | |||
74 | static int __init xt_nflog_init(void) | ||
75 | { | ||
76 | return xt_register_targets(xt_nflog_target, | ||
77 | ARRAY_SIZE(xt_nflog_target)); | ||
78 | } | ||
79 | |||
80 | static void __exit xt_nflog_fini(void) | ||
81 | { | ||
82 | xt_unregister_targets(xt_nflog_target, ARRAY_SIZE(xt_nflog_target)); | ||
83 | } | ||
84 | |||
85 | module_init(xt_nflog_init); | ||
86 | module_exit(xt_nflog_fini); | ||
diff --git a/net/ipv4/netfilter/ipt_hashlimit.c b/net/netfilter/xt_hashlimit.c index 33ccdbf8e794..501c564e247f 100644 --- a/net/ipv4/netfilter/ipt_hashlimit.c +++ b/net/netfilter/xt_hashlimit.c | |||
@@ -6,23 +6,8 @@ | |||
6 | * $Id: ipt_hashlimit.c 3244 2004-10-20 16:24:29Z laforge@netfilter.org $ | 6 | * $Id: ipt_hashlimit.c 3244 2004-10-20 16:24:29Z laforge@netfilter.org $ |
7 | * | 7 | * |
8 | * Development of this code was funded by Astaro AG, http://www.astaro.com/ | 8 | * Development of this code was funded by Astaro AG, http://www.astaro.com/ |
9 | * | ||
10 | * based on ipt_limit.c by: | ||
11 | * Jérôme de Vivie <devivie@info.enserb.u-bordeaux.fr> | ||
12 | * Hervé Eychenne <eychenne@info.enserb.u-bordeaux.fr> | ||
13 | * Rusty Russell <rusty@rustcorp.com.au> | ||
14 | * | ||
15 | * The general idea is to create a hash table for every dstip and have a | ||
16 | * seperate limit counter per tuple. This way you can do something like 'limit | ||
17 | * the number of syn packets for each of my internal addresses. | ||
18 | * | ||
19 | * Ideally this would just be implemented as a general 'hash' match, which would | ||
20 | * allow us to attach any iptables target to it's hash buckets. But this is | ||
21 | * not possible in the current iptables architecture. As always, pkttables for | ||
22 | * 2.7.x will help ;) | ||
23 | */ | 9 | */ |
24 | #include <linux/module.h> | 10 | #include <linux/module.h> |
25 | #include <linux/skbuff.h> | ||
26 | #include <linux/spinlock.h> | 11 | #include <linux/spinlock.h> |
27 | #include <linux/random.h> | 12 | #include <linux/random.h> |
28 | #include <linux/jhash.h> | 13 | #include <linux/jhash.h> |
@@ -31,28 +16,40 @@ | |||
31 | #include <linux/proc_fs.h> | 16 | #include <linux/proc_fs.h> |
32 | #include <linux/seq_file.h> | 17 | #include <linux/seq_file.h> |
33 | #include <linux/list.h> | 18 | #include <linux/list.h> |
19 | #include <linux/skbuff.h> | ||
20 | #include <linux/in.h> | ||
21 | #include <linux/ip.h> | ||
22 | #include <linux/ipv6.h> | ||
34 | 23 | ||
24 | #include <linux/netfilter/x_tables.h> | ||
35 | #include <linux/netfilter_ipv4/ip_tables.h> | 25 | #include <linux/netfilter_ipv4/ip_tables.h> |
36 | #include <linux/netfilter_ipv4/ipt_hashlimit.h> | 26 | #include <linux/netfilter_ipv6/ip6_tables.h> |
37 | 27 | #include <linux/netfilter/xt_hashlimit.h> | |
38 | /* FIXME: this is just for IP_NF_ASSERRT */ | ||
39 | #include <linux/netfilter_ipv4/ip_conntrack.h> | ||
40 | #include <linux/mutex.h> | 28 | #include <linux/mutex.h> |
41 | 29 | ||
42 | MODULE_LICENSE("GPL"); | 30 | MODULE_LICENSE("GPL"); |
43 | MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>"); | 31 | MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>"); |
44 | MODULE_DESCRIPTION("iptables match for limiting per hash-bucket"); | 32 | MODULE_DESCRIPTION("iptables match for limiting per hash-bucket"); |
33 | MODULE_ALIAS("ipt_hashlimit"); | ||
34 | MODULE_ALIAS("ip6t_hashlimit"); | ||
45 | 35 | ||
46 | /* need to declare this at the top */ | 36 | /* need to declare this at the top */ |
47 | static struct proc_dir_entry *hashlimit_procdir; | 37 | static struct proc_dir_entry *hashlimit_procdir4; |
38 | static struct proc_dir_entry *hashlimit_procdir6; | ||
48 | static struct file_operations dl_file_ops; | 39 | static struct file_operations dl_file_ops; |
49 | 40 | ||
50 | /* hash table crap */ | 41 | /* hash table crap */ |
51 | |||
52 | struct dsthash_dst { | 42 | struct dsthash_dst { |
53 | __be32 src_ip; | 43 | union { |
54 | __be32 dst_ip; | 44 | struct { |
55 | /* ports have to be consecutive !!! */ | 45 | __be32 src; |
46 | __be32 dst; | ||
47 | } ip; | ||
48 | struct { | ||
49 | __be32 src[4]; | ||
50 | __be32 dst[4]; | ||
51 | } ip6; | ||
52 | } addr; | ||
56 | __be16 src_port; | 53 | __be16 src_port; |
57 | __be16 dst_port; | 54 | __be16 dst_port; |
58 | }; | 55 | }; |
@@ -71,9 +68,10 @@ struct dsthash_ent { | |||
71 | } rateinfo; | 68 | } rateinfo; |
72 | }; | 69 | }; |
73 | 70 | ||
74 | struct ipt_hashlimit_htable { | 71 | struct xt_hashlimit_htable { |
75 | struct hlist_node node; /* global list of all htables */ | 72 | struct hlist_node node; /* global list of all htables */ |
76 | atomic_t use; | 73 | atomic_t use; |
74 | int family; | ||
77 | 75 | ||
78 | struct hashlimit_cfg cfg; /* config */ | 76 | struct hashlimit_cfg cfg; /* config */ |
79 | 77 | ||
@@ -81,8 +79,8 @@ struct ipt_hashlimit_htable { | |||
81 | spinlock_t lock; /* lock for list_head */ | 79 | spinlock_t lock; /* lock for list_head */ |
82 | u_int32_t rnd; /* random seed for hash */ | 80 | u_int32_t rnd; /* random seed for hash */ |
83 | int rnd_initialized; | 81 | int rnd_initialized; |
82 | unsigned int count; /* number entries in table */ | ||
84 | struct timer_list timer; /* timer for gc */ | 83 | struct timer_list timer; /* timer for gc */ |
85 | atomic_t count; /* number entries in table */ | ||
86 | 84 | ||
87 | /* seq_file stuff */ | 85 | /* seq_file stuff */ |
88 | struct proc_dir_entry *pde; | 86 | struct proc_dir_entry *pde; |
@@ -97,41 +95,33 @@ static kmem_cache_t *hashlimit_cachep __read_mostly; | |||
97 | 95 | ||
98 | static inline int dst_cmp(const struct dsthash_ent *ent, struct dsthash_dst *b) | 96 | static inline int dst_cmp(const struct dsthash_ent *ent, struct dsthash_dst *b) |
99 | { | 97 | { |
100 | return (ent->dst.dst_ip == b->dst_ip | 98 | return !memcmp(&ent->dst, b, sizeof(ent->dst)); |
101 | && ent->dst.dst_port == b->dst_port | ||
102 | && ent->dst.src_port == b->src_port | ||
103 | && ent->dst.src_ip == b->src_ip); | ||
104 | } | 99 | } |
105 | 100 | ||
106 | static inline u_int32_t | 101 | static u_int32_t |
107 | hash_dst(const struct ipt_hashlimit_htable *ht, const struct dsthash_dst *dst) | 102 | hash_dst(const struct xt_hashlimit_htable *ht, const struct dsthash_dst *dst) |
108 | { | 103 | { |
109 | return (jhash_3words((__force u32)dst->dst_ip, | 104 | return jhash(dst, sizeof(*dst), ht->rnd) % ht->cfg.size; |
110 | ((__force u32)dst->dst_port<<16 | | ||
111 | (__force u32)dst->src_port), | ||
112 | (__force u32)dst->src_ip, ht->rnd) % ht->cfg.size); | ||
113 | } | 105 | } |
114 | 106 | ||
115 | static inline struct dsthash_ent * | 107 | static struct dsthash_ent * |
116 | __dsthash_find(const struct ipt_hashlimit_htable *ht, struct dsthash_dst *dst) | 108 | dsthash_find(const struct xt_hashlimit_htable *ht, struct dsthash_dst *dst) |
117 | { | 109 | { |
118 | struct dsthash_ent *ent; | 110 | struct dsthash_ent *ent; |
119 | struct hlist_node *pos; | 111 | struct hlist_node *pos; |
120 | u_int32_t hash = hash_dst(ht, dst); | 112 | u_int32_t hash = hash_dst(ht, dst); |
121 | 113 | ||
122 | if (!hlist_empty(&ht->hash[hash])) | 114 | if (!hlist_empty(&ht->hash[hash])) { |
123 | hlist_for_each_entry(ent, pos, &ht->hash[hash], node) { | 115 | hlist_for_each_entry(ent, pos, &ht->hash[hash], node) |
124 | if (dst_cmp(ent, dst)) { | 116 | if (dst_cmp(ent, dst)) |
125 | return ent; | 117 | return ent; |
126 | } | 118 | } |
127 | } | ||
128 | |||
129 | return NULL; | 119 | return NULL; |
130 | } | 120 | } |
131 | 121 | ||
132 | /* allocate dsthash_ent, initialize dst, put in htable and lock it */ | 122 | /* allocate dsthash_ent, initialize dst, put in htable and lock it */ |
133 | static struct dsthash_ent * | 123 | static struct dsthash_ent * |
134 | __dsthash_alloc_init(struct ipt_hashlimit_htable *ht, struct dsthash_dst *dst) | 124 | dsthash_alloc_init(struct xt_hashlimit_htable *ht, struct dsthash_dst *dst) |
135 | { | 125 | { |
136 | struct dsthash_ent *ent; | 126 | struct dsthash_ent *ent; |
137 | 127 | ||
@@ -142,12 +132,11 @@ __dsthash_alloc_init(struct ipt_hashlimit_htable *ht, struct dsthash_dst *dst) | |||
142 | ht->rnd_initialized = 1; | 132 | ht->rnd_initialized = 1; |
143 | } | 133 | } |
144 | 134 | ||
145 | if (ht->cfg.max && | 135 | if (ht->cfg.max && ht->count >= ht->cfg.max) { |
146 | atomic_read(&ht->count) >= ht->cfg.max) { | ||
147 | /* FIXME: do something. question is what.. */ | 136 | /* FIXME: do something. question is what.. */ |
148 | if (net_ratelimit()) | 137 | if (net_ratelimit()) |
149 | printk(KERN_WARNING | 138 | printk(KERN_WARNING |
150 | "ipt_hashlimit: max count of %u reached\n", | 139 | "xt_hashlimit: max count of %u reached\n", |
151 | ht->cfg.max); | 140 | ht->cfg.max); |
152 | return NULL; | 141 | return NULL; |
153 | } | 142 | } |
@@ -155,53 +144,47 @@ __dsthash_alloc_init(struct ipt_hashlimit_htable *ht, struct dsthash_dst *dst) | |||
155 | ent = kmem_cache_alloc(hashlimit_cachep, GFP_ATOMIC); | 144 | ent = kmem_cache_alloc(hashlimit_cachep, GFP_ATOMIC); |
156 | if (!ent) { | 145 | if (!ent) { |
157 | if (net_ratelimit()) | 146 | if (net_ratelimit()) |
158 | printk(KERN_ERR | 147 | printk(KERN_ERR |
159 | "ipt_hashlimit: can't allocate dsthash_ent\n"); | 148 | "xt_hashlimit: can't allocate dsthash_ent\n"); |
160 | return NULL; | 149 | return NULL; |
161 | } | 150 | } |
162 | 151 | memcpy(&ent->dst, dst, sizeof(ent->dst)); | |
163 | atomic_inc(&ht->count); | ||
164 | |||
165 | ent->dst.dst_ip = dst->dst_ip; | ||
166 | ent->dst.dst_port = dst->dst_port; | ||
167 | ent->dst.src_ip = dst->src_ip; | ||
168 | ent->dst.src_port = dst->src_port; | ||
169 | 152 | ||
170 | hlist_add_head(&ent->node, &ht->hash[hash_dst(ht, dst)]); | 153 | hlist_add_head(&ent->node, &ht->hash[hash_dst(ht, dst)]); |
171 | 154 | ht->count++; | |
172 | return ent; | 155 | return ent; |
173 | } | 156 | } |
174 | 157 | ||
175 | static inline void | 158 | static inline void |
176 | __dsthash_free(struct ipt_hashlimit_htable *ht, struct dsthash_ent *ent) | 159 | dsthash_free(struct xt_hashlimit_htable *ht, struct dsthash_ent *ent) |
177 | { | 160 | { |
178 | hlist_del(&ent->node); | 161 | hlist_del(&ent->node); |
179 | kmem_cache_free(hashlimit_cachep, ent); | 162 | kmem_cache_free(hashlimit_cachep, ent); |
180 | atomic_dec(&ht->count); | 163 | ht->count--; |
181 | } | 164 | } |
182 | static void htable_gc(unsigned long htlong); | 165 | static void htable_gc(unsigned long htlong); |
183 | 166 | ||
184 | static int htable_create(struct ipt_hashlimit_info *minfo) | 167 | static int htable_create(struct xt_hashlimit_info *minfo, int family) |
185 | { | 168 | { |
186 | int i; | 169 | struct xt_hashlimit_htable *hinfo; |
187 | unsigned int size; | 170 | unsigned int size; |
188 | struct ipt_hashlimit_htable *hinfo; | 171 | unsigned int i; |
189 | 172 | ||
190 | if (minfo->cfg.size) | 173 | if (minfo->cfg.size) |
191 | size = minfo->cfg.size; | 174 | size = minfo->cfg.size; |
192 | else { | 175 | else { |
193 | size = (((num_physpages << PAGE_SHIFT) / 16384) | 176 | size = ((num_physpages << PAGE_SHIFT) / 16384) / |
194 | / sizeof(struct list_head)); | 177 | sizeof(struct list_head); |
195 | if (num_physpages > (1024 * 1024 * 1024 / PAGE_SIZE)) | 178 | if (num_physpages > (1024 * 1024 * 1024 / PAGE_SIZE)) |
196 | size = 8192; | 179 | size = 8192; |
197 | if (size < 16) | 180 | if (size < 16) |
198 | size = 16; | 181 | size = 16; |
199 | } | 182 | } |
200 | /* FIXME: don't use vmalloc() here or anywhere else -HW */ | 183 | /* FIXME: don't use vmalloc() here or anywhere else -HW */ |
201 | hinfo = vmalloc(sizeof(struct ipt_hashlimit_htable) | 184 | hinfo = vmalloc(sizeof(struct xt_hashlimit_htable) + |
202 | + (sizeof(struct list_head) * size)); | 185 | sizeof(struct list_head) * size); |
203 | if (!hinfo) { | 186 | if (!hinfo) { |
204 | printk(KERN_ERR "ipt_hashlimit: Unable to create hashtable\n"); | 187 | printk(KERN_ERR "xt_hashlimit: unable to create hashtable\n"); |
205 | return -1; | 188 | return -1; |
206 | } | 189 | } |
207 | minfo->hinfo = hinfo; | 190 | minfo->hinfo = hinfo; |
@@ -217,11 +200,14 @@ static int htable_create(struct ipt_hashlimit_info *minfo) | |||
217 | for (i = 0; i < hinfo->cfg.size; i++) | 200 | for (i = 0; i < hinfo->cfg.size; i++) |
218 | INIT_HLIST_HEAD(&hinfo->hash[i]); | 201 | INIT_HLIST_HEAD(&hinfo->hash[i]); |
219 | 202 | ||
220 | atomic_set(&hinfo->count, 0); | ||
221 | atomic_set(&hinfo->use, 1); | 203 | atomic_set(&hinfo->use, 1); |
204 | hinfo->count = 0; | ||
205 | hinfo->family = family; | ||
222 | hinfo->rnd_initialized = 0; | 206 | hinfo->rnd_initialized = 0; |
223 | spin_lock_init(&hinfo->lock); | 207 | spin_lock_init(&hinfo->lock); |
224 | hinfo->pde = create_proc_entry(minfo->name, 0, hashlimit_procdir); | 208 | hinfo->pde = create_proc_entry(minfo->name, 0, |
209 | family == AF_INET ? hashlimit_procdir4 : | ||
210 | hashlimit_procdir6); | ||
225 | if (!hinfo->pde) { | 211 | if (!hinfo->pde) { |
226 | vfree(hinfo); | 212 | vfree(hinfo); |
227 | return -1; | 213 | return -1; |
@@ -242,23 +228,21 @@ static int htable_create(struct ipt_hashlimit_info *minfo) | |||
242 | return 0; | 228 | return 0; |
243 | } | 229 | } |
244 | 230 | ||
245 | static int select_all(struct ipt_hashlimit_htable *ht, struct dsthash_ent *he) | 231 | static int select_all(struct xt_hashlimit_htable *ht, struct dsthash_ent *he) |
246 | { | 232 | { |
247 | return 1; | 233 | return 1; |
248 | } | 234 | } |
249 | 235 | ||
250 | static int select_gc(struct ipt_hashlimit_htable *ht, struct dsthash_ent *he) | 236 | static int select_gc(struct xt_hashlimit_htable *ht, struct dsthash_ent *he) |
251 | { | 237 | { |
252 | return (jiffies >= he->expires); | 238 | return (jiffies >= he->expires); |
253 | } | 239 | } |
254 | 240 | ||
255 | static void htable_selective_cleanup(struct ipt_hashlimit_htable *ht, | 241 | static void htable_selective_cleanup(struct xt_hashlimit_htable *ht, |
256 | int (*select)(struct ipt_hashlimit_htable *ht, | 242 | int (*select)(struct xt_hashlimit_htable *ht, |
257 | struct dsthash_ent *he)) | 243 | struct dsthash_ent *he)) |
258 | { | 244 | { |
259 | int i; | 245 | unsigned int i; |
260 | |||
261 | IP_NF_ASSERT(ht->cfg.size && ht->cfg.max); | ||
262 | 246 | ||
263 | /* lock hash table and iterate over it */ | 247 | /* lock hash table and iterate over it */ |
264 | spin_lock_bh(&ht->lock); | 248 | spin_lock_bh(&ht->lock); |
@@ -267,7 +251,7 @@ static void htable_selective_cleanup(struct ipt_hashlimit_htable *ht, | |||
267 | struct hlist_node *pos, *n; | 251 | struct hlist_node *pos, *n; |
268 | hlist_for_each_entry_safe(dh, pos, n, &ht->hash[i], node) { | 252 | hlist_for_each_entry_safe(dh, pos, n, &ht->hash[i], node) { |
269 | if ((*select)(ht, dh)) | 253 | if ((*select)(ht, dh)) |
270 | __dsthash_free(ht, dh); | 254 | dsthash_free(ht, dh); |
271 | } | 255 | } |
272 | } | 256 | } |
273 | spin_unlock_bh(&ht->lock); | 257 | spin_unlock_bh(&ht->lock); |
@@ -276,7 +260,7 @@ static void htable_selective_cleanup(struct ipt_hashlimit_htable *ht, | |||
276 | /* hash table garbage collector, run by timer */ | 260 | /* hash table garbage collector, run by timer */ |
277 | static void htable_gc(unsigned long htlong) | 261 | static void htable_gc(unsigned long htlong) |
278 | { | 262 | { |
279 | struct ipt_hashlimit_htable *ht = (struct ipt_hashlimit_htable *)htlong; | 263 | struct xt_hashlimit_htable *ht = (struct xt_hashlimit_htable *)htlong; |
280 | 264 | ||
281 | htable_selective_cleanup(ht, select_gc); | 265 | htable_selective_cleanup(ht, select_gc); |
282 | 266 | ||
@@ -285,38 +269,39 @@ static void htable_gc(unsigned long htlong) | |||
285 | add_timer(&ht->timer); | 269 | add_timer(&ht->timer); |
286 | } | 270 | } |
287 | 271 | ||
288 | static void htable_destroy(struct ipt_hashlimit_htable *hinfo) | 272 | static void htable_destroy(struct xt_hashlimit_htable *hinfo) |
289 | { | 273 | { |
290 | /* remove timer, if it is pending */ | 274 | /* remove timer, if it is pending */ |
291 | if (timer_pending(&hinfo->timer)) | 275 | if (timer_pending(&hinfo->timer)) |
292 | del_timer(&hinfo->timer); | 276 | del_timer(&hinfo->timer); |
293 | 277 | ||
294 | /* remove proc entry */ | 278 | /* remove proc entry */ |
295 | remove_proc_entry(hinfo->pde->name, hashlimit_procdir); | 279 | remove_proc_entry(hinfo->pde->name, |
296 | 280 | hinfo->family == AF_INET ? hashlimit_procdir4 : | |
281 | hashlimit_procdir6); | ||
297 | htable_selective_cleanup(hinfo, select_all); | 282 | htable_selective_cleanup(hinfo, select_all); |
298 | vfree(hinfo); | 283 | vfree(hinfo); |
299 | } | 284 | } |
300 | 285 | ||
301 | static struct ipt_hashlimit_htable *htable_find_get(char *name) | 286 | static struct xt_hashlimit_htable *htable_find_get(char *name, int family) |
302 | { | 287 | { |
303 | struct ipt_hashlimit_htable *hinfo; | 288 | struct xt_hashlimit_htable *hinfo; |
304 | struct hlist_node *pos; | 289 | struct hlist_node *pos; |
305 | 290 | ||
306 | spin_lock_bh(&hashlimit_lock); | 291 | spin_lock_bh(&hashlimit_lock); |
307 | hlist_for_each_entry(hinfo, pos, &hashlimit_htables, node) { | 292 | hlist_for_each_entry(hinfo, pos, &hashlimit_htables, node) { |
308 | if (!strcmp(name, hinfo->pde->name)) { | 293 | if (!strcmp(name, hinfo->pde->name) && |
294 | hinfo->family == family) { | ||
309 | atomic_inc(&hinfo->use); | 295 | atomic_inc(&hinfo->use); |
310 | spin_unlock_bh(&hashlimit_lock); | 296 | spin_unlock_bh(&hashlimit_lock); |
311 | return hinfo; | 297 | return hinfo; |
312 | } | 298 | } |
313 | } | 299 | } |
314 | spin_unlock_bh(&hashlimit_lock); | 300 | spin_unlock_bh(&hashlimit_lock); |
315 | |||
316 | return NULL; | 301 | return NULL; |
317 | } | 302 | } |
318 | 303 | ||
319 | static void htable_put(struct ipt_hashlimit_htable *hinfo) | 304 | static void htable_put(struct xt_hashlimit_htable *hinfo) |
320 | { | 305 | { |
321 | if (atomic_dec_and_test(&hinfo->use)) { | 306 | if (atomic_dec_and_test(&hinfo->use)) { |
322 | spin_lock_bh(&hashlimit_lock); | 307 | spin_lock_bh(&hashlimit_lock); |
@@ -326,7 +311,6 @@ static void htable_put(struct ipt_hashlimit_htable *hinfo) | |||
326 | } | 311 | } |
327 | } | 312 | } |
328 | 313 | ||
329 | |||
330 | /* The algorithm used is the Simple Token Bucket Filter (TBF) | 314 | /* The algorithm used is the Simple Token Bucket Filter (TBF) |
331 | * see net/sched/sch_tbf.c in the linux source tree | 315 | * see net/sched/sch_tbf.c in the linux source tree |
332 | */ | 316 | */ |
@@ -370,17 +354,82 @@ user2credits(u_int32_t user) | |||
370 | /* If multiplying would overflow... */ | 354 | /* If multiplying would overflow... */ |
371 | if (user > 0xFFFFFFFF / (HZ*CREDITS_PER_JIFFY)) | 355 | if (user > 0xFFFFFFFF / (HZ*CREDITS_PER_JIFFY)) |
372 | /* Divide first. */ | 356 | /* Divide first. */ |
373 | return (user / IPT_HASHLIMIT_SCALE) * HZ * CREDITS_PER_JIFFY; | 357 | return (user / XT_HASHLIMIT_SCALE) * HZ * CREDITS_PER_JIFFY; |
374 | 358 | ||
375 | return (user * HZ * CREDITS_PER_JIFFY) / IPT_HASHLIMIT_SCALE; | 359 | return (user * HZ * CREDITS_PER_JIFFY) / XT_HASHLIMIT_SCALE; |
376 | } | 360 | } |
377 | 361 | ||
378 | static inline void rateinfo_recalc(struct dsthash_ent *dh, unsigned long now) | 362 | static inline void rateinfo_recalc(struct dsthash_ent *dh, unsigned long now) |
379 | { | 363 | { |
380 | dh->rateinfo.credit += (now - xchg(&dh->rateinfo.prev, now)) | 364 | dh->rateinfo.credit += (now - dh->rateinfo.prev) * CREDITS_PER_JIFFY; |
381 | * CREDITS_PER_JIFFY; | ||
382 | if (dh->rateinfo.credit > dh->rateinfo.credit_cap) | 365 | if (dh->rateinfo.credit > dh->rateinfo.credit_cap) |
383 | dh->rateinfo.credit = dh->rateinfo.credit_cap; | 366 | dh->rateinfo.credit = dh->rateinfo.credit_cap; |
367 | dh->rateinfo.prev = now; | ||
368 | } | ||
369 | |||
370 | static int | ||
371 | hashlimit_init_dst(struct xt_hashlimit_htable *hinfo, struct dsthash_dst *dst, | ||
372 | const struct sk_buff *skb, unsigned int protoff) | ||
373 | { | ||
374 | __be16 _ports[2], *ports; | ||
375 | int nexthdr; | ||
376 | |||
377 | memset(dst, 0, sizeof(*dst)); | ||
378 | |||
379 | switch (hinfo->family) { | ||
380 | case AF_INET: | ||
381 | if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_DIP) | ||
382 | dst->addr.ip.dst = skb->nh.iph->daddr; | ||
383 | if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_SIP) | ||
384 | dst->addr.ip.src = skb->nh.iph->saddr; | ||
385 | |||
386 | if (!(hinfo->cfg.mode & | ||
387 | (XT_HASHLIMIT_HASH_DPT | XT_HASHLIMIT_HASH_SPT))) | ||
388 | return 0; | ||
389 | nexthdr = skb->nh.iph->protocol; | ||
390 | break; | ||
391 | #if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE) | ||
392 | case AF_INET6: | ||
393 | if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_DIP) | ||
394 | memcpy(&dst->addr.ip6.dst, &skb->nh.ipv6h->daddr, | ||
395 | sizeof(dst->addr.ip6.dst)); | ||
396 | if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_SIP) | ||
397 | memcpy(&dst->addr.ip6.src, &skb->nh.ipv6h->saddr, | ||
398 | sizeof(dst->addr.ip6.src)); | ||
399 | |||
400 | if (!(hinfo->cfg.mode & | ||
401 | (XT_HASHLIMIT_HASH_DPT | XT_HASHLIMIT_HASH_SPT))) | ||
402 | return 0; | ||
403 | nexthdr = ipv6_find_hdr(skb, &protoff, -1, NULL); | ||
404 | if (nexthdr < 0) | ||
405 | return -1; | ||
406 | break; | ||
407 | #endif | ||
408 | default: | ||
409 | BUG(); | ||
410 | return 0; | ||
411 | } | ||
412 | |||
413 | switch (nexthdr) { | ||
414 | case IPPROTO_TCP: | ||
415 | case IPPROTO_UDP: | ||
416 | case IPPROTO_SCTP: | ||
417 | case IPPROTO_DCCP: | ||
418 | ports = skb_header_pointer(skb, protoff, sizeof(_ports), | ||
419 | &_ports); | ||
420 | break; | ||
421 | default: | ||
422 | _ports[0] = _ports[1] = 0; | ||
423 | ports = _ports; | ||
424 | break; | ||
425 | } | ||
426 | if (!ports) | ||
427 | return -1; | ||
428 | if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_SPT) | ||
429 | dst->src_port = ports[0]; | ||
430 | if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_DPT) | ||
431 | dst->dst_port = ports[1]; | ||
432 | return 0; | ||
384 | } | 433 | } |
385 | 434 | ||
386 | static int | 435 | static int |
@@ -393,68 +442,31 @@ hashlimit_match(const struct sk_buff *skb, | |||
393 | unsigned int protoff, | 442 | unsigned int protoff, |
394 | int *hotdrop) | 443 | int *hotdrop) |
395 | { | 444 | { |
396 | struct ipt_hashlimit_info *r = | 445 | struct xt_hashlimit_info *r = |
397 | ((struct ipt_hashlimit_info *)matchinfo)->u.master; | 446 | ((struct xt_hashlimit_info *)matchinfo)->u.master; |
398 | struct ipt_hashlimit_htable *hinfo = r->hinfo; | 447 | struct xt_hashlimit_htable *hinfo = r->hinfo; |
399 | unsigned long now = jiffies; | 448 | unsigned long now = jiffies; |
400 | struct dsthash_ent *dh; | 449 | struct dsthash_ent *dh; |
401 | struct dsthash_dst dst; | 450 | struct dsthash_dst dst; |
402 | 451 | ||
403 | /* build 'dst' according to hinfo->cfg and current packet */ | 452 | if (hashlimit_init_dst(hinfo, &dst, skb, protoff) < 0) |
404 | memset(&dst, 0, sizeof(dst)); | 453 | goto hotdrop; |
405 | if (hinfo->cfg.mode & IPT_HASHLIMIT_HASH_DIP) | ||
406 | dst.dst_ip = skb->nh.iph->daddr; | ||
407 | if (hinfo->cfg.mode & IPT_HASHLIMIT_HASH_SIP) | ||
408 | dst.src_ip = skb->nh.iph->saddr; | ||
409 | if (hinfo->cfg.mode & IPT_HASHLIMIT_HASH_DPT | ||
410 | ||hinfo->cfg.mode & IPT_HASHLIMIT_HASH_SPT) { | ||
411 | __be16 _ports[2], *ports; | ||
412 | |||
413 | switch (skb->nh.iph->protocol) { | ||
414 | case IPPROTO_TCP: | ||
415 | case IPPROTO_UDP: | ||
416 | case IPPROTO_SCTP: | ||
417 | case IPPROTO_DCCP: | ||
418 | ports = skb_header_pointer(skb, skb->nh.iph->ihl*4, | ||
419 | sizeof(_ports), &_ports); | ||
420 | break; | ||
421 | default: | ||
422 | _ports[0] = _ports[1] = 0; | ||
423 | ports = _ports; | ||
424 | break; | ||
425 | } | ||
426 | if (!ports) { | ||
427 | /* We've been asked to examine this packet, and we | ||
428 | can't. Hence, no choice but to drop. */ | ||
429 | *hotdrop = 1; | ||
430 | return 0; | ||
431 | } | ||
432 | if (hinfo->cfg.mode & IPT_HASHLIMIT_HASH_SPT) | ||
433 | dst.src_port = ports[0]; | ||
434 | if (hinfo->cfg.mode & IPT_HASHLIMIT_HASH_DPT) | ||
435 | dst.dst_port = ports[1]; | ||
436 | } | ||
437 | 454 | ||
438 | spin_lock_bh(&hinfo->lock); | 455 | spin_lock_bh(&hinfo->lock); |
439 | dh = __dsthash_find(hinfo, &dst); | 456 | dh = dsthash_find(hinfo, &dst); |
440 | if (!dh) { | 457 | if (!dh) { |
441 | dh = __dsthash_alloc_init(hinfo, &dst); | 458 | dh = dsthash_alloc_init(hinfo, &dst); |
442 | |||
443 | if (!dh) { | 459 | if (!dh) { |
444 | /* enomem... don't match == DROP */ | ||
445 | if (net_ratelimit()) | ||
446 | printk(KERN_ERR "%s: ENOMEM\n", __FUNCTION__); | ||
447 | spin_unlock_bh(&hinfo->lock); | 460 | spin_unlock_bh(&hinfo->lock); |
448 | return 0; | 461 | goto hotdrop; |
449 | } | 462 | } |
450 | 463 | ||
451 | dh->expires = jiffies + msecs_to_jiffies(hinfo->cfg.expire); | 464 | dh->expires = jiffies + msecs_to_jiffies(hinfo->cfg.expire); |
452 | |||
453 | dh->rateinfo.prev = jiffies; | 465 | dh->rateinfo.prev = jiffies; |
454 | dh->rateinfo.credit = user2credits(hinfo->cfg.avg * | 466 | dh->rateinfo.credit = user2credits(hinfo->cfg.avg * |
455 | hinfo->cfg.burst); | 467 | hinfo->cfg.burst); |
456 | dh->rateinfo.credit_cap = user2credits(hinfo->cfg.avg * | 468 | dh->rateinfo.credit_cap = user2credits(hinfo->cfg.avg * |
457 | hinfo->cfg.burst); | 469 | hinfo->cfg.burst); |
458 | dh->rateinfo.cost = user2credits(hinfo->cfg.avg); | 470 | dh->rateinfo.cost = user2credits(hinfo->cfg.avg); |
459 | } else { | 471 | } else { |
460 | /* update expiration timeout */ | 472 | /* update expiration timeout */ |
@@ -473,6 +485,10 @@ hashlimit_match(const struct sk_buff *skb, | |||
473 | 485 | ||
474 | /* default case: we're overlimit, thus don't match */ | 486 | /* default case: we're overlimit, thus don't match */ |
475 | return 0; | 487 | return 0; |
488 | |||
489 | hotdrop: | ||
490 | *hotdrop = 1; | ||
491 | return 0; | ||
476 | } | 492 | } |
477 | 493 | ||
478 | static int | 494 | static int |
@@ -482,42 +498,37 @@ hashlimit_checkentry(const char *tablename, | |||
482 | void *matchinfo, | 498 | void *matchinfo, |
483 | unsigned int hook_mask) | 499 | unsigned int hook_mask) |
484 | { | 500 | { |
485 | struct ipt_hashlimit_info *r = matchinfo; | 501 | struct xt_hashlimit_info *r = matchinfo; |
486 | 502 | ||
487 | /* Check for overflow. */ | 503 | /* Check for overflow. */ |
488 | if (r->cfg.burst == 0 | 504 | if (r->cfg.burst == 0 || |
489 | || user2credits(r->cfg.avg * r->cfg.burst) < | 505 | user2credits(r->cfg.avg * r->cfg.burst) < user2credits(r->cfg.avg)) { |
490 | user2credits(r->cfg.avg)) { | 506 | printk(KERN_ERR "xt_hashlimit: overflow, try lower: %u/%u\n", |
491 | printk(KERN_ERR "ipt_hashlimit: Overflow, try lower: %u/%u\n", | ||
492 | r->cfg.avg, r->cfg.burst); | 507 | r->cfg.avg, r->cfg.burst); |
493 | return 0; | 508 | return 0; |
494 | } | 509 | } |
495 | 510 | if (r->cfg.mode == 0 || | |
496 | if (r->cfg.mode == 0 | 511 | r->cfg.mode > (XT_HASHLIMIT_HASH_DPT | |
497 | || r->cfg.mode > (IPT_HASHLIMIT_HASH_DPT | 512 | XT_HASHLIMIT_HASH_DIP | |
498 | |IPT_HASHLIMIT_HASH_DIP | 513 | XT_HASHLIMIT_HASH_SIP | |
499 | |IPT_HASHLIMIT_HASH_SIP | 514 | XT_HASHLIMIT_HASH_SPT)) |
500 | |IPT_HASHLIMIT_HASH_SPT)) | ||
501 | return 0; | 515 | return 0; |
502 | |||
503 | if (!r->cfg.gc_interval) | 516 | if (!r->cfg.gc_interval) |
504 | return 0; | 517 | return 0; |
505 | |||
506 | if (!r->cfg.expire) | 518 | if (!r->cfg.expire) |
507 | return 0; | 519 | return 0; |
508 | |||
509 | if (r->name[sizeof(r->name) - 1] != '\0') | 520 | if (r->name[sizeof(r->name) - 1] != '\0') |
510 | return 0; | 521 | return 0; |
511 | 522 | ||
512 | /* This is the best we've got: We cannot release and re-grab lock, | 523 | /* This is the best we've got: We cannot release and re-grab lock, |
513 | * since checkentry() is called before ip_tables.c grabs ipt_mutex. | 524 | * since checkentry() is called before x_tables.c grabs xt_mutex. |
514 | * We also cannot grab the hashtable spinlock, since htable_create will | 525 | * We also cannot grab the hashtable spinlock, since htable_create will |
515 | * call vmalloc, and that can sleep. And we cannot just re-search | 526 | * call vmalloc, and that can sleep. And we cannot just re-search |
516 | * the list of htable's in htable_create(), since then we would | 527 | * the list of htable's in htable_create(), since then we would |
517 | * create duplicate proc files. -HW */ | 528 | * create duplicate proc files. -HW */ |
518 | mutex_lock(&hlimit_mutex); | 529 | mutex_lock(&hlimit_mutex); |
519 | r->hinfo = htable_find_get(r->name); | 530 | r->hinfo = htable_find_get(r->name, match->family); |
520 | if (!r->hinfo && (htable_create(r) != 0)) { | 531 | if (!r->hinfo && htable_create(r, match->family) != 0) { |
521 | mutex_unlock(&hlimit_mutex); | 532 | mutex_unlock(&hlimit_mutex); |
522 | return 0; | 533 | return 0; |
523 | } | 534 | } |
@@ -525,20 +536,19 @@ hashlimit_checkentry(const char *tablename, | |||
525 | 536 | ||
526 | /* Ugly hack: For SMP, we only want to use one set */ | 537 | /* Ugly hack: For SMP, we only want to use one set */ |
527 | r->u.master = r; | 538 | r->u.master = r; |
528 | |||
529 | return 1; | 539 | return 1; |
530 | } | 540 | } |
531 | 541 | ||
532 | static void | 542 | static void |
533 | hashlimit_destroy(const struct xt_match *match, void *matchinfo) | 543 | hashlimit_destroy(const struct xt_match *match, void *matchinfo) |
534 | { | 544 | { |
535 | struct ipt_hashlimit_info *r = matchinfo; | 545 | struct xt_hashlimit_info *r = matchinfo; |
536 | 546 | ||
537 | htable_put(r->hinfo); | 547 | htable_put(r->hinfo); |
538 | } | 548 | } |
539 | 549 | ||
540 | #ifdef CONFIG_COMPAT | 550 | #ifdef CONFIG_COMPAT |
541 | struct compat_ipt_hashlimit_info { | 551 | struct compat_xt_hashlimit_info { |
542 | char name[IFNAMSIZ]; | 552 | char name[IFNAMSIZ]; |
543 | struct hashlimit_cfg cfg; | 553 | struct hashlimit_cfg cfg; |
544 | compat_uptr_t hinfo; | 554 | compat_uptr_t hinfo; |
@@ -547,40 +557,56 @@ struct compat_ipt_hashlimit_info { | |||
547 | 557 | ||
548 | static void compat_from_user(void *dst, void *src) | 558 | static void compat_from_user(void *dst, void *src) |
549 | { | 559 | { |
550 | int off = offsetof(struct compat_ipt_hashlimit_info, hinfo); | 560 | int off = offsetof(struct compat_xt_hashlimit_info, hinfo); |
551 | 561 | ||
552 | memcpy(dst, src, off); | 562 | memcpy(dst, src, off); |
553 | memset(dst + off, 0, sizeof(struct compat_ipt_hashlimit_info) - off); | 563 | memset(dst + off, 0, sizeof(struct compat_xt_hashlimit_info) - off); |
554 | } | 564 | } |
555 | 565 | ||
556 | static int compat_to_user(void __user *dst, void *src) | 566 | static int compat_to_user(void __user *dst, void *src) |
557 | { | 567 | { |
558 | int off = offsetof(struct compat_ipt_hashlimit_info, hinfo); | 568 | int off = offsetof(struct compat_xt_hashlimit_info, hinfo); |
559 | 569 | ||
560 | return copy_to_user(dst, src, off) ? -EFAULT : 0; | 570 | return copy_to_user(dst, src, off) ? -EFAULT : 0; |
561 | } | 571 | } |
562 | #endif | 572 | #endif |
563 | 573 | ||
564 | static struct ipt_match ipt_hashlimit = { | 574 | static struct xt_match xt_hashlimit[] = { |
565 | .name = "hashlimit", | 575 | { |
566 | .match = hashlimit_match, | 576 | .name = "hashlimit", |
567 | .matchsize = sizeof(struct ipt_hashlimit_info), | 577 | .family = AF_INET, |
578 | .match = hashlimit_match, | ||
579 | .matchsize = sizeof(struct xt_hashlimit_info), | ||
580 | #ifdef CONFIG_COMPAT | ||
581 | .compatsize = sizeof(struct compat_xt_hashlimit_info), | ||
582 | .compat_from_user = compat_from_user, | ||
583 | .compat_to_user = compat_to_user, | ||
584 | #endif | ||
585 | .checkentry = hashlimit_checkentry, | ||
586 | .destroy = hashlimit_destroy, | ||
587 | .me = THIS_MODULE | ||
588 | }, | ||
589 | { | ||
590 | .name = "hashlimit", | ||
591 | .family = AF_INET6, | ||
592 | .match = hashlimit_match, | ||
593 | .matchsize = sizeof(struct xt_hashlimit_info), | ||
568 | #ifdef CONFIG_COMPAT | 594 | #ifdef CONFIG_COMPAT |
569 | .compatsize = sizeof(struct compat_ipt_hashlimit_info), | 595 | .compatsize = sizeof(struct compat_xt_hashlimit_info), |
570 | .compat_from_user = compat_from_user, | 596 | .compat_from_user = compat_from_user, |
571 | .compat_to_user = compat_to_user, | 597 | .compat_to_user = compat_to_user, |
572 | #endif | 598 | #endif |
573 | .checkentry = hashlimit_checkentry, | 599 | .checkentry = hashlimit_checkentry, |
574 | .destroy = hashlimit_destroy, | 600 | .destroy = hashlimit_destroy, |
575 | .me = THIS_MODULE | 601 | .me = THIS_MODULE |
602 | }, | ||
576 | }; | 603 | }; |
577 | 604 | ||
578 | /* PROC stuff */ | 605 | /* PROC stuff */ |
579 | |||
580 | static void *dl_seq_start(struct seq_file *s, loff_t *pos) | 606 | static void *dl_seq_start(struct seq_file *s, loff_t *pos) |
581 | { | 607 | { |
582 | struct proc_dir_entry *pde = s->private; | 608 | struct proc_dir_entry *pde = s->private; |
583 | struct ipt_hashlimit_htable *htable = pde->data; | 609 | struct xt_hashlimit_htable *htable = pde->data; |
584 | unsigned int *bucket; | 610 | unsigned int *bucket; |
585 | 611 | ||
586 | spin_lock_bh(&htable->lock); | 612 | spin_lock_bh(&htable->lock); |
@@ -598,7 +624,7 @@ static void *dl_seq_start(struct seq_file *s, loff_t *pos) | |||
598 | static void *dl_seq_next(struct seq_file *s, void *v, loff_t *pos) | 624 | static void *dl_seq_next(struct seq_file *s, void *v, loff_t *pos) |
599 | { | 625 | { |
600 | struct proc_dir_entry *pde = s->private; | 626 | struct proc_dir_entry *pde = s->private; |
601 | struct ipt_hashlimit_htable *htable = pde->data; | 627 | struct xt_hashlimit_htable *htable = pde->data; |
602 | unsigned int *bucket = (unsigned int *)v; | 628 | unsigned int *bucket = (unsigned int *)v; |
603 | 629 | ||
604 | *pos = ++(*bucket); | 630 | *pos = ++(*bucket); |
@@ -612,43 +638,59 @@ static void *dl_seq_next(struct seq_file *s, void *v, loff_t *pos) | |||
612 | static void dl_seq_stop(struct seq_file *s, void *v) | 638 | static void dl_seq_stop(struct seq_file *s, void *v) |
613 | { | 639 | { |
614 | struct proc_dir_entry *pde = s->private; | 640 | struct proc_dir_entry *pde = s->private; |
615 | struct ipt_hashlimit_htable *htable = pde->data; | 641 | struct xt_hashlimit_htable *htable = pde->data; |
616 | unsigned int *bucket = (unsigned int *)v; | 642 | unsigned int *bucket = (unsigned int *)v; |
617 | 643 | ||
618 | kfree(bucket); | 644 | kfree(bucket); |
619 | |||
620 | spin_unlock_bh(&htable->lock); | 645 | spin_unlock_bh(&htable->lock); |
621 | } | 646 | } |
622 | 647 | ||
623 | static inline int dl_seq_real_show(struct dsthash_ent *ent, struct seq_file *s) | 648 | static int dl_seq_real_show(struct dsthash_ent *ent, int family, |
649 | struct seq_file *s) | ||
624 | { | 650 | { |
625 | /* recalculate to show accurate numbers */ | 651 | /* recalculate to show accurate numbers */ |
626 | rateinfo_recalc(ent, jiffies); | 652 | rateinfo_recalc(ent, jiffies); |
627 | 653 | ||
628 | return seq_printf(s, "%ld %u.%u.%u.%u:%u->%u.%u.%u.%u:%u %u %u %u\n", | 654 | switch (family) { |
629 | (long)(ent->expires - jiffies)/HZ, | 655 | case AF_INET: |
630 | NIPQUAD(ent->dst.src_ip), ntohs(ent->dst.src_port), | 656 | return seq_printf(s, "%ld %u.%u.%u.%u:%u->" |
631 | NIPQUAD(ent->dst.dst_ip), ntohs(ent->dst.dst_port), | 657 | "%u.%u.%u.%u:%u %u %u %u\n", |
632 | ent->rateinfo.credit, ent->rateinfo.credit_cap, | 658 | (long)(ent->expires - jiffies)/HZ, |
633 | ent->rateinfo.cost); | 659 | NIPQUAD(ent->dst.addr.ip.src), |
660 | ntohs(ent->dst.src_port), | ||
661 | NIPQUAD(ent->dst.addr.ip.dst), | ||
662 | ntohs(ent->dst.dst_port), | ||
663 | ent->rateinfo.credit, ent->rateinfo.credit_cap, | ||
664 | ent->rateinfo.cost); | ||
665 | case AF_INET6: | ||
666 | return seq_printf(s, "%ld " NIP6_FMT ":%u->" | ||
667 | NIP6_FMT ":%u %u %u %u\n", | ||
668 | (long)(ent->expires - jiffies)/HZ, | ||
669 | NIP6(*(struct in6_addr *)&ent->dst.addr.ip6.src), | ||
670 | ntohs(ent->dst.src_port), | ||
671 | NIP6(*(struct in6_addr *)&ent->dst.addr.ip6.dst), | ||
672 | ntohs(ent->dst.dst_port), | ||
673 | ent->rateinfo.credit, ent->rateinfo.credit_cap, | ||
674 | ent->rateinfo.cost); | ||
675 | default: | ||
676 | BUG(); | ||
677 | return 0; | ||
678 | } | ||
634 | } | 679 | } |
635 | 680 | ||
636 | static int dl_seq_show(struct seq_file *s, void *v) | 681 | static int dl_seq_show(struct seq_file *s, void *v) |
637 | { | 682 | { |
638 | struct proc_dir_entry *pde = s->private; | 683 | struct proc_dir_entry *pde = s->private; |
639 | struct ipt_hashlimit_htable *htable = pde->data; | 684 | struct xt_hashlimit_htable *htable = pde->data; |
640 | unsigned int *bucket = (unsigned int *)v; | 685 | unsigned int *bucket = (unsigned int *)v; |
641 | struct dsthash_ent *ent; | 686 | struct dsthash_ent *ent; |
642 | struct hlist_node *pos; | 687 | struct hlist_node *pos; |
643 | 688 | ||
644 | if (!hlist_empty(&htable->hash[*bucket])) | 689 | if (!hlist_empty(&htable->hash[*bucket])) { |
645 | hlist_for_each_entry(ent, pos, &htable->hash[*bucket], node) { | 690 | hlist_for_each_entry(ent, pos, &htable->hash[*bucket], node) |
646 | if (dl_seq_real_show(ent, s)) { | 691 | if (dl_seq_real_show(ent, htable->family, s)) |
647 | /* buffer was filled and unable to print that tuple */ | ||
648 | return 1; | 692 | return 1; |
649 | } | 693 | } |
650 | } | ||
651 | |||
652 | return 0; | 694 | return 0; |
653 | } | 695 | } |
654 | 696 | ||
@@ -678,56 +720,53 @@ static struct file_operations dl_file_ops = { | |||
678 | .release = seq_release | 720 | .release = seq_release |
679 | }; | 721 | }; |
680 | 722 | ||
681 | static int init_or_fini(int fini) | 723 | static int __init xt_hashlimit_init(void) |
682 | { | 724 | { |
683 | int ret = 0; | 725 | int err; |
684 | |||
685 | if (fini) | ||
686 | goto cleanup; | ||
687 | 726 | ||
688 | if (ipt_register_match(&ipt_hashlimit)) { | 727 | err = xt_register_matches(xt_hashlimit, ARRAY_SIZE(xt_hashlimit)); |
689 | ret = -EINVAL; | 728 | if (err < 0) |
690 | goto cleanup_nothing; | 729 | goto err1; |
691 | } | ||
692 | 730 | ||
693 | hashlimit_cachep = kmem_cache_create("ipt_hashlimit", | 731 | err = -ENOMEM; |
694 | sizeof(struct dsthash_ent), 0, | 732 | hashlimit_cachep = kmem_cache_create("xt_hashlimit", |
695 | 0, NULL, NULL); | 733 | sizeof(struct dsthash_ent), 0, 0, |
734 | NULL, NULL); | ||
696 | if (!hashlimit_cachep) { | 735 | if (!hashlimit_cachep) { |
697 | printk(KERN_ERR "Unable to create ipt_hashlimit slab cache\n"); | 736 | printk(KERN_ERR "xt_hashlimit: unable to create slab cache\n"); |
698 | ret = -ENOMEM; | 737 | goto err2; |
699 | goto cleanup_unreg_match; | ||
700 | } | 738 | } |
701 | 739 | hashlimit_procdir4 = proc_mkdir("ipt_hashlimit", proc_net); | |
702 | hashlimit_procdir = proc_mkdir("ipt_hashlimit", proc_net); | 740 | if (!hashlimit_procdir4) { |
703 | if (!hashlimit_procdir) { | 741 | printk(KERN_ERR "xt_hashlimit: unable to create proc dir " |
704 | printk(KERN_ERR "Unable to create proc dir entry\n"); | 742 | "entry\n"); |
705 | ret = -ENOMEM; | 743 | goto err3; |
706 | goto cleanup_free_slab; | ||
707 | } | 744 | } |
708 | 745 | hashlimit_procdir6 = proc_mkdir("ip6t_hashlimit", proc_net); | |
709 | return ret; | 746 | if (!hashlimit_procdir6) { |
710 | 747 | printk(KERN_ERR "xt_hashlimit: unable to create proc dir " | |
711 | cleanup: | 748 | "entry\n"); |
749 | goto err4; | ||
750 | } | ||
751 | return 0; | ||
752 | err4: | ||
712 | remove_proc_entry("ipt_hashlimit", proc_net); | 753 | remove_proc_entry("ipt_hashlimit", proc_net); |
713 | cleanup_free_slab: | 754 | err3: |
714 | kmem_cache_destroy(hashlimit_cachep); | 755 | kmem_cache_destroy(hashlimit_cachep); |
715 | cleanup_unreg_match: | 756 | err2: |
716 | ipt_unregister_match(&ipt_hashlimit); | 757 | xt_unregister_matches(xt_hashlimit, ARRAY_SIZE(xt_hashlimit)); |
717 | cleanup_nothing: | 758 | err1: |
718 | return ret; | 759 | return err; |
719 | |||
720 | } | ||
721 | 760 | ||
722 | static int __init ipt_hashlimit_init(void) | ||
723 | { | ||
724 | return init_or_fini(0); | ||
725 | } | 761 | } |
726 | 762 | ||
727 | static void __exit ipt_hashlimit_fini(void) | 763 | static void __exit xt_hashlimit_fini(void) |
728 | { | 764 | { |
729 | init_or_fini(1); | 765 | remove_proc_entry("ipt_hashlimit", proc_net); |
766 | remove_proc_entry("ip6t_hashlimit", proc_net); | ||
767 | kmem_cache_destroy(hashlimit_cachep); | ||
768 | xt_unregister_matches(xt_hashlimit, ARRAY_SIZE(xt_hashlimit)); | ||
730 | } | 769 | } |
731 | 770 | ||
732 | module_init(ipt_hashlimit_init); | 771 | module_init(xt_hashlimit_init); |
733 | module_exit(ipt_hashlimit_fini); | 772 | module_exit(xt_hashlimit_fini); |
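The hashlimit conversion keeps the rate-limiting core intact: every destination tuple owns a token bucket whose credit is refilled in rateinfo_recalc() and drained by a fixed cost per matching packet, the Simple TBF mentioned in the code comment. The xchg() on rateinfo.prev becomes a plain assignment, which looks safe because the entries are only touched with the per-table spinlock held. Below is a standalone sketch of the bucket logic with made-up constants; the real module derives cost and credit_cap via user2credits() from jiffies and XT_HASHLIMIT_SCALE, so the numbers here are illustrative only.

#include <stdio.h>
#include <stdint.h>

/* Simplified stand-ins, not the kernel's HZ or CREDITS_PER_JIFFY. */
#define HZ			100	/* pretend jiffies tick at 100 Hz */
#define CREDITS_PER_JIFFY	128

struct rateinfo {
	unsigned long prev;	/* last refill time, in jiffies */
	uint32_t credit;	/* current credit */
	uint32_t credit_cap;	/* burst limit */
	uint32_t cost;		/* credits consumed per matched packet */
};

static void rateinfo_recalc(struct rateinfo *r, unsigned long now)
{
	r->credit += (now - r->prev) * CREDITS_PER_JIFFY;
	if (r->credit > r->credit_cap)
		r->credit = r->credit_cap;
	r->prev = now;
}

/* Returns 1 if the packet is within the limit (match), 0 if over limit. */
static int hashlimit_ok(struct rateinfo *r, unsigned long now)
{
	rateinfo_recalc(r, now);
	if (r->credit >= r->cost) {
		r->credit -= r->cost;
		return 1;
	}
	return 0;
}

int main(void)
{
	/* Roughly "a burst of 4 packets, cost 64 credits each". */
	struct rateinfo r = { 0, 4 * 64, 4 * 64, 64 };
	unsigned long now = 0;
	int i;

	for (i = 0; i < 8; i++)
		printf("packet %d at t=0: %s\n", i,
		       hashlimit_ok(&r, now) ? "match" : "over limit");
	now += 1;	/* one jiffy later the bucket has refilled a little */
	printf("packet after 1 jiffy: %s\n",
	       hashlimit_ok(&r, now) ? "match" : "over limit");
	return 0;
}

With these values the first four packets match, the next four are over limit, and one jiffy of refill is enough to admit one more packet, which is the behaviour the per-tuple limit is meant to give.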
diff --git a/net/netfilter/xt_mark.c b/net/netfilter/xt_mark.c index 934dddfbcd23..dfa1ee6914c0 100644 --- a/net/netfilter/xt_mark.c +++ b/net/netfilter/xt_mark.c | |||
@@ -31,7 +31,7 @@ match(const struct sk_buff *skb, | |||
31 | { | 31 | { |
32 | const struct xt_mark_info *info = matchinfo; | 32 | const struct xt_mark_info *info = matchinfo; |
33 | 33 | ||
34 | return ((skb->nfmark & info->mask) == info->mark) ^ info->invert; | 34 | return ((skb->mark & info->mask) == info->mark) ^ info->invert; |
35 | } | 35 | } |
36 | 36 | ||
37 | static int | 37 | static int |
diff --git a/net/netfilter/xt_multiport.c b/net/netfilter/xt_multiport.c index d3aefd380930..1602086c7fd6 100644 --- a/net/netfilter/xt_multiport.c +++ b/net/netfilter/xt_multiport.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* Kernel module to match one of a list of TCP/UDP/SCTP/DCCP ports: ports are in | 1 | /* Kernel module to match one of a list of TCP/UDP(-Lite)/SCTP/DCCP ports: |
2 | the same place so we can treat them as equal. */ | 2 | ports are in the same place so we can treat them as equal. */ |
3 | 3 | ||
4 | /* (C) 1999-2001 Paul `Rusty' Russell | 4 | /* (C) 1999-2001 Paul `Rusty' Russell |
5 | * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org> | 5 | * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org> |
@@ -104,7 +104,7 @@ match(const struct sk_buff *skb, | |||
104 | unsigned int protoff, | 104 | unsigned int protoff, |
105 | int *hotdrop) | 105 | int *hotdrop) |
106 | { | 106 | { |
107 | u16 _ports[2], *pptr; | 107 | __be16 _ports[2], *pptr; |
108 | const struct xt_multiport *multiinfo = matchinfo; | 108 | const struct xt_multiport *multiinfo = matchinfo; |
109 | 109 | ||
110 | if (offset) | 110 | if (offset) |
@@ -135,7 +135,7 @@ match_v1(const struct sk_buff *skb, | |||
135 | unsigned int protoff, | 135 | unsigned int protoff, |
136 | int *hotdrop) | 136 | int *hotdrop) |
137 | { | 137 | { |
138 | u16 _ports[2], *pptr; | 138 | __be16 _ports[2], *pptr; |
139 | const struct xt_multiport_v1 *multiinfo = matchinfo; | 139 | const struct xt_multiport_v1 *multiinfo = matchinfo; |
140 | 140 | ||
141 | if (offset) | 141 | if (offset) |
@@ -162,6 +162,7 @@ check(u_int16_t proto, | |||
162 | { | 162 | { |
163 | /* Must specify supported protocol, no unknown flags or bad count */ | 163 | /* Must specify supported protocol, no unknown flags or bad count */ |
164 | return (proto == IPPROTO_TCP || proto == IPPROTO_UDP | 164 | return (proto == IPPROTO_TCP || proto == IPPROTO_UDP |
165 | || proto == IPPROTO_UDPLITE | ||
165 | || proto == IPPROTO_SCTP || proto == IPPROTO_DCCP) | 166 | || proto == IPPROTO_SCTP || proto == IPPROTO_DCCP) |
166 | && !(ip_invflags & XT_INV_PROTO) | 167 | && !(ip_invflags & XT_INV_PROTO) |
167 | && (match_flags == XT_MULTIPORT_SOURCE | 168 | && (match_flags == XT_MULTIPORT_SOURCE |
diff --git a/net/netfilter/xt_sctp.c b/net/netfilter/xt_sctp.c index 7956acaaa24b..71bf036f833c 100644 --- a/net/netfilter/xt_sctp.c +++ b/net/netfilter/xt_sctp.c | |||
@@ -71,7 +71,7 @@ match_packet(const struct sk_buff *skb, | |||
71 | duprintf("Chunk num: %d\toffset: %d\ttype: %d\tlength: %d\tflags: %x\n", | 71 | duprintf("Chunk num: %d\toffset: %d\ttype: %d\tlength: %d\tflags: %x\n", |
72 | ++i, offset, sch->type, htons(sch->length), sch->flags); | 72 | ++i, offset, sch->type, htons(sch->length), sch->flags); |
73 | 73 | ||
74 | offset += (htons(sch->length) + 3) & ~3; | 74 | offset += (ntohs(sch->length) + 3) & ~3; |
75 | 75 | ||
76 | duprintf("skb->len: %d\toffset: %d\n", skb->len, offset); | 76 | duprintf("skb->len: %d\toffset: %d\n", skb->len, offset); |
77 | 77 | ||
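The xt_sctp change converts sch->length with ntohs() instead of htons() before advancing to the next chunk. Numerically the two macros perform the same 16-bit swap on little-endian and are no-ops on big-endian, so this reads as an endianness-annotation fix (sch->length is __be16) rather than a behaviour change. The sketch below shows the intended direction of the conversion together with the 4-byte chunk alignment; struct chunkhdr is a cut-down, hypothetical stand-in for the SCTP chunk header.

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

/* Minimal stand-in for an SCTP chunk header: length travels big-endian. */
struct chunkhdr {
	uint8_t  type;
	uint8_t  flags;
	uint16_t be_length;	/* network byte order, like sch->length */
};

int main(void)
{
	struct chunkhdr sch = { 1, 0, htons(18) };	/* an 18-byte chunk */
	unsigned int offset = 0;

	/* Advance to the next chunk: convert to host order, then round the
	 * chunk length up to a multiple of 4, as SCTP requires. */
	offset += (ntohs(sch.be_length) + 3) & ~3u;
	printf("next chunk offset = %u\n", offset);	/* 20 */
	return 0;
}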
diff --git a/net/netfilter/xt_tcpudp.c b/net/netfilter/xt_tcpudp.c index e76a68e0bc66..46414b562a19 100644 --- a/net/netfilter/xt_tcpudp.c +++ b/net/netfilter/xt_tcpudp.c | |||
@@ -10,7 +10,7 @@ | |||
10 | #include <linux/netfilter_ipv4/ip_tables.h> | 10 | #include <linux/netfilter_ipv4/ip_tables.h> |
11 | #include <linux/netfilter_ipv6/ip6_tables.h> | 11 | #include <linux/netfilter_ipv6/ip6_tables.h> |
12 | 12 | ||
13 | MODULE_DESCRIPTION("x_tables match for TCP and UDP, supports IPv4 and IPv6"); | 13 | MODULE_DESCRIPTION("x_tables match for TCP and UDP(-Lite), supports IPv4 and IPv6"); |
14 | MODULE_LICENSE("GPL"); | 14 | MODULE_LICENSE("GPL"); |
15 | MODULE_ALIAS("xt_tcp"); | 15 | MODULE_ALIAS("xt_tcp"); |
16 | MODULE_ALIAS("xt_udp"); | 16 | MODULE_ALIAS("xt_udp"); |
@@ -234,6 +234,24 @@ static struct xt_match xt_tcpudp_match[] = { | |||
234 | .proto = IPPROTO_UDP, | 234 | .proto = IPPROTO_UDP, |
235 | .me = THIS_MODULE, | 235 | .me = THIS_MODULE, |
236 | }, | 236 | }, |
237 | { | ||
238 | .name = "udplite", | ||
239 | .family = AF_INET, | ||
240 | .checkentry = udp_checkentry, | ||
241 | .match = udp_match, | ||
242 | .matchsize = sizeof(struct xt_udp), | ||
243 | .proto = IPPROTO_UDPLITE, | ||
244 | .me = THIS_MODULE, | ||
245 | }, | ||
246 | { | ||
247 | .name = "udplite", | ||
248 | .family = AF_INET6, | ||
249 | .checkentry = udp_checkentry, | ||
250 | .match = udp_match, | ||
251 | .matchsize = sizeof(struct xt_udp), | ||
252 | .proto = IPPROTO_UDPLITE, | ||
253 | .me = THIS_MODULE, | ||
254 | }, | ||
237 | }; | 255 | }; |
238 | 256 | ||
239 | static int __init xt_tcpudp_init(void) | 257 | static int __init xt_tcpudp_init(void) |
diff --git a/net/netlabel/netlabel_cipso_v4.c b/net/netlabel/netlabel_cipso_v4.c index a6ce1d6d5c59..743b05734a49 100644 --- a/net/netlabel/netlabel_cipso_v4.c +++ b/net/netlabel/netlabel_cipso_v4.c | |||
@@ -407,12 +407,14 @@ static int netlbl_cipsov4_add(struct sk_buff *skb, struct genl_info *info) | |||
407 | 407 | ||
408 | audit_buf = netlbl_audit_start_common(AUDIT_MAC_CIPSOV4_ADD, | 408 | audit_buf = netlbl_audit_start_common(AUDIT_MAC_CIPSOV4_ADD, |
409 | &audit_info); | 409 | &audit_info); |
410 | audit_log_format(audit_buf, | 410 | if (audit_buf != NULL) { |
411 | " cipso_doi=%u cipso_type=%s res=%u", | 411 | audit_log_format(audit_buf, |
412 | doi, | 412 | " cipso_doi=%u cipso_type=%s res=%u", |
413 | type_str, | 413 | doi, |
414 | ret_val == 0 ? 1 : 0); | 414 | type_str, |
415 | audit_log_end(audit_buf); | 415 | ret_val == 0 ? 1 : 0); |
416 | audit_log_end(audit_buf); | ||
417 | } | ||
416 | 418 | ||
417 | return ret_val; | 419 | return ret_val; |
418 | } | 420 | } |
@@ -452,17 +454,13 @@ static int netlbl_cipsov4_list(struct sk_buff *skb, struct genl_info *info) | |||
452 | } | 454 | } |
453 | 455 | ||
454 | list_start: | 456 | list_start: |
455 | ans_skb = nlmsg_new(NLMSG_GOODSIZE * nlsze_mult, GFP_KERNEL); | 457 | ans_skb = nlmsg_new(NLMSG_DEFAULT_SIZE * nlsze_mult, GFP_KERNEL); |
456 | if (ans_skb == NULL) { | 458 | if (ans_skb == NULL) { |
457 | ret_val = -ENOMEM; | 459 | ret_val = -ENOMEM; |
458 | goto list_failure; | 460 | goto list_failure; |
459 | } | 461 | } |
460 | data = netlbl_netlink_hdr_put(ans_skb, | 462 | data = genlmsg_put_reply(ans_skb, info, &netlbl_cipsov4_gnl_family, |
461 | info->snd_pid, | 463 | 0, NLBL_CIPSOV4_C_LIST); |
462 | info->snd_seq, | ||
463 | netlbl_cipsov4_gnl_family.id, | ||
464 | 0, | ||
465 | NLBL_CIPSOV4_C_LIST); | ||
466 | if (data == NULL) { | 464 | if (data == NULL) { |
467 | ret_val = -ENOMEM; | 465 | ret_val = -ENOMEM; |
468 | goto list_failure; | 466 | goto list_failure; |
@@ -568,7 +566,7 @@ list_start: | |||
568 | 566 | ||
569 | genlmsg_end(ans_skb, data); | 567 | genlmsg_end(ans_skb, data); |
570 | 568 | ||
571 | ret_val = genlmsg_unicast(ans_skb, info->snd_pid); | 569 | ret_val = genlmsg_reply(ans_skb, info); |
572 | if (ret_val != 0) | 570 | if (ret_val != 0) |
573 | goto list_failure; | 571 | goto list_failure; |
574 | 572 | ||
@@ -607,12 +605,9 @@ static int netlbl_cipsov4_listall_cb(struct cipso_v4_doi *doi_def, void *arg) | |||
607 | struct netlbl_cipsov4_doiwalk_arg *cb_arg = arg; | 605 | struct netlbl_cipsov4_doiwalk_arg *cb_arg = arg; |
608 | void *data; | 606 | void *data; |
609 | 607 | ||
610 | data = netlbl_netlink_hdr_put(cb_arg->skb, | 608 | data = genlmsg_put(cb_arg->skb, NETLINK_CB(cb_arg->nl_cb->skb).pid, |
611 | NETLINK_CB(cb_arg->nl_cb->skb).pid, | 609 | cb_arg->seq, &netlbl_cipsov4_gnl_family, |
612 | cb_arg->seq, | 610 | NLM_F_MULTI, NLBL_CIPSOV4_C_LISTALL); |
613 | netlbl_cipsov4_gnl_family.id, | ||
614 | NLM_F_MULTI, | ||
615 | NLBL_CIPSOV4_C_LISTALL); | ||
616 | if (data == NULL) | 611 | if (data == NULL) |
617 | goto listall_cb_failure; | 612 | goto listall_cb_failure; |
618 | 613 | ||
@@ -687,11 +682,13 @@ static int netlbl_cipsov4_remove(struct sk_buff *skb, struct genl_info *info) | |||
687 | 682 | ||
688 | audit_buf = netlbl_audit_start_common(AUDIT_MAC_CIPSOV4_DEL, | 683 | audit_buf = netlbl_audit_start_common(AUDIT_MAC_CIPSOV4_DEL, |
689 | &audit_info); | 684 | &audit_info); |
690 | audit_log_format(audit_buf, | 685 | if (audit_buf != NULL) { |
691 | " cipso_doi=%u res=%u", | 686 | audit_log_format(audit_buf, |
692 | doi, | 687 | " cipso_doi=%u res=%u", |
693 | ret_val == 0 ? 1 : 0); | 688 | doi, |
694 | audit_log_end(audit_buf); | 689 | ret_val == 0 ? 1 : 0); |
690 | audit_log_end(audit_buf); | ||
691 | } | ||
695 | 692 | ||
696 | return ret_val; | 693 | return ret_val; |
697 | } | 694 | } |
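
Both audit hunks in this file wrap the audit_log_format()/audit_log_end() calls in a NULL check because netlbl_audit_start_common() can now return NULL when auditing is disabled (see the netlabel_user.c change further down), just as audit_log_start() itself can on allocation failure. A minimal sketch of the caller-side pattern, with the record type and fields modelled on the ones used here:

#include <linux/audit.h>
#include <linux/gfp.h>

static void log_map_add(struct audit_context *ctx,
			const char *domain, int ret_val)
{
	struct audit_buffer *audit_buf;

	audit_buf = audit_log_start(ctx, GFP_ATOMIC, AUDIT_MAC_MAP_ADD);
	if (audit_buf == NULL)		/* auditing off or no memory */
		return;

	audit_log_format(audit_buf, " nlbl_domain=%s res=%u",
			 domain ? domain : "(default)",
			 ret_val == 0 ? 1 : 0);
	audit_log_end(audit_buf);
}
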
diff --git a/net/netlabel/netlabel_domainhash.c b/net/netlabel/netlabel_domainhash.c index af4371d3b459..f46a0aeec44f 100644 --- a/net/netlabel/netlabel_domainhash.c +++ b/net/netlabel/netlabel_domainhash.c | |||
@@ -202,7 +202,6 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry, | |||
202 | int ret_val; | 202 | int ret_val; |
203 | u32 bkt; | 203 | u32 bkt; |
204 | struct audit_buffer *audit_buf; | 204 | struct audit_buffer *audit_buf; |
205 | char *audit_domain; | ||
206 | 205 | ||
207 | switch (entry->type) { | 206 | switch (entry->type) { |
208 | case NETLBL_NLTYPE_UNLABELED: | 207 | case NETLBL_NLTYPE_UNLABELED: |
@@ -243,24 +242,24 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry, | |||
243 | } else | 242 | } else |
244 | ret_val = -EINVAL; | 243 | ret_val = -EINVAL; |
245 | 244 | ||
246 | if (entry->domain != NULL) | ||
247 | audit_domain = entry->domain; | ||
248 | else | ||
249 | audit_domain = "(default)"; | ||
250 | audit_buf = netlbl_audit_start_common(AUDIT_MAC_MAP_ADD, audit_info); | 245 | audit_buf = netlbl_audit_start_common(AUDIT_MAC_MAP_ADD, audit_info); |
251 | audit_log_format(audit_buf, " nlbl_domain=%s", audit_domain); | 246 | if (audit_buf != NULL) { |
252 | switch (entry->type) { | ||
253 | case NETLBL_NLTYPE_UNLABELED: | ||
254 | audit_log_format(audit_buf, " nlbl_protocol=unlbl"); | ||
255 | break; | ||
256 | case NETLBL_NLTYPE_CIPSOV4: | ||
257 | audit_log_format(audit_buf, | 247 | audit_log_format(audit_buf, |
258 | " nlbl_protocol=cipsov4 cipso_doi=%u", | 248 | " nlbl_domain=%s", |
259 | entry->type_def.cipsov4->doi); | 249 | entry->domain ? entry->domain : "(default)"); |
260 | break; | 250 | switch (entry->type) { |
251 | case NETLBL_NLTYPE_UNLABELED: | ||
252 | audit_log_format(audit_buf, " nlbl_protocol=unlbl"); | ||
253 | break; | ||
254 | case NETLBL_NLTYPE_CIPSOV4: | ||
255 | audit_log_format(audit_buf, | ||
256 | " nlbl_protocol=cipsov4 cipso_doi=%u", | ||
257 | entry->type_def.cipsov4->doi); | ||
258 | break; | ||
259 | } | ||
260 | audit_log_format(audit_buf, " res=%u", ret_val == 0 ? 1 : 0); | ||
261 | audit_log_end(audit_buf); | ||
261 | } | 262 | } |
262 | audit_log_format(audit_buf, " res=%u", ret_val == 0 ? 1 : 0); | ||
263 | audit_log_end(audit_buf); | ||
264 | 263 | ||
265 | rcu_read_unlock(); | 264 | rcu_read_unlock(); |
266 | 265 | ||
@@ -310,7 +309,6 @@ int netlbl_domhsh_remove(const char *domain, struct netlbl_audit *audit_info) | |||
310 | int ret_val = -ENOENT; | 309 | int ret_val = -ENOENT; |
311 | struct netlbl_dom_map *entry; | 310 | struct netlbl_dom_map *entry; |
312 | struct audit_buffer *audit_buf; | 311 | struct audit_buffer *audit_buf; |
313 | char *audit_domain; | ||
314 | 312 | ||
315 | rcu_read_lock(); | 313 | rcu_read_lock(); |
316 | if (domain != NULL) | 314 | if (domain != NULL) |
@@ -348,16 +346,14 @@ int netlbl_domhsh_remove(const char *domain, struct netlbl_audit *audit_info) | |||
348 | spin_unlock(&netlbl_domhsh_def_lock); | 346 | spin_unlock(&netlbl_domhsh_def_lock); |
349 | } | 347 | } |
350 | 348 | ||
351 | if (entry->domain != NULL) | ||
352 | audit_domain = entry->domain; | ||
353 | else | ||
354 | audit_domain = "(default)"; | ||
355 | audit_buf = netlbl_audit_start_common(AUDIT_MAC_MAP_DEL, audit_info); | 349 | audit_buf = netlbl_audit_start_common(AUDIT_MAC_MAP_DEL, audit_info); |
356 | audit_log_format(audit_buf, | 350 | if (audit_buf != NULL) { |
357 | " nlbl_domain=%s res=%u", | 351 | audit_log_format(audit_buf, |
358 | audit_domain, | 352 | " nlbl_domain=%s res=%u", |
359 | ret_val == 0 ? 1 : 0); | 353 | entry->domain ? entry->domain : "(default)", |
360 | audit_log_end(audit_buf); | 354 | ret_val == 0 ? 1 : 0); |
355 | audit_log_end(audit_buf); | ||
356 | } | ||
361 | 357 | ||
362 | if (ret_val == 0) | 358 | if (ret_val == 0) |
363 | call_rcu(&entry->rcu, netlbl_domhsh_free_entry); | 359 | call_rcu(&entry->rcu, netlbl_domhsh_free_entry); |
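
The removal path above keeps its existing shape around the audit rework: the entry is unlinked first and then handed to call_rcu(), so readers still inside an rcu_read_lock() section never see freed memory. A short sketch of that idiom; dom_entry and its fields are illustrative names, not the NetLabel structures:

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct dom_entry {
	char *domain;
	struct list_head list;
	struct rcu_head rcu;
};

static void dom_entry_free(struct rcu_head *head)
{
	struct dom_entry *e = container_of(head, struct dom_entry, rcu);

	kfree(e->domain);
	kfree(e);
}

static void dom_entry_remove(struct dom_entry *e)
{
	list_del_rcu(&e->list);			/* unlink (writer-side locking assumed) */
	call_rcu(&e->rcu, dom_entry_free);	/* free after a grace period */
}
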
diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c index ff971103fd0c..e03a3282c551 100644 --- a/net/netlabel/netlabel_kapi.c +++ b/net/netlabel/netlabel_kapi.c | |||
@@ -40,6 +40,207 @@ | |||
40 | #include "netlabel_user.h" | 40 | #include "netlabel_user.h" |
41 | 41 | ||
42 | /* | 42 | /* |
43 | * Security Attribute Functions | ||
44 | */ | ||
45 | |||
46 | /** | ||
47 | * netlbl_secattr_catmap_walk - Walk a LSM secattr catmap looking for a bit | ||
48 | * @catmap: the category bitmap | ||
49 | * @offset: the offset to start searching at, in bits | ||
50 | * | ||
51 | * Description: | ||
52 | * This function walks a LSM secattr category bitmap starting at @offset and | ||
53 | * returns the spot of the first set bit or -ENOENT if no bits are set. | ||
54 | * | ||
55 | */ | ||
56 | int netlbl_secattr_catmap_walk(struct netlbl_lsm_secattr_catmap *catmap, | ||
57 | u32 offset) | ||
58 | { | ||
59 | struct netlbl_lsm_secattr_catmap *iter = catmap; | ||
60 | u32 node_idx; | ||
61 | u32 node_bit; | ||
62 | NETLBL_CATMAP_MAPTYPE bitmap; | ||
63 | |||
64 | if (offset > iter->startbit) { | ||
65 | while (offset >= (iter->startbit + NETLBL_CATMAP_SIZE)) { | ||
66 | iter = iter->next; | ||
67 | if (iter == NULL) | ||
68 | return -ENOENT; | ||
69 | } | ||
70 | node_idx = (offset - iter->startbit) / NETLBL_CATMAP_MAPSIZE; | ||
71 | node_bit = offset - iter->startbit - | ||
72 | (NETLBL_CATMAP_MAPSIZE * node_idx); | ||
73 | } else { | ||
74 | node_idx = 0; | ||
75 | node_bit = 0; | ||
76 | } | ||
77 | bitmap = iter->bitmap[node_idx] >> node_bit; | ||
78 | |||
79 | for (;;) { | ||
80 | if (bitmap != 0) { | ||
81 | while ((bitmap & NETLBL_CATMAP_BIT) == 0) { | ||
82 | bitmap >>= 1; | ||
83 | node_bit++; | ||
84 | } | ||
85 | return iter->startbit + | ||
86 | (NETLBL_CATMAP_MAPSIZE * node_idx) + node_bit; | ||
87 | } | ||
88 | if (++node_idx >= NETLBL_CATMAP_MAPCNT) { | ||
89 | if (iter->next != NULL) { | ||
90 | iter = iter->next; | ||
91 | node_idx = 0; | ||
92 | } else | ||
93 | return -ENOENT; | ||
94 | } | ||
95 | bitmap = iter->bitmap[node_idx]; | ||
96 | node_bit = 0; | ||
97 | } | ||
98 | |||
99 | return -ENOENT; | ||
100 | } | ||
101 | |||
102 | /** | ||
103 | * netlbl_secattr_catmap_walk_rng - Find the end of a string of set bits | ||
104 | * @catmap: the category bitmap | ||
105 | * @offset: the offset to start searching at, in bits | ||
106 | * | ||
107 | * Description: | ||
108 | * This function walks a LSM secattr category bitmap starting at @offset and | ||
109 | * returns the spot of the first cleared bit or -ENOENT if the offset is past | ||
110 | * the end of the bitmap. | ||
111 | * | ||
112 | */ | ||
113 | int netlbl_secattr_catmap_walk_rng(struct netlbl_lsm_secattr_catmap *catmap, | ||
114 | u32 offset) | ||
115 | { | ||
116 | struct netlbl_lsm_secattr_catmap *iter = catmap; | ||
117 | u32 node_idx; | ||
118 | u32 node_bit; | ||
119 | NETLBL_CATMAP_MAPTYPE bitmask; | ||
120 | NETLBL_CATMAP_MAPTYPE bitmap; | ||
121 | |||
122 | if (offset > iter->startbit) { | ||
123 | while (offset >= (iter->startbit + NETLBL_CATMAP_SIZE)) { | ||
124 | iter = iter->next; | ||
125 | if (iter == NULL) | ||
126 | return -ENOENT; | ||
127 | } | ||
128 | node_idx = (offset - iter->startbit) / NETLBL_CATMAP_MAPSIZE; | ||
129 | node_bit = offset - iter->startbit - | ||
130 | (NETLBL_CATMAP_MAPSIZE * node_idx); | ||
131 | } else { | ||
132 | node_idx = 0; | ||
133 | node_bit = 0; | ||
134 | } | ||
135 | bitmask = NETLBL_CATMAP_BIT << node_bit; | ||
136 | |||
137 | for (;;) { | ||
138 | bitmap = iter->bitmap[node_idx]; | ||
139 | while (bitmask != 0 && (bitmap & bitmask) != 0) { | ||
140 | bitmask <<= 1; | ||
141 | node_bit++; | ||
142 | } | ||
143 | |||
144 | if (bitmask != 0) | ||
145 | return iter->startbit + | ||
146 | (NETLBL_CATMAP_MAPSIZE * node_idx) + | ||
147 | node_bit - 1; | ||
148 | else if (++node_idx >= NETLBL_CATMAP_MAPCNT) { | ||
149 | if (iter->next == NULL) | ||
150 | return iter->startbit + NETLBL_CATMAP_SIZE - 1; | ||
151 | iter = iter->next; | ||
152 | node_idx = 0; | ||
153 | } | ||
154 | bitmask = NETLBL_CATMAP_BIT; | ||
155 | node_bit = 0; | ||
156 | } | ||
157 | |||
158 | return -ENOENT; | ||
159 | } | ||
160 | |||
161 | /** | ||
162 | * netlbl_secattr_catmap_setbit - Set a bit in a LSM secattr catmap | ||
163 | * @catmap: the category bitmap | ||
164 | * @bit: the bit to set | ||
165 | * @flags: memory allocation flags | ||
166 | * | ||
167 | * Description: | ||
168 | * Set the bit specified by @bit in @catmap. Returns zero on success, | ||
169 | * negative values on failure. | ||
170 | * | ||
171 | */ | ||
172 | int netlbl_secattr_catmap_setbit(struct netlbl_lsm_secattr_catmap *catmap, | ||
173 | u32 bit, | ||
174 | gfp_t flags) | ||
175 | { | ||
176 | struct netlbl_lsm_secattr_catmap *iter = catmap; | ||
177 | u32 node_bit; | ||
178 | u32 node_idx; | ||
179 | |||
180 | while (iter->next != NULL && | ||
181 | bit >= (iter->startbit + NETLBL_CATMAP_SIZE)) | ||
182 | iter = iter->next; | ||
183 | if (bit >= (iter->startbit + NETLBL_CATMAP_SIZE)) { | ||
184 | iter->next = netlbl_secattr_catmap_alloc(flags); | ||
185 | if (iter->next == NULL) | ||
186 | return -ENOMEM; | ||
187 | iter = iter->next; | ||
188 | iter->startbit = bit & ~(NETLBL_CATMAP_SIZE - 1); | ||
189 | } | ||
190 | |||
191 | /* gcc always rounds to zero when doing integer division */ | ||
192 | node_idx = (bit - iter->startbit) / NETLBL_CATMAP_MAPSIZE; | ||
193 | node_bit = bit - iter->startbit - (NETLBL_CATMAP_MAPSIZE * node_idx); | ||
194 | iter->bitmap[node_idx] |= NETLBL_CATMAP_BIT << node_bit; | ||
195 | |||
196 | return 0; | ||
197 | } | ||
198 | |||
199 | /** | ||
200 | * netlbl_secattr_catmap_setrng - Set a range of bits in a LSM secattr catmap | ||
201 | * @catmap: the category bitmap | ||
202 | * @start: the starting bit | ||
203 | * @end: the last bit in the string | ||
204 | * @flags: memory allocation flags | ||
205 | * | ||
206 | * Description: | ||
207 | * Set a range of bits, starting at @start and ending with @end. Returns zero | ||
208 | * on success, negative values on failure. | ||
209 | * | ||
210 | */ | ||
211 | int netlbl_secattr_catmap_setrng(struct netlbl_lsm_secattr_catmap *catmap, | ||
212 | u32 start, | ||
213 | u32 end, | ||
214 | gfp_t flags) | ||
215 | { | ||
216 | int ret_val = 0; | ||
217 | struct netlbl_lsm_secattr_catmap *iter = catmap; | ||
218 | u32 iter_max_spot; | ||
219 | u32 spot; | ||
220 | |||
221 | /* XXX - This could probably be made a bit faster by combining writes | ||
222 | * to the catmap instead of setting a single bit each time, but for | ||
223 | * right now skipping to the start of the range in the catmap should | ||
224 | * be a nice improvement over calling the individual setbit function | ||
225 | * repeatedly from a loop. */ | ||
226 | |||
227 | while (iter->next != NULL && | ||
228 | start >= (iter->startbit + NETLBL_CATMAP_SIZE)) | ||
229 | iter = iter->next; | ||
230 | iter_max_spot = iter->startbit + NETLBL_CATMAP_SIZE; | ||
231 | |||
232 | for (spot = start; spot <= end && ret_val == 0; spot++) { | ||
233 | if (spot >= iter_max_spot && iter->next != NULL) { | ||
234 | iter = iter->next; | ||
235 | iter_max_spot = iter->startbit + NETLBL_CATMAP_SIZE; | ||
236 | } | ||
237 | ret_val = netlbl_secattr_catmap_setbit(iter, spot, GFP_ATOMIC); | ||
238 | } | ||
239 | |||
240 | return ret_val; | ||
241 | } | ||
242 | |||
243 | /* | ||
43 | * LSM Functions | 244 | * LSM Functions |
44 | */ | 245 | */ |
45 | 246 | ||
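
These catmap helpers are the building blocks for moving an LSM's category set on and off the wire: netlbl_secattr_catmap_walk() finds the next set bit at or after an offset, and netlbl_secattr_catmap_walk_rng() finds the end of the contiguous run it starts (the kernel-doc wording "first cleared bit" is a little loose; the code above returns the last set bit of the run). A hedged sketch of how a caller can enumerate category ranges with them:

#include <linux/kernel.h>
#include <net/netlabel.h>

static void dump_category_ranges(struct netlbl_lsm_secattr_catmap *catmap)
{
	int start, end;

	start = netlbl_secattr_catmap_walk(catmap, 0);
	while (start >= 0) {
		end = netlbl_secattr_catmap_walk_rng(catmap, start);
		printk(KERN_DEBUG "category run %d-%d\n", start, end);
		start = netlbl_secattr_catmap_walk(catmap, end + 1);
	}
}
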
@@ -62,6 +263,9 @@ int netlbl_socket_setattr(const struct socket *sock, | |||
62 | int ret_val = -ENOENT; | 263 | int ret_val = -ENOENT; |
63 | struct netlbl_dom_map *dom_entry; | 264 | struct netlbl_dom_map *dom_entry; |
64 | 265 | ||
266 | if ((secattr->flags & NETLBL_SECATTR_DOMAIN) == 0) | ||
267 | return -ENOENT; | ||
268 | |||
65 | rcu_read_lock(); | 269 | rcu_read_lock(); |
66 | dom_entry = netlbl_domhsh_getentry(secattr->domain); | 270 | dom_entry = netlbl_domhsh_getentry(secattr->domain); |
67 | if (dom_entry == NULL) | 271 | if (dom_entry == NULL) |
@@ -146,10 +350,8 @@ int netlbl_socket_getattr(const struct socket *sock, | |||
146 | int netlbl_skbuff_getattr(const struct sk_buff *skb, | 350 | int netlbl_skbuff_getattr(const struct sk_buff *skb, |
147 | struct netlbl_lsm_secattr *secattr) | 351 | struct netlbl_lsm_secattr *secattr) |
148 | { | 352 | { |
149 | int ret_val; | 353 | if (CIPSO_V4_OPTEXIST(skb) && |
150 | 354 | cipso_v4_skbuff_getattr(skb, secattr) == 0) | |
151 | ret_val = cipso_v4_skbuff_getattr(skb, secattr); | ||
152 | if (ret_val == 0) | ||
153 | return 0; | 355 | return 0; |
154 | 356 | ||
155 | return netlbl_unlabel_getattr(secattr); | 357 | return netlbl_unlabel_getattr(secattr); |
@@ -200,7 +402,7 @@ void netlbl_cache_invalidate(void) | |||
200 | int netlbl_cache_add(const struct sk_buff *skb, | 402 | int netlbl_cache_add(const struct sk_buff *skb, |
201 | const struct netlbl_lsm_secattr *secattr) | 403 | const struct netlbl_lsm_secattr *secattr) |
202 | { | 404 | { |
203 | if (secattr->cache == NULL) | 405 | if ((secattr->flags & NETLBL_SECATTR_CACHE) == 0) |
204 | return -ENOMSG; | 406 | return -ENOMSG; |
205 | 407 | ||
206 | if (CIPSO_V4_OPTEXIST(skb)) | 408 | if (CIPSO_V4_OPTEXIST(skb)) |
diff --git a/net/netlabel/netlabel_mgmt.c b/net/netlabel/netlabel_mgmt.c index 53c9079ad2c3..e8c80f33f3d7 100644 --- a/net/netlabel/netlabel_mgmt.c +++ b/net/netlabel/netlabel_mgmt.c | |||
@@ -188,12 +188,9 @@ static int netlbl_mgmt_listall_cb(struct netlbl_dom_map *entry, void *arg) | |||
188 | struct netlbl_domhsh_walk_arg *cb_arg = arg; | 188 | struct netlbl_domhsh_walk_arg *cb_arg = arg; |
189 | void *data; | 189 | void *data; |
190 | 190 | ||
191 | data = netlbl_netlink_hdr_put(cb_arg->skb, | 191 | data = genlmsg_put(cb_arg->skb, NETLINK_CB(cb_arg->nl_cb->skb).pid, |
192 | NETLINK_CB(cb_arg->nl_cb->skb).pid, | 192 | cb_arg->seq, &netlbl_mgmt_gnl_family, |
193 | cb_arg->seq, | 193 | NLM_F_MULTI, NLBL_MGMT_C_LISTALL); |
194 | netlbl_mgmt_gnl_family.id, | ||
195 | NLM_F_MULTI, | ||
196 | NLBL_MGMT_C_LISTALL); | ||
197 | if (data == NULL) | 194 | if (data == NULL) |
198 | goto listall_cb_failure; | 195 | goto listall_cb_failure; |
199 | 196 | ||
@@ -356,15 +353,11 @@ static int netlbl_mgmt_listdef(struct sk_buff *skb, struct genl_info *info) | |||
356 | void *data; | 353 | void *data; |
357 | struct netlbl_dom_map *entry; | 354 | struct netlbl_dom_map *entry; |
358 | 355 | ||
359 | ans_skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); | 356 | ans_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); |
360 | if (ans_skb == NULL) | 357 | if (ans_skb == NULL) |
361 | return -ENOMEM; | 358 | return -ENOMEM; |
362 | data = netlbl_netlink_hdr_put(ans_skb, | 359 | data = genlmsg_put_reply(ans_skb, info, &netlbl_mgmt_gnl_family, |
363 | info->snd_pid, | 360 | 0, NLBL_MGMT_C_LISTDEF); |
364 | info->snd_seq, | ||
365 | netlbl_mgmt_gnl_family.id, | ||
366 | 0, | ||
367 | NLBL_MGMT_C_LISTDEF); | ||
368 | if (data == NULL) | 361 | if (data == NULL) |
369 | goto listdef_failure; | 362 | goto listdef_failure; |
370 | 363 | ||
@@ -390,7 +383,7 @@ static int netlbl_mgmt_listdef(struct sk_buff *skb, struct genl_info *info) | |||
390 | 383 | ||
391 | genlmsg_end(ans_skb, data); | 384 | genlmsg_end(ans_skb, data); |
392 | 385 | ||
393 | ret_val = genlmsg_unicast(ans_skb, info->snd_pid); | 386 | ret_val = genlmsg_reply(ans_skb, info); |
394 | if (ret_val != 0) | 387 | if (ret_val != 0) |
395 | goto listdef_failure; | 388 | goto listdef_failure; |
396 | return 0; | 389 | return 0; |
@@ -422,12 +415,9 @@ static int netlbl_mgmt_protocols_cb(struct sk_buff *skb, | |||
422 | int ret_val = -ENOMEM; | 415 | int ret_val = -ENOMEM; |
423 | void *data; | 416 | void *data; |
424 | 417 | ||
425 | data = netlbl_netlink_hdr_put(skb, | 418 | data = genlmsg_put(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, |
426 | NETLINK_CB(cb->skb).pid, | 419 | &netlbl_mgmt_gnl_family, NLM_F_MULTI, |
427 | cb->nlh->nlmsg_seq, | 420 | NLBL_MGMT_C_PROTOCOLS); |
428 | netlbl_mgmt_gnl_family.id, | ||
429 | NLM_F_MULTI, | ||
430 | NLBL_MGMT_C_PROTOCOLS); | ||
431 | if (data == NULL) | 421 | if (data == NULL) |
432 | goto protocols_cb_failure; | 422 | goto protocols_cb_failure; |
433 | 423 | ||
@@ -492,15 +482,11 @@ static int netlbl_mgmt_version(struct sk_buff *skb, struct genl_info *info) | |||
492 | struct sk_buff *ans_skb = NULL; | 482 | struct sk_buff *ans_skb = NULL; |
493 | void *data; | 483 | void *data; |
494 | 484 | ||
495 | ans_skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); | 485 | ans_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); |
496 | if (ans_skb == NULL) | 486 | if (ans_skb == NULL) |
497 | return -ENOMEM; | 487 | return -ENOMEM; |
498 | data = netlbl_netlink_hdr_put(ans_skb, | 488 | data = genlmsg_put_reply(ans_skb, info, &netlbl_mgmt_gnl_family, |
499 | info->snd_pid, | 489 | 0, NLBL_MGMT_C_VERSION); |
500 | info->snd_seq, | ||
501 | netlbl_mgmt_gnl_family.id, | ||
502 | 0, | ||
503 | NLBL_MGMT_C_VERSION); | ||
504 | if (data == NULL) | 490 | if (data == NULL) |
505 | goto version_failure; | 491 | goto version_failure; |
506 | 492 | ||
@@ -512,7 +498,7 @@ static int netlbl_mgmt_version(struct sk_buff *skb, struct genl_info *info) | |||
512 | 498 | ||
513 | genlmsg_end(ans_skb, data); | 499 | genlmsg_end(ans_skb, data); |
514 | 500 | ||
515 | ret_val = genlmsg_unicast(ans_skb, info->snd_pid); | 501 | ret_val = genlmsg_reply(ans_skb, info); |
516 | if (ret_val != 0) | 502 | if (ret_val != 0) |
517 | goto version_failure; | 503 | goto version_failure; |
518 | return 0; | 504 | return 0; |
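
All of the single-reply paths in this file now follow the same shape: allocate with nlmsg_new(NLMSG_DEFAULT_SIZE, ...), start the message with genlmsg_put_reply() (which pulls the pid/seq out of the request's genl_info), and send it back with genlmsg_reply() instead of a hand-rolled genlmsg_unicast(). A condensed sketch of that pattern; the family, command and attribute names are placeholders, not NetLabel's:

#include <net/genetlink.h>
#include <net/netlink.h>

enum { MY_ATTR_UNSPEC, MY_ATTR_VALUE, __MY_ATTR_MAX };
enum { MY_CMD_UNSPEC, MY_CMD_LIST, __MY_CMD_MAX };

static struct genl_family my_gnl_family = {
	.id	 = GENL_ID_GENERATE,
	.name	 = "my_family",
	.version = 1,
	.maxattr = __MY_ATTR_MAX - 1,
};

static int my_list_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *ans_skb;
	void *hdr;

	ans_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (ans_skb == NULL)
		return -ENOMEM;

	hdr = genlmsg_put_reply(ans_skb, info, &my_gnl_family,
				0, MY_CMD_LIST);
	if (hdr == NULL)
		goto failure;
	if (nla_put_u32(ans_skb, MY_ATTR_VALUE, 42) != 0)
		goto failure;

	genlmsg_end(ans_skb, hdr);
	return genlmsg_reply(ans_skb, info);	/* unicast back to the requester */

failure:
	nlmsg_free(ans_skb);
	return -ENOMEM;
}
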
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c index 1833ad233b39..5bc37181662e 100644 --- a/net/netlabel/netlabel_unlabeled.c +++ b/net/netlabel/netlabel_unlabeled.c | |||
@@ -35,6 +35,7 @@ | |||
35 | #include <linux/socket.h> | 35 | #include <linux/socket.h> |
36 | #include <linux/string.h> | 36 | #include <linux/string.h> |
37 | #include <linux/skbuff.h> | 37 | #include <linux/skbuff.h> |
38 | #include <linux/audit.h> | ||
38 | #include <net/sock.h> | 39 | #include <net/sock.h> |
39 | #include <net/netlink.h> | 40 | #include <net/netlink.h> |
40 | #include <net/genetlink.h> | 41 | #include <net/genetlink.h> |
@@ -47,7 +48,8 @@ | |||
47 | #include "netlabel_unlabeled.h" | 48 | #include "netlabel_unlabeled.h" |
48 | 49 | ||
49 | /* Accept unlabeled packets flag */ | 50 | /* Accept unlabeled packets flag */ |
50 | static atomic_t netlabel_unlabel_accept_flg = ATOMIC_INIT(0); | 51 | static DEFINE_SPINLOCK(netlabel_unlabel_acceptflg_lock); |
52 | static u8 netlabel_unlabel_acceptflg = 0; | ||
51 | 53 | ||
52 | /* NetLabel Generic NETLINK CIPSOv4 family */ | 54 | /* NetLabel Generic NETLINK CIPSOv4 family */ |
53 | static struct genl_family netlbl_unlabel_gnl_family = { | 55 | static struct genl_family netlbl_unlabel_gnl_family = { |
@@ -82,13 +84,20 @@ static void netlbl_unlabel_acceptflg_set(u8 value, | |||
82 | struct audit_buffer *audit_buf; | 84 | struct audit_buffer *audit_buf; |
83 | u8 old_val; | 85 | u8 old_val; |
84 | 86 | ||
85 | old_val = atomic_read(&netlabel_unlabel_accept_flg); | 87 | rcu_read_lock(); |
86 | atomic_set(&netlabel_unlabel_accept_flg, value); | 88 | old_val = netlabel_unlabel_acceptflg; |
89 | spin_lock(&netlabel_unlabel_acceptflg_lock); | ||
90 | netlabel_unlabel_acceptflg = value; | ||
91 | spin_unlock(&netlabel_unlabel_acceptflg_lock); | ||
92 | rcu_read_unlock(); | ||
87 | 93 | ||
88 | audit_buf = netlbl_audit_start_common(AUDIT_MAC_UNLBL_ALLOW, | 94 | audit_buf = netlbl_audit_start_common(AUDIT_MAC_UNLBL_ALLOW, |
89 | audit_info); | 95 | audit_info); |
90 | audit_log_format(audit_buf, " unlbl_accept=%u old=%u", value, old_val); | 96 | if (audit_buf != NULL) { |
91 | audit_log_end(audit_buf); | 97 | audit_log_format(audit_buf, |
98 | " unlbl_accept=%u old=%u", value, old_val); | ||
99 | audit_log_end(audit_buf); | ||
100 | } | ||
92 | } | 101 | } |
93 | 102 | ||
94 | /* | 103 | /* |
@@ -138,29 +147,27 @@ static int netlbl_unlabel_list(struct sk_buff *skb, struct genl_info *info) | |||
138 | struct sk_buff *ans_skb; | 147 | struct sk_buff *ans_skb; |
139 | void *data; | 148 | void *data; |
140 | 149 | ||
141 | ans_skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); | 150 | ans_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); |
142 | if (ans_skb == NULL) | 151 | if (ans_skb == NULL) |
143 | goto list_failure; | 152 | goto list_failure; |
144 | data = netlbl_netlink_hdr_put(ans_skb, | 153 | data = genlmsg_put_reply(ans_skb, info, &netlbl_unlabel_gnl_family, |
145 | info->snd_pid, | 154 | 0, NLBL_UNLABEL_C_LIST); |
146 | info->snd_seq, | ||
147 | netlbl_unlabel_gnl_family.id, | ||
148 | 0, | ||
149 | NLBL_UNLABEL_C_LIST); | ||
150 | if (data == NULL) { | 155 | if (data == NULL) { |
151 | ret_val = -ENOMEM; | 156 | ret_val = -ENOMEM; |
152 | goto list_failure; | 157 | goto list_failure; |
153 | } | 158 | } |
154 | 159 | ||
160 | rcu_read_lock(); | ||
155 | ret_val = nla_put_u8(ans_skb, | 161 | ret_val = nla_put_u8(ans_skb, |
156 | NLBL_UNLABEL_A_ACPTFLG, | 162 | NLBL_UNLABEL_A_ACPTFLG, |
157 | atomic_read(&netlabel_unlabel_accept_flg)); | 163 | netlabel_unlabel_acceptflg); |
164 | rcu_read_unlock(); | ||
158 | if (ret_val != 0) | 165 | if (ret_val != 0) |
159 | goto list_failure; | 166 | goto list_failure; |
160 | 167 | ||
161 | genlmsg_end(ans_skb, data); | 168 | genlmsg_end(ans_skb, data); |
162 | 169 | ||
163 | ret_val = genlmsg_unicast(ans_skb, info->snd_pid); | 170 | ret_val = genlmsg_reply(ans_skb, info); |
164 | if (ret_val != 0) | 171 | if (ret_val != 0) |
165 | goto list_failure; | 172 | goto list_failure; |
166 | return 0; | 173 | return 0; |
@@ -240,10 +247,17 @@ int netlbl_unlabel_genl_init(void) | |||
240 | */ | 247 | */ |
241 | int netlbl_unlabel_getattr(struct netlbl_lsm_secattr *secattr) | 248 | int netlbl_unlabel_getattr(struct netlbl_lsm_secattr *secattr) |
242 | { | 249 | { |
243 | if (atomic_read(&netlabel_unlabel_accept_flg) == 1) | 250 | int ret_val; |
244 | return netlbl_secattr_init(secattr); | ||
245 | 251 | ||
246 | return -ENOMSG; | 252 | rcu_read_lock(); |
253 | if (netlabel_unlabel_acceptflg == 1) { | ||
254 | netlbl_secattr_init(secattr); | ||
255 | ret_val = 0; | ||
256 | } else | ||
257 | ret_val = -ENOMSG; | ||
258 | rcu_read_unlock(); | ||
259 | |||
260 | return ret_val; | ||
247 | } | 261 | } |
248 | 262 | ||
249 | /** | 263 | /** |
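
The accept flag switches from an atomic_t to a plain u8: writers serialize on a spinlock, and readers wrap the load in rcu_read_lock()/rcu_read_unlock() so the access pattern matches the rest of NetLabel's RCU-protected state (for a single byte the read-side section costs little and mainly documents intent). A stripped-down sketch of the same pattern:

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>

static DEFINE_SPINLOCK(accept_flg_lock);
static u8 accept_flg;

static void accept_flg_set(u8 value)
{
	spin_lock(&accept_flg_lock);	/* writers exclude each other */
	accept_flg = value;
	spin_unlock(&accept_flg_lock);
}

static u8 accept_flg_get(void)
{
	u8 val;

	rcu_read_lock();		/* matches the surrounding code's idiom */
	val = accept_flg;
	rcu_read_unlock();
	return val;
}
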
diff --git a/net/netlabel/netlabel_user.c b/net/netlabel/netlabel_user.c index 98a416381e61..42f12bd65964 100644 --- a/net/netlabel/netlabel_user.c +++ b/net/netlabel/netlabel_user.c | |||
@@ -46,6 +46,10 @@ | |||
46 | #include "netlabel_cipso_v4.h" | 46 | #include "netlabel_cipso_v4.h" |
47 | #include "netlabel_user.h" | 47 | #include "netlabel_user.h" |
48 | 48 | ||
49 | /* do not do any auditing if audit_enabled == 0, see kernel/audit.c for | ||
50 | * details */ | ||
51 | extern int audit_enabled; | ||
52 | |||
49 | /* | 53 | /* |
50 | * NetLabel NETLINK Setup Functions | 54 | * NetLabel NETLINK Setup Functions |
51 | */ | 55 | */ |
@@ -101,6 +105,9 @@ struct audit_buffer *netlbl_audit_start_common(int type, | |||
101 | char *secctx; | 105 | char *secctx; |
102 | u32 secctx_len; | 106 | u32 secctx_len; |
103 | 107 | ||
108 | if (audit_enabled == 0) | ||
109 | return NULL; | ||
110 | |||
104 | audit_buf = audit_log_start(audit_ctx, GFP_ATOMIC, type); | 111 | audit_buf = audit_log_start(audit_ctx, GFP_ATOMIC, type); |
105 | if (audit_buf == NULL) | 112 | if (audit_buf == NULL) |
106 | return NULL; | 113 | return NULL; |
diff --git a/net/netlabel/netlabel_user.h b/net/netlabel/netlabel_user.h index 47967ef32964..6d7f4ab46c2b 100644 --- a/net/netlabel/netlabel_user.h +++ b/net/netlabel/netlabel_user.h | |||
@@ -42,37 +42,6 @@ | |||
42 | /* NetLabel NETLINK helper functions */ | 42 | /* NetLabel NETLINK helper functions */ |
43 | 43 | ||
44 | /** | 44 | /** |
45 | * netlbl_netlink_hdr_put - Write the NETLINK buffers into a sk_buff | ||
46 | * @skb: the packet | ||
47 | * @pid: the PID of the receipient | ||
48 | * @seq: the sequence number | ||
49 | * @type: the generic NETLINK message family type | ||
50 | * @cmd: command | ||
51 | * | ||
52 | * Description: | ||
53 | * Write both a NETLINK nlmsghdr structure and a Generic NETLINK genlmsghdr | ||
54 | * struct to the packet. Returns a pointer to the start of the payload buffer | ||
55 | * on success or NULL on failure. | ||
56 | * | ||
57 | */ | ||
58 | static inline void *netlbl_netlink_hdr_put(struct sk_buff *skb, | ||
59 | u32 pid, | ||
60 | u32 seq, | ||
61 | int type, | ||
62 | int flags, | ||
63 | u8 cmd) | ||
64 | { | ||
65 | return genlmsg_put(skb, | ||
66 | pid, | ||
67 | seq, | ||
68 | type, | ||
69 | 0, | ||
70 | flags, | ||
71 | cmd, | ||
72 | NETLBL_PROTO_VERSION); | ||
73 | } | ||
74 | |||
75 | /** | ||
76 | * netlbl_netlink_auditinfo - Fetch the audit information from a NETLINK msg | 45 | * netlbl_netlink_auditinfo - Fetch the audit information from a NETLINK msg |
77 | * @skb: the packet | 46 | * @skb: the packet |
78 | * @audit_info: NetLabel audit information | 47 | * @audit_info: NetLabel audit information |
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index d527c8977b1f..3baafb10f8f3 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c | |||
@@ -1148,12 +1148,11 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock, | |||
1148 | if (len > sk->sk_sndbuf - 32) | 1148 | if (len > sk->sk_sndbuf - 32) |
1149 | goto out; | 1149 | goto out; |
1150 | err = -ENOBUFS; | 1150 | err = -ENOBUFS; |
1151 | skb = nlmsg_new(len, GFP_KERNEL); | 1151 | skb = alloc_skb(len, GFP_KERNEL); |
1152 | if (skb==NULL) | 1152 | if (skb==NULL) |
1153 | goto out; | 1153 | goto out; |
1154 | 1154 | ||
1155 | NETLINK_CB(skb).pid = nlk->pid; | 1155 | NETLINK_CB(skb).pid = nlk->pid; |
1156 | NETLINK_CB(skb).dst_pid = dst_pid; | ||
1157 | NETLINK_CB(skb).dst_group = dst_group; | 1156 | NETLINK_CB(skb).dst_group = dst_group; |
1158 | NETLINK_CB(skb).loginuid = audit_get_loginuid(current->audit_context); | 1157 | NETLINK_CB(skb).loginuid = audit_get_loginuid(current->audit_context); |
1159 | selinux_get_task_sid(current, &(NETLINK_CB(skb).sid)); | 1158 | selinux_get_task_sid(current, &(NETLINK_CB(skb).sid)); |
@@ -1435,14 +1434,13 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err) | |||
1435 | struct sk_buff *skb; | 1434 | struct sk_buff *skb; |
1436 | struct nlmsghdr *rep; | 1435 | struct nlmsghdr *rep; |
1437 | struct nlmsgerr *errmsg; | 1436 | struct nlmsgerr *errmsg; |
1438 | int size; | 1437 | size_t payload = sizeof(*errmsg); |
1439 | 1438 | ||
1440 | if (err == 0) | 1439 | /* error messages get the original request appended */ |
1441 | size = nlmsg_total_size(sizeof(*errmsg)); | 1440 | if (err) |
1442 | else | 1441 | payload += nlmsg_len(nlh); |
1443 | size = nlmsg_total_size(sizeof(*errmsg) + nlmsg_len(nlh)); | ||
1444 | 1442 | ||
1445 | skb = nlmsg_new(size, GFP_KERNEL); | 1443 | skb = nlmsg_new(payload, GFP_KERNEL); |
1446 | if (!skb) { | 1444 | if (!skb) { |
1447 | struct sock *sk; | 1445 | struct sock *sk; |
1448 | 1446 | ||
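
Both af_netlink hunks react to nlmsg_new() now being sized by payload (it adds the netlink header and alignment itself): netlink_sendmsg() already holds the full message length, so it drops to a plain alloc_skb(), while netlink_ack() passes just the error structure plus, on failure, the echoed request. A sketch of the ack-side sizing under those assumptions:

#include <linux/netlink.h>
#include <net/netlink.h>

static struct sk_buff *ack_skb_alloc(const struct nlmsghdr *nlh, int err)
{
	size_t payload = sizeof(struct nlmsgerr);

	/* Failed requests carry the original message after the error
	 * header, so only then does the payload grow. */
	if (err)
		payload += nlmsg_len(nlh);

	/* nlmsg_new() wants the payload only; header space and alignment
	 * are added internally (unlike a raw alloc_skb()). */
	return nlmsg_new(payload, GFP_KERNEL);
}
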
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c index 49bc2db7982b..b9b03747c1f3 100644 --- a/net/netlink/genetlink.c +++ b/net/netlink/genetlink.c | |||
@@ -331,7 +331,7 @@ static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, | |||
331 | } | 331 | } |
332 | 332 | ||
333 | *errp = err = netlink_dump_start(genl_sock, skb, nlh, | 333 | *errp = err = netlink_dump_start(genl_sock, skb, nlh, |
334 | ops->dumpit, NULL); | 334 | ops->dumpit, ops->done); |
335 | if (err == 0) | 335 | if (err == 0) |
336 | skb_pull(skb, min(NLMSG_ALIGN(nlh->nlmsg_len), | 336 | skb_pull(skb, min(NLMSG_ALIGN(nlh->nlmsg_len), |
337 | skb->len)); | 337 | skb->len)); |
@@ -384,16 +384,19 @@ static void genl_rcv(struct sock *sk, int len) | |||
384 | * Controller | 384 | * Controller |
385 | **************************************************************************/ | 385 | **************************************************************************/ |
386 | 386 | ||
387 | static struct genl_family genl_ctrl = { | ||
388 | .id = GENL_ID_CTRL, | ||
389 | .name = "nlctrl", | ||
390 | .version = 0x1, | ||
391 | .maxattr = CTRL_ATTR_MAX, | ||
392 | }; | ||
393 | |||
387 | static int ctrl_fill_info(struct genl_family *family, u32 pid, u32 seq, | 394 | static int ctrl_fill_info(struct genl_family *family, u32 pid, u32 seq, |
388 | u32 flags, struct sk_buff *skb, u8 cmd) | 395 | u32 flags, struct sk_buff *skb, u8 cmd) |
389 | { | 396 | { |
390 | struct nlattr *nla_ops; | ||
391 | struct genl_ops *ops; | ||
392 | void *hdr; | 397 | void *hdr; |
393 | int idx = 1; | ||
394 | 398 | ||
395 | hdr = genlmsg_put(skb, pid, seq, GENL_ID_CTRL, 0, flags, cmd, | 399 | hdr = genlmsg_put(skb, pid, seq, &genl_ctrl, flags, cmd); |
396 | family->version); | ||
397 | if (hdr == NULL) | 400 | if (hdr == NULL) |
398 | return -1; | 401 | return -1; |
399 | 402 | ||
@@ -403,33 +406,39 @@ static int ctrl_fill_info(struct genl_family *family, u32 pid, u32 seq, | |||
403 | NLA_PUT_U32(skb, CTRL_ATTR_HDRSIZE, family->hdrsize); | 406 | NLA_PUT_U32(skb, CTRL_ATTR_HDRSIZE, family->hdrsize); |
404 | NLA_PUT_U32(skb, CTRL_ATTR_MAXATTR, family->maxattr); | 407 | NLA_PUT_U32(skb, CTRL_ATTR_MAXATTR, family->maxattr); |
405 | 408 | ||
406 | nla_ops = nla_nest_start(skb, CTRL_ATTR_OPS); | 409 | if (!list_empty(&family->ops_list)) { |
407 | if (nla_ops == NULL) | 410 | struct nlattr *nla_ops; |
408 | goto nla_put_failure; | 411 | struct genl_ops *ops; |
409 | 412 | int idx = 1; | |
410 | list_for_each_entry(ops, &family->ops_list, ops_list) { | ||
411 | struct nlattr *nest; | ||
412 | 413 | ||
413 | nest = nla_nest_start(skb, idx++); | 414 | nla_ops = nla_nest_start(skb, CTRL_ATTR_OPS); |
414 | if (nest == NULL) | 415 | if (nla_ops == NULL) |
415 | goto nla_put_failure; | 416 | goto nla_put_failure; |
416 | 417 | ||
417 | NLA_PUT_U32(skb, CTRL_ATTR_OP_ID, ops->cmd); | 418 | list_for_each_entry(ops, &family->ops_list, ops_list) { |
418 | NLA_PUT_U32(skb, CTRL_ATTR_OP_FLAGS, ops->flags); | 419 | struct nlattr *nest; |
419 | 420 | ||
420 | if (ops->policy) | 421 | nest = nla_nest_start(skb, idx++); |
421 | NLA_PUT_FLAG(skb, CTRL_ATTR_OP_POLICY); | 422 | if (nest == NULL) |
423 | goto nla_put_failure; | ||
422 | 424 | ||
423 | if (ops->doit) | 425 | NLA_PUT_U32(skb, CTRL_ATTR_OP_ID, ops->cmd); |
424 | NLA_PUT_FLAG(skb, CTRL_ATTR_OP_DOIT); | 426 | NLA_PUT_U32(skb, CTRL_ATTR_OP_FLAGS, ops->flags); |
425 | 427 | ||
426 | if (ops->dumpit) | 428 | if (ops->policy) |
427 | NLA_PUT_FLAG(skb, CTRL_ATTR_OP_DUMPIT); | 429 | NLA_PUT_FLAG(skb, CTRL_ATTR_OP_POLICY); |
428 | 430 | ||
429 | nla_nest_end(skb, nest); | 431 | if (ops->doit) |
430 | } | 432 | NLA_PUT_FLAG(skb, CTRL_ATTR_OP_DOIT); |
431 | 433 | ||
432 | nla_nest_end(skb, nla_ops); | 434 | if (ops->dumpit) |
435 | NLA_PUT_FLAG(skb, CTRL_ATTR_OP_DUMPIT); | ||
436 | |||
437 | nla_nest_end(skb, nest); | ||
438 | } | ||
439 | |||
440 | nla_nest_end(skb, nla_ops); | ||
441 | } | ||
433 | 442 | ||
434 | return genlmsg_end(skb, hdr); | 443 | return genlmsg_end(skb, hdr); |
435 | 444 | ||
@@ -480,7 +489,7 @@ static struct sk_buff *ctrl_build_msg(struct genl_family *family, u32 pid, | |||
480 | struct sk_buff *skb; | 489 | struct sk_buff *skb; |
481 | int err; | 490 | int err; |
482 | 491 | ||
483 | skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); | 492 | skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); |
484 | if (skb == NULL) | 493 | if (skb == NULL) |
485 | return ERR_PTR(-ENOBUFS); | 494 | return ERR_PTR(-ENOBUFS); |
486 | 495 | ||
@@ -529,7 +538,7 @@ static int ctrl_getfamily(struct sk_buff *skb, struct genl_info *info) | |||
529 | goto errout; | 538 | goto errout; |
530 | } | 539 | } |
531 | 540 | ||
532 | err = genlmsg_unicast(msg, info->snd_pid); | 541 | err = genlmsg_reply(msg, info); |
533 | errout: | 542 | errout: |
534 | return err; | 543 | return err; |
535 | } | 544 | } |
@@ -562,13 +571,6 @@ static struct genl_ops genl_ctrl_ops = { | |||
562 | .policy = ctrl_policy, | 571 | .policy = ctrl_policy, |
563 | }; | 572 | }; |
564 | 573 | ||
565 | static struct genl_family genl_ctrl = { | ||
566 | .id = GENL_ID_CTRL, | ||
567 | .name = "nlctrl", | ||
568 | .version = 0x1, | ||
569 | .maxattr = CTRL_ATTR_MAX, | ||
570 | }; | ||
571 | |||
572 | static int __init genl_init(void) | 574 | static int __init genl_init(void) |
573 | { | 575 | { |
574 | int i, err; | 576 | int i, err; |
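
Besides switching genlmsg_put() to take the family pointer (which is why genl_ctrl moves above ctrl_fill_info()), the controller now guards the CTRL_ATTR_OPS nest with a list_empty() check, so a family without operations emits no empty nest at all. A hedged sketch of that optional-nest pattern; my_family/my_op and their fields are illustrative only:

#include <linux/types.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/genetlink.h>
#include <net/netlink.h>

struct my_op {
	u32 cmd;
	struct list_head ops_list;
};

struct my_family {
	struct list_head ops_list;
};

static int fill_ops(struct sk_buff *skb, struct my_family *fam)
{
	struct nlattr *nla_ops, *nest;
	struct my_op *op;
	int idx = 1;

	if (list_empty(&fam->ops_list))		/* nothing to report: no nest */
		return 0;

	nla_ops = nla_nest_start(skb, CTRL_ATTR_OPS);
	if (nla_ops == NULL)
		goto nla_put_failure;

	list_for_each_entry(op, &fam->ops_list, ops_list) {
		nest = nla_nest_start(skb, idx++);	/* one sub-nest per op */
		if (nest == NULL)
			goto nla_put_failure;
		if (nla_put_u32(skb, CTRL_ATTR_OP_ID, op->cmd) != 0)
			goto nla_put_failure;
		nla_nest_end(skb, nest);
	}
	nla_nest_end(skb, nla_ops);
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
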
diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c index c11737f472d6..0096105bcd47 100644 --- a/net/netrom/nr_route.c +++ b/net/netrom/nr_route.c | |||
@@ -155,14 +155,15 @@ static int nr_add_node(ax25_address *nr, const char *mnemonic, ax25_address *ax2 | |||
155 | atomic_set(&nr_neigh->refcount, 1); | 155 | atomic_set(&nr_neigh->refcount, 1); |
156 | 156 | ||
157 | if (ax25_digi != NULL && ax25_digi->ndigi > 0) { | 157 | if (ax25_digi != NULL && ax25_digi->ndigi > 0) { |
158 | if ((nr_neigh->digipeat = kmalloc(sizeof(*ax25_digi), GFP_KERNEL)) == NULL) { | 158 | nr_neigh->digipeat = kmemdup(ax25_digi, |
159 | sizeof(*ax25_digi), | ||
160 | GFP_KERNEL); | ||
161 | if (nr_neigh->digipeat == NULL) { | ||
159 | kfree(nr_neigh); | 162 | kfree(nr_neigh); |
160 | if (nr_node) | 163 | if (nr_node) |
161 | nr_node_put(nr_node); | 164 | nr_node_put(nr_node); |
162 | return -ENOMEM; | 165 | return -ENOMEM; |
163 | } | 166 | } |
164 | memcpy(nr_neigh->digipeat, ax25_digi, | ||
165 | sizeof(*ax25_digi)); | ||
166 | } | 167 | } |
167 | 168 | ||
168 | spin_lock_bh(&nr_neigh_list_lock); | 169 | spin_lock_bh(&nr_neigh_list_lock); |
@@ -432,11 +433,12 @@ static int nr_add_neigh(ax25_address *callsign, ax25_digi *ax25_digi, struct net | |||
432 | atomic_set(&nr_neigh->refcount, 1); | 433 | atomic_set(&nr_neigh->refcount, 1); |
433 | 434 | ||
434 | if (ax25_digi != NULL && ax25_digi->ndigi > 0) { | 435 | if (ax25_digi != NULL && ax25_digi->ndigi > 0) { |
435 | if ((nr_neigh->digipeat = kmalloc(sizeof(*ax25_digi), GFP_KERNEL)) == NULL) { | 436 | nr_neigh->digipeat = kmemdup(ax25_digi, sizeof(*ax25_digi), |
437 | GFP_KERNEL); | ||
438 | if (nr_neigh->digipeat == NULL) { | ||
436 | kfree(nr_neigh); | 439 | kfree(nr_neigh); |
437 | return -ENOMEM; | 440 | return -ENOMEM; |
438 | } | 441 | } |
439 | memcpy(nr_neigh->digipeat, ax25_digi, sizeof(*ax25_digi)); | ||
440 | } | 442 | } |
441 | 443 | ||
442 | spin_lock_bh(&nr_neigh_list_lock); | 444 | spin_lock_bh(&nr_neigh_list_lock); |
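
This is the first of several kmemdup() conversions in this merge (act_ipt, act_simple and em_meta below get the same treatment): allocate-then-memcpy collapses into one call, which also removes any chance of the allocation and copy lengths drifting apart. A minimal sketch, assuming kmemdup()'s usual kmalloc-plus-copy semantics:

#include <linux/string.h>
#include <linux/gfp.h>

/* Duplicate a digipeater list (or any small blob) in one step. */
static void *dup_blob(const void *src, size_t len, gfp_t gfp)
{
	/* Equivalent to kmalloc(len, gfp) followed by memcpy(copy, src, len),
	 * returning NULL on allocation failure. */
	return kmemdup(src, len, gfp);
}
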
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index f4ccb90e6739..271d2eed0699 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
@@ -201,7 +201,7 @@ struct packet_sock { | |||
201 | spinlock_t bind_lock; | 201 | spinlock_t bind_lock; |
202 | char running; /* prot_hook is attached*/ | 202 | char running; /* prot_hook is attached*/ |
203 | int ifindex; /* bound device */ | 203 | int ifindex; /* bound device */ |
204 | unsigned short num; | 204 | __be16 num; |
205 | #ifdef CONFIG_PACKET_MULTICAST | 205 | #ifdef CONFIG_PACKET_MULTICAST |
206 | struct packet_mclist *mclist; | 206 | struct packet_mclist *mclist; |
207 | #endif | 207 | #endif |
@@ -331,7 +331,7 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock, | |||
331 | struct sockaddr_pkt *saddr=(struct sockaddr_pkt *)msg->msg_name; | 331 | struct sockaddr_pkt *saddr=(struct sockaddr_pkt *)msg->msg_name; |
332 | struct sk_buff *skb; | 332 | struct sk_buff *skb; |
333 | struct net_device *dev; | 333 | struct net_device *dev; |
334 | unsigned short proto=0; | 334 | __be16 proto=0; |
335 | int err; | 335 | int err; |
336 | 336 | ||
337 | /* | 337 | /* |
@@ -704,7 +704,7 @@ static int packet_sendmsg(struct kiocb *iocb, struct socket *sock, | |||
704 | struct sockaddr_ll *saddr=(struct sockaddr_ll *)msg->msg_name; | 704 | struct sockaddr_ll *saddr=(struct sockaddr_ll *)msg->msg_name; |
705 | struct sk_buff *skb; | 705 | struct sk_buff *skb; |
706 | struct net_device *dev; | 706 | struct net_device *dev; |
707 | unsigned short proto; | 707 | __be16 proto; |
708 | unsigned char *addr; | 708 | unsigned char *addr; |
709 | int ifindex, err, reserve = 0; | 709 | int ifindex, err, reserve = 0; |
710 | 710 | ||
@@ -858,7 +858,7 @@ static int packet_release(struct socket *sock) | |||
858 | * Attach a packet hook. | 858 | * Attach a packet hook. |
859 | */ | 859 | */ |
860 | 860 | ||
861 | static int packet_do_bind(struct sock *sk, struct net_device *dev, int protocol) | 861 | static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protocol) |
862 | { | 862 | { |
863 | struct packet_sock *po = pkt_sk(sk); | 863 | struct packet_sock *po = pkt_sk(sk); |
864 | /* | 864 | /* |
@@ -983,6 +983,7 @@ static int packet_create(struct socket *sock, int protocol) | |||
983 | { | 983 | { |
984 | struct sock *sk; | 984 | struct sock *sk; |
985 | struct packet_sock *po; | 985 | struct packet_sock *po; |
986 | __be16 proto = (__force __be16)protocol; /* weird, but documented */ | ||
986 | int err; | 987 | int err; |
987 | 988 | ||
988 | if (!capable(CAP_NET_RAW)) | 989 | if (!capable(CAP_NET_RAW)) |
@@ -1010,7 +1011,7 @@ static int packet_create(struct socket *sock, int protocol) | |||
1010 | 1011 | ||
1011 | po = pkt_sk(sk); | 1012 | po = pkt_sk(sk); |
1012 | sk->sk_family = PF_PACKET; | 1013 | sk->sk_family = PF_PACKET; |
1013 | po->num = protocol; | 1014 | po->num = proto; |
1014 | 1015 | ||
1015 | sk->sk_destruct = packet_sock_destruct; | 1016 | sk->sk_destruct = packet_sock_destruct; |
1016 | atomic_inc(&packet_socks_nr); | 1017 | atomic_inc(&packet_socks_nr); |
@@ -1027,8 +1028,8 @@ static int packet_create(struct socket *sock, int protocol) | |||
1027 | #endif | 1028 | #endif |
1028 | po->prot_hook.af_packet_priv = sk; | 1029 | po->prot_hook.af_packet_priv = sk; |
1029 | 1030 | ||
1030 | if (protocol) { | 1031 | if (proto) { |
1031 | po->prot_hook.type = protocol; | 1032 | po->prot_hook.type = proto; |
1032 | dev_add_pack(&po->prot_hook); | 1033 | dev_add_pack(&po->prot_hook); |
1033 | sock_hold(sk); | 1034 | sock_hold(sk); |
1034 | po->running = 1; | 1035 | po->running = 1; |
@@ -1624,7 +1625,8 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing | |||
1624 | { | 1625 | { |
1625 | char **pg_vec = NULL; | 1626 | char **pg_vec = NULL; |
1626 | struct packet_sock *po = pkt_sk(sk); | 1627 | struct packet_sock *po = pkt_sk(sk); |
1627 | int was_running, num, order = 0; | 1628 | int was_running, order = 0; |
1629 | __be16 num; | ||
1628 | int err = 0; | 1630 | int err = 0; |
1629 | 1631 | ||
1630 | if (req->tp_block_nr) { | 1632 | if (req->tp_block_nr) { |
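
The af_packet changes are sparse-annotation work: the protocol number handed to socket(PF_PACKET, ..., protocol) is, as packet(7) documents, already in network byte order, so it is stored and compared as __be16, and the one place where the plain int is reinterpreted carries a __force cast to mark the conversion as intentional. A small sketch of that style, with hypothetical struct and function names:

#include <linux/types.h>
#include <linux/compiler.h>

struct pkt_hook {
	__be16 proto;			/* on-wire protocol id, big-endian */
};

static void hook_bind(struct pkt_hook *h, int protocol)
{
	/* 'protocol' already arrives byte-swapped, so only the type changes;
	 * __force silences sparse's endianness warning for this one cast. */
	h->proto = (__force __be16)protocol;
}

static int hook_matches(const struct pkt_hook *h, __be16 wire_proto)
{
	return h->proto == wire_proto;	/* compare without swapping either side */
}
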
diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c index a22542fa1bc8..7252344779a0 100644 --- a/net/rose/rose_route.c +++ b/net/rose/rose_route.c | |||
@@ -396,7 +396,7 @@ int rose_add_loopback_neigh(void) | |||
396 | int rose_add_loopback_node(rose_address *address) | 396 | int rose_add_loopback_node(rose_address *address) |
397 | { | 397 | { |
398 | struct rose_node *rose_node; | 398 | struct rose_node *rose_node; |
399 | unsigned int err = 0; | 399 | int err = 0; |
400 | 400 | ||
401 | spin_lock_bh(&rose_node_list_lock); | 401 | spin_lock_bh(&rose_node_list_lock); |
402 | 402 | ||
@@ -432,7 +432,7 @@ int rose_add_loopback_node(rose_address *address) | |||
432 | out: | 432 | out: |
433 | spin_unlock_bh(&rose_node_list_lock); | 433 | spin_unlock_bh(&rose_node_list_lock); |
434 | 434 | ||
435 | return 0; | 435 | return err; |
436 | } | 436 | } |
437 | 437 | ||
438 | /* | 438 | /* |
diff --git a/net/rxrpc/transport.c b/net/rxrpc/transport.c index 94b2e2fe6fdb..4268b38d92d2 100644 --- a/net/rxrpc/transport.c +++ b/net/rxrpc/transport.c | |||
@@ -31,7 +31,6 @@ | |||
31 | #endif | 31 | #endif |
32 | #include <linux/errqueue.h> | 32 | #include <linux/errqueue.h> |
33 | #include <asm/uaccess.h> | 33 | #include <asm/uaccess.h> |
34 | #include <asm/checksum.h> | ||
35 | #include "internal.h" | 34 | #include "internal.h" |
36 | 35 | ||
37 | struct errormsg { | 36 | struct errormsg { |
diff --git a/net/sched/Kconfig b/net/sched/Kconfig index 8298ea9ffe19..f4544dd86476 100644 --- a/net/sched/Kconfig +++ b/net/sched/Kconfig | |||
@@ -6,6 +6,7 @@ menu "QoS and/or fair queueing" | |||
6 | 6 | ||
7 | config NET_SCHED | 7 | config NET_SCHED |
8 | bool "QoS and/or fair queueing" | 8 | bool "QoS and/or fair queueing" |
9 | select NET_SCH_FIFO | ||
9 | ---help--- | 10 | ---help--- |
10 | When the kernel has several packets to send out over a network | 11 | When the kernel has several packets to send out over a network |
11 | device, it has to decide which ones to send first, which ones to | 12 | device, it has to decide which ones to send first, which ones to |
@@ -40,6 +41,9 @@ config NET_SCHED | |||
40 | The available schedulers are listed in the following questions; you | 41 | The available schedulers are listed in the following questions; you |
41 | can say Y to as many as you like. If unsure, say N now. | 42 | can say Y to as many as you like. If unsure, say N now. |
42 | 43 | ||
44 | config NET_SCH_FIFO | ||
45 | bool | ||
46 | |||
43 | if NET_SCHED | 47 | if NET_SCHED |
44 | 48 | ||
45 | choice | 49 | choice |
@@ -320,7 +324,7 @@ config CLS_U32_PERF | |||
320 | 324 | ||
321 | config CLS_U32_MARK | 325 | config CLS_U32_MARK |
322 | bool "Netfilter marks support" | 326 | bool "Netfilter marks support" |
323 | depends on NET_CLS_U32 && NETFILTER | 327 | depends on NET_CLS_U32 |
324 | ---help--- | 328 | ---help--- |
325 | Say Y here to be able to use netfilter marks as u32 key. | 329 | Say Y here to be able to use netfilter marks as u32 key. |
326 | 330 | ||
diff --git a/net/sched/Makefile b/net/sched/Makefile index 0f06aec66094..ff2d6e5e282c 100644 --- a/net/sched/Makefile +++ b/net/sched/Makefile | |||
@@ -4,7 +4,7 @@ | |||
4 | 4 | ||
5 | obj-y := sch_generic.o | 5 | obj-y := sch_generic.o |
6 | 6 | ||
7 | obj-$(CONFIG_NET_SCHED) += sch_api.o sch_fifo.o sch_blackhole.o | 7 | obj-$(CONFIG_NET_SCHED) += sch_api.o sch_blackhole.o |
8 | obj-$(CONFIG_NET_CLS) += cls_api.o | 8 | obj-$(CONFIG_NET_CLS) += cls_api.o |
9 | obj-$(CONFIG_NET_CLS_ACT) += act_api.o | 9 | obj-$(CONFIG_NET_CLS_ACT) += act_api.o |
10 | obj-$(CONFIG_NET_ACT_POLICE) += act_police.o | 10 | obj-$(CONFIG_NET_ACT_POLICE) += act_police.o |
@@ -14,6 +14,7 @@ obj-$(CONFIG_NET_ACT_MIRRED) += act_mirred.o | |||
14 | obj-$(CONFIG_NET_ACT_IPT) += act_ipt.o | 14 | obj-$(CONFIG_NET_ACT_IPT) += act_ipt.o |
15 | obj-$(CONFIG_NET_ACT_PEDIT) += act_pedit.o | 15 | obj-$(CONFIG_NET_ACT_PEDIT) += act_pedit.o |
16 | obj-$(CONFIG_NET_ACT_SIMP) += act_simple.o | 16 | obj-$(CONFIG_NET_ACT_SIMP) += act_simple.o |
17 | obj-$(CONFIG_NET_SCH_FIFO) += sch_fifo.o | ||
17 | obj-$(CONFIG_NET_SCH_CBQ) += sch_cbq.o | 18 | obj-$(CONFIG_NET_SCH_CBQ) += sch_cbq.o |
18 | obj-$(CONFIG_NET_SCH_HTB) += sch_htb.o | 19 | obj-$(CONFIG_NET_SCH_HTB) += sch_htb.o |
19 | obj-$(CONFIG_NET_SCH_HPFQ) += sch_hpfq.o | 20 | obj-$(CONFIG_NET_SCH_HPFQ) += sch_hpfq.o |
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c index 6cff56696a81..85de7efd5fea 100644 --- a/net/sched/act_gact.c +++ b/net/sched/act_gact.c | |||
@@ -48,14 +48,14 @@ static struct tcf_hashinfo gact_hash_info = { | |||
48 | #ifdef CONFIG_GACT_PROB | 48 | #ifdef CONFIG_GACT_PROB |
49 | static int gact_net_rand(struct tcf_gact *gact) | 49 | static int gact_net_rand(struct tcf_gact *gact) |
50 | { | 50 | { |
51 | if (net_random() % gact->tcfg_pval) | 51 | if (!gact->tcfg_pval || net_random() % gact->tcfg_pval) |
52 | return gact->tcf_action; | 52 | return gact->tcf_action; |
53 | return gact->tcfg_paction; | 53 | return gact->tcfg_paction; |
54 | } | 54 | } |
55 | 55 | ||
56 | static int gact_determ(struct tcf_gact *gact) | 56 | static int gact_determ(struct tcf_gact *gact) |
57 | { | 57 | { |
58 | if (gact->tcf_bstats.packets % gact->tcfg_pval) | 58 | if (!gact->tcfg_pval || gact->tcf_bstats.packets % gact->tcfg_pval) |
59 | return gact->tcf_action; | 59 | return gact->tcf_action; |
60 | return gact->tcfg_paction; | 60 | return gact->tcfg_paction; |
61 | } | 61 | } |
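
Both gact helpers now tolerate a zero tcfg_pval, which userspace could previously use to force a division by zero in the modulo. The guard reads naturally because && short-circuits before the division is reached. A portable sketch of the same check:

/* Pick the fallback action roughly once every 'pval' events; a pval of
 * zero means "never", and the short-circuit keeps us out of 'x % 0'. */
static int pick_action(unsigned int pval, unsigned int counter,
		       int action, int fallback)
{
	if (!pval || counter % pval)
		return action;
	return fallback;
}
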
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c index d8c9310da6e5..a9608064a4c3 100644 --- a/net/sched/act_ipt.c +++ b/net/sched/act_ipt.c | |||
@@ -156,10 +156,9 @@ static int tcf_ipt_init(struct rtattr *rta, struct rtattr *est, | |||
156 | rtattr_strlcpy(tname, tb[TCA_IPT_TABLE-1], IFNAMSIZ) >= IFNAMSIZ) | 156 | rtattr_strlcpy(tname, tb[TCA_IPT_TABLE-1], IFNAMSIZ) >= IFNAMSIZ) |
157 | strcpy(tname, "mangle"); | 157 | strcpy(tname, "mangle"); |
158 | 158 | ||
159 | t = kmalloc(td->u.target_size, GFP_KERNEL); | 159 | t = kmemdup(td, td->u.target_size, GFP_KERNEL); |
160 | if (unlikely(!t)) | 160 | if (unlikely(!t)) |
161 | goto err2; | 161 | goto err2; |
162 | memcpy(t, td, td->u.target_size); | ||
163 | 162 | ||
164 | if ((err = ipt_init_target(t, tname, hook)) < 0) | 163 | if ((err = ipt_init_target(t, tname, hook)) < 0) |
165 | goto err3; | 164 | goto err3; |
@@ -256,13 +255,12 @@ static int tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind, int | |||
256 | ** for foolproof you need to not assume this | 255 | ** for foolproof you need to not assume this |
257 | */ | 256 | */ |
258 | 257 | ||
259 | t = kmalloc(ipt->tcfi_t->u.user.target_size, GFP_ATOMIC); | 258 | t = kmemdup(ipt->tcfi_t, ipt->tcfi_t->u.user.target_size, GFP_ATOMIC); |
260 | if (unlikely(!t)) | 259 | if (unlikely(!t)) |
261 | goto rtattr_failure; | 260 | goto rtattr_failure; |
262 | 261 | ||
263 | c.bindcnt = ipt->tcf_bindcnt - bind; | 262 | c.bindcnt = ipt->tcf_bindcnt - bind; |
264 | c.refcnt = ipt->tcf_refcnt - ref; | 263 | c.refcnt = ipt->tcf_refcnt - ref; |
265 | memcpy(t, ipt->tcfi_t, ipt->tcfi_t->u.user.target_size); | ||
266 | strcpy(t->u.user.name, ipt->tcfi_t->u.kernel.target->name); | 264 | strcpy(t->u.user.name, ipt->tcfi_t->u.kernel.target->name); |
267 | 265 | ||
268 | RTA_PUT(skb, TCA_IPT_TARG, ipt->tcfi_t->u.user.target_size, t); | 266 | RTA_PUT(skb, TCA_IPT_TARG, ipt->tcfi_t->u.user.target_size, t); |
diff --git a/net/sched/act_police.c b/net/sched/act_police.c index fed47b658837..af68e1e83251 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c | |||
@@ -46,6 +46,18 @@ static struct tcf_hashinfo police_hash_info = { | |||
46 | .lock = &police_lock, | 46 | .lock = &police_lock, |
47 | }; | 47 | }; |
48 | 48 | ||
49 | /* old policer structure from before tc actions */ | ||
50 | struct tc_police_compat | ||
51 | { | ||
52 | u32 index; | ||
53 | int action; | ||
54 | u32 limit; | ||
55 | u32 burst; | ||
56 | u32 mtu; | ||
57 | struct tc_ratespec rate; | ||
58 | struct tc_ratespec peakrate; | ||
59 | }; | ||
60 | |||
49 | /* Each policer is serialized by its individual spinlock */ | 61 | /* Each policer is serialized by its individual spinlock */ |
50 | 62 | ||
51 | #ifdef CONFIG_NET_CLS_ACT | 63 | #ifdef CONFIG_NET_CLS_ACT |
@@ -131,12 +143,15 @@ static int tcf_act_police_locate(struct rtattr *rta, struct rtattr *est, | |||
131 | struct tc_police *parm; | 143 | struct tc_police *parm; |
132 | struct tcf_police *police; | 144 | struct tcf_police *police; |
133 | struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL; | 145 | struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL; |
146 | int size; | ||
134 | 147 | ||
135 | if (rta == NULL || rtattr_parse_nested(tb, TCA_POLICE_MAX, rta) < 0) | 148 | if (rta == NULL || rtattr_parse_nested(tb, TCA_POLICE_MAX, rta) < 0) |
136 | return -EINVAL; | 149 | return -EINVAL; |
137 | 150 | ||
138 | if (tb[TCA_POLICE_TBF-1] == NULL || | 151 | if (tb[TCA_POLICE_TBF-1] == NULL) |
139 | RTA_PAYLOAD(tb[TCA_POLICE_TBF-1]) != sizeof(*parm)) | 152 | return -EINVAL; |
153 | size = RTA_PAYLOAD(tb[TCA_POLICE_TBF-1]); | ||
154 | if (size != sizeof(*parm) && size != sizeof(struct tc_police_compat)) | ||
140 | return -EINVAL; | 155 | return -EINVAL; |
141 | parm = RTA_DATA(tb[TCA_POLICE_TBF-1]); | 156 | parm = RTA_DATA(tb[TCA_POLICE_TBF-1]); |
142 | 157 | ||
@@ -415,12 +430,15 @@ struct tcf_police *tcf_police_locate(struct rtattr *rta, struct rtattr *est) | |||
415 | struct tcf_police *police; | 430 | struct tcf_police *police; |
416 | struct rtattr *tb[TCA_POLICE_MAX]; | 431 | struct rtattr *tb[TCA_POLICE_MAX]; |
417 | struct tc_police *parm; | 432 | struct tc_police *parm; |
433 | int size; | ||
418 | 434 | ||
419 | if (rtattr_parse_nested(tb, TCA_POLICE_MAX, rta) < 0) | 435 | if (rtattr_parse_nested(tb, TCA_POLICE_MAX, rta) < 0) |
420 | return NULL; | 436 | return NULL; |
421 | 437 | ||
422 | if (tb[TCA_POLICE_TBF-1] == NULL || | 438 | if (tb[TCA_POLICE_TBF-1] == NULL) |
423 | RTA_PAYLOAD(tb[TCA_POLICE_TBF-1]) != sizeof(*parm)) | 439 | return NULL; |
440 | size = RTA_PAYLOAD(tb[TCA_POLICE_TBF-1]); | ||
441 | if (size != sizeof(*parm) && size != sizeof(struct tc_police_compat)) | ||
424 | return NULL; | 442 | return NULL; |
425 | 443 | ||
426 | parm = RTA_DATA(tb[TCA_POLICE_TBF-1]); | 444 | parm = RTA_DATA(tb[TCA_POLICE_TBF-1]); |
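
act_police now accepts two sizes for TCA_POLICE_TBF: the current struct tc_police and the pre-action-API struct tc_police_compat added at the top of the file, whose fields are a prefix of the newer layout. A hedged sketch of that compatibility check, with the compat struct copied from the hunk above:

#include <linux/types.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>

/* Old policer layout from before tc actions (a prefix of tc_police). */
struct tc_police_compat {
	u32 index;
	int action;
	u32 limit;
	u32 burst;
	u32 mtu;
	struct tc_ratespec rate;
	struct tc_ratespec peakrate;
};

/* Return the policer parameters if the attribute carries either the old
 * or the new layout; anything else is rejected. */
static struct tc_police *police_parm(struct rtattr *rta)
{
	int size = RTA_PAYLOAD(rta);

	if (size != sizeof(struct tc_police) &&
	    size != sizeof(struct tc_police_compat))
		return NULL;

	return RTA_DATA(rta);
}
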
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c index 901571a67707..5fe80854ca91 100644 --- a/net/sched/act_simple.c +++ b/net/sched/act_simple.c | |||
@@ -71,11 +71,10 @@ static int tcf_simp_release(struct tcf_defact *d, int bind) | |||
71 | 71 | ||
72 | static int alloc_defdata(struct tcf_defact *d, u32 datalen, void *defdata) | 72 | static int alloc_defdata(struct tcf_defact *d, u32 datalen, void *defdata) |
73 | { | 73 | { |
74 | d->tcfd_defdata = kmalloc(datalen, GFP_KERNEL); | 74 | d->tcfd_defdata = kmemdup(defdata, datalen, GFP_KERNEL); |
75 | if (unlikely(!d->tcfd_defdata)) | 75 | if (unlikely(!d->tcfd_defdata)) |
76 | return -ENOMEM; | 76 | return -ENOMEM; |
77 | d->tcfd_datalen = datalen; | 77 | d->tcfd_datalen = datalen; |
78 | memcpy(d->tcfd_defdata, defdata, datalen); | ||
79 | return 0; | 78 | return 0; |
80 | } | 79 | } |
81 | 80 | ||
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 37a184021647..edb8fc97ae11 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c | |||
@@ -217,7 +217,7 @@ replay: | |||
217 | /* Create new proto tcf */ | 217 | /* Create new proto tcf */ |
218 | 218 | ||
219 | err = -ENOBUFS; | 219 | err = -ENOBUFS; |
220 | if ((tp = kmalloc(sizeof(*tp), GFP_KERNEL)) == NULL) | 220 | if ((tp = kzalloc(sizeof(*tp), GFP_KERNEL)) == NULL) |
221 | goto errout; | 221 | goto errout; |
222 | err = -EINVAL; | 222 | err = -EINVAL; |
223 | tp_ops = tcf_proto_lookup_ops(tca[TCA_KIND-1]); | 223 | tp_ops = tcf_proto_lookup_ops(tca[TCA_KIND-1]); |
@@ -247,7 +247,6 @@ replay: | |||
247 | kfree(tp); | 247 | kfree(tp); |
248 | goto errout; | 248 | goto errout; |
249 | } | 249 | } |
250 | memset(tp, 0, sizeof(*tp)); | ||
251 | tp->ops = tp_ops; | 250 | tp->ops = tp_ops; |
252 | tp->protocol = protocol; | 251 | tp->protocol = protocol; |
253 | tp->prio = nprio ? : tcf_auto_prio(*back); | 252 | tp->prio = nprio ? : tcf_auto_prio(*back); |
diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c index e54acc6bcccd..f59a2c4aa039 100644 --- a/net/sched/cls_fw.c +++ b/net/sched/cls_fw.c | |||
@@ -101,11 +101,7 @@ static int fw_classify(struct sk_buff *skb, struct tcf_proto *tp, | |||
101 | struct fw_head *head = (struct fw_head*)tp->root; | 101 | struct fw_head *head = (struct fw_head*)tp->root; |
102 | struct fw_filter *f; | 102 | struct fw_filter *f; |
103 | int r; | 103 | int r; |
104 | #ifdef CONFIG_NETFILTER | 104 | u32 id = skb->mark & head->mask; |
105 | u32 id = skb->nfmark & head->mask; | ||
106 | #else | ||
107 | u32 id = 0; | ||
108 | #endif | ||
109 | 105 | ||
110 | if (head != NULL) { | 106 | if (head != NULL) { |
111 | for (f=head->ht[fw_hash(id)]; f; f=f->next) { | 107 | for (f=head->ht[fw_hash(id)]; f; f=f->next) { |
diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h index 6e230ecfba05..587b9adab38c 100644 --- a/net/sched/cls_rsvp.h +++ b/net/sched/cls_rsvp.h | |||
@@ -77,7 +77,7 @@ struct rsvp_head | |||
77 | struct rsvp_session | 77 | struct rsvp_session |
78 | { | 78 | { |
79 | struct rsvp_session *next; | 79 | struct rsvp_session *next; |
80 | u32 dst[RSVP_DST_LEN]; | 80 | __be32 dst[RSVP_DST_LEN]; |
81 | struct tc_rsvp_gpi dpi; | 81 | struct tc_rsvp_gpi dpi; |
82 | u8 protocol; | 82 | u8 protocol; |
83 | u8 tunnelid; | 83 | u8 tunnelid; |
@@ -89,7 +89,7 @@ struct rsvp_session | |||
89 | struct rsvp_filter | 89 | struct rsvp_filter |
90 | { | 90 | { |
91 | struct rsvp_filter *next; | 91 | struct rsvp_filter *next; |
92 | u32 src[RSVP_DST_LEN]; | 92 | __be32 src[RSVP_DST_LEN]; |
93 | struct tc_rsvp_gpi spi; | 93 | struct tc_rsvp_gpi spi; |
94 | u8 tunnelhdr; | 94 | u8 tunnelhdr; |
95 | 95 | ||
@@ -100,17 +100,17 @@ struct rsvp_filter | |||
100 | struct rsvp_session *sess; | 100 | struct rsvp_session *sess; |
101 | }; | 101 | }; |
102 | 102 | ||
103 | static __inline__ unsigned hash_dst(u32 *dst, u8 protocol, u8 tunnelid) | 103 | static __inline__ unsigned hash_dst(__be32 *dst, u8 protocol, u8 tunnelid) |
104 | { | 104 | { |
105 | unsigned h = dst[RSVP_DST_LEN-1]; | 105 | unsigned h = (__force __u32)dst[RSVP_DST_LEN-1]; |
106 | h ^= h>>16; | 106 | h ^= h>>16; |
107 | h ^= h>>8; | 107 | h ^= h>>8; |
108 | return (h ^ protocol ^ tunnelid) & 0xFF; | 108 | return (h ^ protocol ^ tunnelid) & 0xFF; |
109 | } | 109 | } |
110 | 110 | ||
111 | static __inline__ unsigned hash_src(u32 *src) | 111 | static __inline__ unsigned hash_src(__be32 *src) |
112 | { | 112 | { |
113 | unsigned h = src[RSVP_DST_LEN-1]; | 113 | unsigned h = (__force __u32)src[RSVP_DST_LEN-1]; |
114 | h ^= h>>16; | 114 | h ^= h>>16; |
115 | h ^= h>>8; | 115 | h ^= h>>8; |
116 | h ^= h>>4; | 116 | h ^= h>>4; |
@@ -138,7 +138,7 @@ static int rsvp_classify(struct sk_buff *skb, struct tcf_proto *tp, | |||
138 | struct rsvp_session *s; | 138 | struct rsvp_session *s; |
139 | struct rsvp_filter *f; | 139 | struct rsvp_filter *f; |
140 | unsigned h1, h2; | 140 | unsigned h1, h2; |
141 | u32 *dst, *src; | 141 | __be32 *dst, *src; |
142 | u8 protocol; | 142 | u8 protocol; |
143 | u8 tunnelid = 0; | 143 | u8 tunnelid = 0; |
144 | u8 *xprt; | 144 | u8 *xprt; |
@@ -410,7 +410,7 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base, | |||
410 | struct rtattr *tb[TCA_RSVP_MAX]; | 410 | struct rtattr *tb[TCA_RSVP_MAX]; |
411 | struct tcf_exts e; | 411 | struct tcf_exts e; |
412 | unsigned h1, h2; | 412 | unsigned h1, h2; |
413 | u32 *dst; | 413 | __be32 *dst; |
414 | int err; | 414 | int err; |
415 | 415 | ||
416 | if (opt == NULL) | 416 | if (opt == NULL) |
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index 0a6cfa0005be..8b5194801995 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c | |||
@@ -143,7 +143,7 @@ next_knode: | |||
143 | #endif | 143 | #endif |
144 | 144 | ||
145 | #ifdef CONFIG_CLS_U32_MARK | 145 | #ifdef CONFIG_CLS_U32_MARK |
146 | if ((skb->nfmark & n->mark.mask) != n->mark.val) { | 146 | if ((skb->mark & n->mark.mask) != n->mark.val) { |
147 | n = n->next; | 147 | n = n->next; |
148 | goto next_knode; | 148 | goto next_knode; |
149 | } else { | 149 | } else { |
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c index 61e3b740ab1a..45d47d37155e 100644 --- a/net/sched/em_meta.c +++ b/net/sched/em_meta.c | |||
@@ -208,13 +208,9 @@ META_COLLECTOR(int_maclen) | |||
208 | * Netfilter | 208 | * Netfilter |
209 | **************************************************************************/ | 209 | **************************************************************************/ |
210 | 210 | ||
211 | META_COLLECTOR(int_nfmark) | 211 | META_COLLECTOR(int_mark) |
212 | { | 212 | { |
213 | #ifdef CONFIG_NETFILTER | 213 | dst->value = skb->mark; |
214 | dst->value = skb->nfmark; | ||
215 | #else | ||
216 | dst->value = 0; | ||
217 | #endif | ||
218 | } | 214 | } |
219 | 215 | ||
220 | /************************************************************************** | 216 | /************************************************************************** |
@@ -490,7 +486,7 @@ static struct meta_ops __meta_ops[TCF_META_TYPE_MAX+1][TCF_META_ID_MAX+1] = { | |||
490 | [META_ID(PKTLEN)] = META_FUNC(int_pktlen), | 486 | [META_ID(PKTLEN)] = META_FUNC(int_pktlen), |
491 | [META_ID(DATALEN)] = META_FUNC(int_datalen), | 487 | [META_ID(DATALEN)] = META_FUNC(int_datalen), |
492 | [META_ID(MACLEN)] = META_FUNC(int_maclen), | 488 | [META_ID(MACLEN)] = META_FUNC(int_maclen), |
493 | [META_ID(NFMARK)] = META_FUNC(int_nfmark), | 489 | [META_ID(NFMARK)] = META_FUNC(int_mark), |
494 | [META_ID(TCINDEX)] = META_FUNC(int_tcindex), | 490 | [META_ID(TCINDEX)] = META_FUNC(int_tcindex), |
495 | [META_ID(RTCLASSID)] = META_FUNC(int_rtclassid), | 491 | [META_ID(RTCLASSID)] = META_FUNC(int_rtclassid), |
496 | [META_ID(RTIIF)] = META_FUNC(int_rtiif), | 492 | [META_ID(RTIIF)] = META_FUNC(int_rtiif), |
@@ -550,10 +546,9 @@ static int meta_var_change(struct meta_value *dst, struct rtattr *rta) | |||
550 | { | 546 | { |
551 | int len = RTA_PAYLOAD(rta); | 547 | int len = RTA_PAYLOAD(rta); |
552 | 548 | ||
553 | dst->val = (unsigned long) kmalloc(len, GFP_KERNEL); | 549 | dst->val = (unsigned long)kmemdup(RTA_DATA(rta), len, GFP_KERNEL); |
554 | if (dst->val == 0UL) | 550 | if (dst->val == 0UL) |
555 | return -ENOMEM; | 551 | return -ENOMEM; |
556 | memcpy((void *) dst->val, RTA_DATA(rta), len); | ||
557 | dst->len = len; | 552 | dst->len = len; |
558 | return 0; | 553 | return 0; |
559 | } | 554 | } |
diff --git a/net/sched/em_nbyte.c b/net/sched/em_nbyte.c index cc80babfd79f..005db409be64 100644 --- a/net/sched/em_nbyte.c +++ b/net/sched/em_nbyte.c | |||
@@ -34,12 +34,10 @@ static int em_nbyte_change(struct tcf_proto *tp, void *data, int data_len, | |||
34 | return -EINVAL; | 34 | return -EINVAL; |
35 | 35 | ||
36 | em->datalen = sizeof(*nbyte) + nbyte->len; | 36 | em->datalen = sizeof(*nbyte) + nbyte->len; |
37 | em->data = (unsigned long) kmalloc(em->datalen, GFP_KERNEL); | 37 | em->data = (unsigned long)kmemdup(data, em->datalen, GFP_KERNEL); |
38 | if (em->data == 0UL) | 38 | if (em->data == 0UL) |
39 | return -ENOBUFS; | 39 | return -ENOBUFS; |
40 | 40 | ||
41 | memcpy((void *) em->data, data, em->datalen); | ||
42 | |||
43 | return 0; | 41 | return 0; |
44 | } | 42 | } |
45 | 43 | ||
diff --git a/net/sched/ematch.c b/net/sched/ematch.c index 0fd0768a17c6..8f8a16da72a8 100644 --- a/net/sched/ematch.c +++ b/net/sched/ematch.c | |||
@@ -251,12 +251,11 @@ static int tcf_em_validate(struct tcf_proto *tp, | |||
251 | goto errout; | 251 | goto errout; |
252 | em->data = *(u32 *) data; | 252 | em->data = *(u32 *) data; |
253 | } else { | 253 | } else { |
254 | void *v = kmalloc(data_len, GFP_KERNEL); | 254 | void *v = kmemdup(data, data_len, GFP_KERNEL); |
255 | if (v == NULL) { | 255 | if (v == NULL) { |
256 | err = -ENOBUFS; | 256 | err = -ENOBUFS; |
257 | goto errout; | 257 | goto errout; |
258 | } | 258 | } |
259 | memcpy(v, data, data_len); | ||
260 | em->data = (unsigned long) v; | 259 | em->data = (unsigned long) v; |
261 | } | 260 | } |
262 | } | 261 | } |
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 0b6489291140..65825f4409d9 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c | |||
@@ -191,21 +191,27 @@ int unregister_qdisc(struct Qdisc_ops *qops) | |||
191 | (root qdisc, all its children, children of children etc.) | 191 | (root qdisc, all its children, children of children etc.) |
192 | */ | 192 | */ |
193 | 193 | ||
194 | struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle) | 194 | static struct Qdisc *__qdisc_lookup(struct net_device *dev, u32 handle) |
195 | { | 195 | { |
196 | struct Qdisc *q; | 196 | struct Qdisc *q; |
197 | 197 | ||
198 | read_lock(&qdisc_tree_lock); | ||
199 | list_for_each_entry(q, &dev->qdisc_list, list) { | 198 | list_for_each_entry(q, &dev->qdisc_list, list) { |
200 | if (q->handle == handle) { | 199 | if (q->handle == handle) |
201 | read_unlock(&qdisc_tree_lock); | ||
202 | return q; | 200 | return q; |
203 | } | ||
204 | } | 201 | } |
205 | read_unlock(&qdisc_tree_lock); | ||
206 | return NULL; | 202 | return NULL; |
207 | } | 203 | } |
208 | 204 | ||
205 | struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle) | ||
206 | { | ||
207 | struct Qdisc *q; | ||
208 | |||
209 | read_lock(&qdisc_tree_lock); | ||
210 | q = __qdisc_lookup(dev, handle); | ||
211 | read_unlock(&qdisc_tree_lock); | ||
212 | return q; | ||
213 | } | ||
214 | |||
209 | static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid) | 215 | static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid) |
210 | { | 216 | { |
211 | unsigned long cl; | 217 | unsigned long cl; |
@@ -348,6 +354,26 @@ dev_graft_qdisc(struct net_device *dev, struct Qdisc *qdisc) | |||
348 | return oqdisc; | 354 | return oqdisc; |
349 | } | 355 | } |
350 | 356 | ||
357 | void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n) | ||
358 | { | ||
359 | struct Qdisc_class_ops *cops; | ||
360 | unsigned long cl; | ||
361 | u32 parentid; | ||
362 | |||
363 | if (n == 0) | ||
364 | return; | ||
365 | while ((parentid = sch->parent)) { | ||
366 | sch = __qdisc_lookup(sch->dev, TC_H_MAJ(parentid)); | ||
367 | cops = sch->ops->cl_ops; | ||
368 | if (cops->qlen_notify) { | ||
369 | cl = cops->get(sch, parentid); | ||
370 | cops->qlen_notify(sch, cl); | ||
371 | cops->put(sch, cl); | ||
372 | } | ||
373 | sch->q.qlen -= n; | ||
374 | } | ||
375 | } | ||
376 | EXPORT_SYMBOL(qdisc_tree_decrease_qlen); | ||
351 | 377 | ||
352 | /* Graft qdisc "new" to class "classid" of qdisc "parent" or | 378 | /* Graft qdisc "new" to class "classid" of qdisc "parent" or |
353 | to device "dev". | 379 | to device "dev". |
@@ -1112,7 +1138,7 @@ int tc_classify(struct sk_buff *skb, struct tcf_proto *tp, | |||
1112 | struct tcf_result *res) | 1138 | struct tcf_result *res) |
1113 | { | 1139 | { |
1114 | int err = 0; | 1140 | int err = 0; |
1115 | u32 protocol = skb->protocol; | 1141 | __be16 protocol = skb->protocol; |
1116 | #ifdef CONFIG_NET_CLS_ACT | 1142 | #ifdef CONFIG_NET_CLS_ACT |
1117 | struct tcf_proto *otp = tp; | 1143 | struct tcf_proto *otp = tp; |
1118 | reclassify: | 1144 | reclassify: |
@@ -1277,7 +1303,6 @@ static int __init pktsched_init(void) | |||
1277 | 1303 | ||
1278 | subsys_initcall(pktsched_init); | 1304 | subsys_initcall(pktsched_init); |
1279 | 1305 | ||
1280 | EXPORT_SYMBOL(qdisc_lookup); | ||
1281 | EXPORT_SYMBOL(qdisc_get_rtab); | 1306 | EXPORT_SYMBOL(qdisc_get_rtab); |
1282 | EXPORT_SYMBOL(qdisc_put_rtab); | 1307 | EXPORT_SYMBOL(qdisc_put_rtab); |
1283 | EXPORT_SYMBOL(register_qdisc); | 1308 | EXPORT_SYMBOL(register_qdisc); |
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c index dbf44da0912f..edc7bb0b9c8b 100644 --- a/net/sched/sch_atm.c +++ b/net/sched/sch_atm.c | |||
@@ -316,7 +316,7 @@ static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent, | |||
316 | } | 316 | } |
317 | memset(flow,0,sizeof(*flow)); | 317 | memset(flow,0,sizeof(*flow)); |
318 | flow->filter_list = NULL; | 318 | flow->filter_list = NULL; |
319 | if (!(flow->q = qdisc_create_dflt(sch->dev,&pfifo_qdisc_ops))) | 319 | if (!(flow->q = qdisc_create_dflt(sch->dev,&pfifo_qdisc_ops,classid))) |
320 | flow->q = &noop_qdisc; | 320 | flow->q = &noop_qdisc; |
321 | DPRINTK("atm_tc_change: qdisc %p\n",flow->q); | 321 | DPRINTK("atm_tc_change: qdisc %p\n",flow->q); |
322 | flow->sock = sock; | 322 | flow->sock = sock; |
@@ -576,7 +576,8 @@ static int atm_tc_init(struct Qdisc *sch,struct rtattr *opt) | |||
576 | 576 | ||
577 | DPRINTK("atm_tc_init(sch %p,[qdisc %p],opt %p)\n",sch,p,opt); | 577 | DPRINTK("atm_tc_init(sch %p,[qdisc %p],opt %p)\n",sch,p,opt); |
578 | p->flows = &p->link; | 578 | p->flows = &p->link; |
579 | if(!(p->link.q = qdisc_create_dflt(sch->dev,&pfifo_qdisc_ops))) | 579 | if(!(p->link.q = qdisc_create_dflt(sch->dev,&pfifo_qdisc_ops, |
580 | sch->handle))) | ||
580 | p->link.q = &noop_qdisc; | 581 | p->link.q = &noop_qdisc; |
581 | DPRINTK("atm_tc_init: link (%p) qdisc %p\n",&p->link,p->link.q); | 582 | DPRINTK("atm_tc_init: link (%p) qdisc %p\n",&p->link,p->link.q); |
582 | p->link.filter_list = NULL; | 583 | p->link.filter_list = NULL; |
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index bac881bfe362..ba82dfab6043 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c | |||
@@ -1429,7 +1429,8 @@ static int cbq_init(struct Qdisc *sch, struct rtattr *opt) | |||
1429 | q->link.sibling = &q->link; | 1429 | q->link.sibling = &q->link; |
1430 | q->link.classid = sch->handle; | 1430 | q->link.classid = sch->handle; |
1431 | q->link.qdisc = sch; | 1431 | q->link.qdisc = sch; |
1432 | if (!(q->link.q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops))) | 1432 | if (!(q->link.q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, |
1433 | sch->handle))) | ||
1433 | q->link.q = &noop_qdisc; | 1434 | q->link.q = &noop_qdisc; |
1434 | 1435 | ||
1435 | q->link.priority = TC_CBQ_MAXPRIO-1; | 1436 | q->link.priority = TC_CBQ_MAXPRIO-1; |
@@ -1674,7 +1675,8 @@ static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, | |||
1674 | 1675 | ||
1675 | if (cl) { | 1676 | if (cl) { |
1676 | if (new == NULL) { | 1677 | if (new == NULL) { |
1677 | if ((new = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops)) == NULL) | 1678 | if ((new = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, |
1679 | cl->classid)) == NULL) | ||
1678 | return -ENOBUFS; | 1680 | return -ENOBUFS; |
1679 | } else { | 1681 | } else { |
1680 | #ifdef CONFIG_NET_CLS_POLICE | 1682 | #ifdef CONFIG_NET_CLS_POLICE |
@@ -1685,7 +1687,7 @@ static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, | |||
1685 | sch_tree_lock(sch); | 1687 | sch_tree_lock(sch); |
1686 | *old = cl->q; | 1688 | *old = cl->q; |
1687 | cl->q = new; | 1689 | cl->q = new; |
1688 | sch->q.qlen -= (*old)->q.qlen; | 1690 | qdisc_tree_decrease_qlen(*old, (*old)->q.qlen); |
1689 | qdisc_reset(*old); | 1691 | qdisc_reset(*old); |
1690 | sch_tree_unlock(sch); | 1692 | sch_tree_unlock(sch); |
1691 | 1693 | ||
@@ -1932,7 +1934,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct rtattr **t | |||
1932 | cl->R_tab = rtab; | 1934 | cl->R_tab = rtab; |
1933 | rtab = NULL; | 1935 | rtab = NULL; |
1934 | cl->refcnt = 1; | 1936 | cl->refcnt = 1; |
1935 | if (!(cl->q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops))) | 1937 | if (!(cl->q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, classid))) |
1936 | cl->q = &noop_qdisc; | 1938 | cl->q = &noop_qdisc; |
1937 | cl->classid = classid; | 1939 | cl->classid = classid; |
1938 | cl->tparent = parent; | 1940 | cl->tparent = parent; |
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c index 11c8a2119b96..d5421816f007 100644 --- a/net/sched/sch_dsmark.c +++ b/net/sched/sch_dsmark.c | |||
@@ -88,15 +88,16 @@ static int dsmark_graft(struct Qdisc *sch, unsigned long arg, | |||
88 | sch, p, new, old); | 88 | sch, p, new, old); |
89 | 89 | ||
90 | if (new == NULL) { | 90 | if (new == NULL) { |
91 | new = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops); | 91 | new = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, |
92 | sch->handle); | ||
92 | if (new == NULL) | 93 | if (new == NULL) |
93 | new = &noop_qdisc; | 94 | new = &noop_qdisc; |
94 | } | 95 | } |
95 | 96 | ||
96 | sch_tree_lock(sch); | 97 | sch_tree_lock(sch); |
97 | *old = xchg(&p->q, new); | 98 | *old = xchg(&p->q, new); |
99 | qdisc_tree_decrease_qlen(*old, (*old)->q.qlen); | ||
98 | qdisc_reset(*old); | 100 | qdisc_reset(*old); |
99 | sch->q.qlen = 0; | ||
100 | sch_tree_unlock(sch); | 101 | sch_tree_unlock(sch); |
101 | 102 | ||
102 | return 0; | 103 | return 0; |
@@ -307,7 +308,7 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch) | |||
307 | if (p->mask[index] != 0xff || p->value[index]) | 308 | if (p->mask[index] != 0xff || p->value[index]) |
308 | printk(KERN_WARNING "dsmark_dequeue: " | 309 | printk(KERN_WARNING "dsmark_dequeue: " |
309 | "unsupported protocol %d\n", | 310 | "unsupported protocol %d\n", |
310 | htons(skb->protocol)); | 311 | ntohs(skb->protocol)); |
311 | break; | 312 | break; |
312 | }; | 313 | }; |
313 | 314 | ||
@@ -387,7 +388,7 @@ static int dsmark_init(struct Qdisc *sch, struct rtattr *opt) | |||
387 | p->default_index = default_index; | 388 | p->default_index = default_index; |
388 | p->set_tc_index = RTA_GET_FLAG(tb[TCA_DSMARK_SET_TC_INDEX-1]); | 389 | p->set_tc_index = RTA_GET_FLAG(tb[TCA_DSMARK_SET_TC_INDEX-1]); |
389 | 390 | ||
390 | p->q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops); | 391 | p->q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, sch->handle); |
391 | if (p->q == NULL) | 392 | if (p->q == NULL) |
392 | p->q = &noop_qdisc; | 393 | p->q = &noop_qdisc; |
393 | 394 | ||
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 88c6a99ce53c..bc116bd6937c 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c | |||
@@ -450,13 +450,15 @@ errout: | |||
450 | return ERR_PTR(-err); | 450 | return ERR_PTR(-err); |
451 | } | 451 | } |
452 | 452 | ||
453 | struct Qdisc * qdisc_create_dflt(struct net_device *dev, struct Qdisc_ops *ops) | 453 | struct Qdisc * qdisc_create_dflt(struct net_device *dev, struct Qdisc_ops *ops, |
454 | unsigned int parentid) | ||
454 | { | 455 | { |
455 | struct Qdisc *sch; | 456 | struct Qdisc *sch; |
456 | 457 | ||
457 | sch = qdisc_alloc(dev, ops); | 458 | sch = qdisc_alloc(dev, ops); |
458 | if (IS_ERR(sch)) | 459 | if (IS_ERR(sch)) |
459 | goto errout; | 460 | goto errout; |
461 | sch->parent = parentid; | ||
460 | 462 | ||
461 | if (!ops->init || ops->init(sch, NULL) == 0) | 463 | if (!ops->init || ops->init(sch, NULL) == 0) |
462 | return sch; | 464 | return sch; |
@@ -520,7 +522,8 @@ void dev_activate(struct net_device *dev) | |||
520 | if (dev->qdisc_sleeping == &noop_qdisc) { | 522 | if (dev->qdisc_sleeping == &noop_qdisc) { |
521 | struct Qdisc *qdisc; | 523 | struct Qdisc *qdisc; |
522 | if (dev->tx_queue_len) { | 524 | if (dev->tx_queue_len) { |
523 | qdisc = qdisc_create_dflt(dev, &pfifo_fast_ops); | 525 | qdisc = qdisc_create_dflt(dev, &pfifo_fast_ops, |
526 | TC_H_ROOT); | ||
524 | if (qdisc == NULL) { | 527 | if (qdisc == NULL) { |
525 | printk(KERN_INFO "%s: activation failed\n", dev->name); | 528 | printk(KERN_INFO "%s: activation failed\n", dev->name); |
526 | return; | 529 | return; |
@@ -606,13 +609,10 @@ void dev_shutdown(struct net_device *dev) | |||
606 | qdisc_unlock_tree(dev); | 609 | qdisc_unlock_tree(dev); |
607 | } | 610 | } |
608 | 611 | ||
609 | EXPORT_SYMBOL(__netdev_watchdog_up); | ||
610 | EXPORT_SYMBOL(netif_carrier_on); | 612 | EXPORT_SYMBOL(netif_carrier_on); |
611 | EXPORT_SYMBOL(netif_carrier_off); | 613 | EXPORT_SYMBOL(netif_carrier_off); |
612 | EXPORT_SYMBOL(noop_qdisc); | 614 | EXPORT_SYMBOL(noop_qdisc); |
613 | EXPORT_SYMBOL(noop_qdisc_ops); | ||
614 | EXPORT_SYMBOL(qdisc_create_dflt); | 615 | EXPORT_SYMBOL(qdisc_create_dflt); |
615 | EXPORT_SYMBOL(qdisc_alloc); | ||
616 | EXPORT_SYMBOL(qdisc_destroy); | 616 | EXPORT_SYMBOL(qdisc_destroy); |
617 | EXPORT_SYMBOL(qdisc_reset); | 617 | EXPORT_SYMBOL(qdisc_reset); |
618 | EXPORT_SYMBOL(qdisc_lock_tree); | 618 | EXPORT_SYMBOL(qdisc_lock_tree); |
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index 6a6735a2ed35..6eefa6995777 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c | |||
@@ -946,6 +946,7 @@ qdisc_peek_len(struct Qdisc *sch) | |||
946 | if (unlikely(sch->ops->requeue(skb, sch) != NET_XMIT_SUCCESS)) { | 946 | if (unlikely(sch->ops->requeue(skb, sch) != NET_XMIT_SUCCESS)) { |
947 | if (net_ratelimit()) | 947 | if (net_ratelimit()) |
948 | printk("qdisc_peek_len: failed to requeue\n"); | 948 | printk("qdisc_peek_len: failed to requeue\n"); |
949 | qdisc_tree_decrease_qlen(sch, 1); | ||
949 | return 0; | 950 | return 0; |
950 | } | 951 | } |
951 | return len; | 952 | return len; |
@@ -957,11 +958,7 @@ hfsc_purge_queue(struct Qdisc *sch, struct hfsc_class *cl) | |||
957 | unsigned int len = cl->qdisc->q.qlen; | 958 | unsigned int len = cl->qdisc->q.qlen; |
958 | 959 | ||
959 | qdisc_reset(cl->qdisc); | 960 | qdisc_reset(cl->qdisc); |
960 | if (len > 0) { | 961 | qdisc_tree_decrease_qlen(cl->qdisc, len); |
961 | update_vf(cl, 0, 0); | ||
962 | set_passive(cl); | ||
963 | sch->q.qlen -= len; | ||
964 | } | ||
965 | } | 962 | } |
966 | 963 | ||
967 | static void | 964 | static void |
@@ -1138,7 +1135,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid, | |||
1138 | cl->classid = classid; | 1135 | cl->classid = classid; |
1139 | cl->sched = q; | 1136 | cl->sched = q; |
1140 | cl->cl_parent = parent; | 1137 | cl->cl_parent = parent; |
1141 | cl->qdisc = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops); | 1138 | cl->qdisc = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, classid); |
1142 | if (cl->qdisc == NULL) | 1139 | if (cl->qdisc == NULL) |
1143 | cl->qdisc = &noop_qdisc; | 1140 | cl->qdisc = &noop_qdisc; |
1144 | cl->stats_lock = &sch->dev->queue_lock; | 1141 | cl->stats_lock = &sch->dev->queue_lock; |
@@ -1271,7 +1268,8 @@ hfsc_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, | |||
1271 | if (cl->level > 0) | 1268 | if (cl->level > 0) |
1272 | return -EINVAL; | 1269 | return -EINVAL; |
1273 | if (new == NULL) { | 1270 | if (new == NULL) { |
1274 | new = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops); | 1271 | new = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, |
1272 | cl->classid); | ||
1275 | if (new == NULL) | 1273 | if (new == NULL) |
1276 | new = &noop_qdisc; | 1274 | new = &noop_qdisc; |
1277 | } | 1275 | } |
@@ -1294,6 +1292,17 @@ hfsc_class_leaf(struct Qdisc *sch, unsigned long arg) | |||
1294 | return NULL; | 1292 | return NULL; |
1295 | } | 1293 | } |
1296 | 1294 | ||
1295 | static void | ||
1296 | hfsc_qlen_notify(struct Qdisc *sch, unsigned long arg) | ||
1297 | { | ||
1298 | struct hfsc_class *cl = (struct hfsc_class *)arg; | ||
1299 | |||
1300 | if (cl->qdisc->q.qlen == 0) { | ||
1301 | update_vf(cl, 0, 0); | ||
1302 | set_passive(cl); | ||
1303 | } | ||
1304 | } | ||
1305 | |||
1297 | static unsigned long | 1306 | static unsigned long |
1298 | hfsc_get_class(struct Qdisc *sch, u32 classid) | 1307 | hfsc_get_class(struct Qdisc *sch, u32 classid) |
1299 | { | 1308 | { |
@@ -1514,7 +1523,8 @@ hfsc_init_qdisc(struct Qdisc *sch, struct rtattr *opt) | |||
1514 | q->root.refcnt = 1; | 1523 | q->root.refcnt = 1; |
1515 | q->root.classid = sch->handle; | 1524 | q->root.classid = sch->handle; |
1516 | q->root.sched = q; | 1525 | q->root.sched = q; |
1517 | q->root.qdisc = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops); | 1526 | q->root.qdisc = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, |
1527 | sch->handle); | ||
1518 | if (q->root.qdisc == NULL) | 1528 | if (q->root.qdisc == NULL) |
1519 | q->root.qdisc = &noop_qdisc; | 1529 | q->root.qdisc = &noop_qdisc; |
1520 | q->root.stats_lock = &sch->dev->queue_lock; | 1530 | q->root.stats_lock = &sch->dev->queue_lock; |
@@ -1777,6 +1787,7 @@ static struct Qdisc_class_ops hfsc_class_ops = { | |||
1777 | .delete = hfsc_delete_class, | 1787 | .delete = hfsc_delete_class, |
1778 | .graft = hfsc_graft_class, | 1788 | .graft = hfsc_graft_class, |
1779 | .leaf = hfsc_class_leaf, | 1789 | .leaf = hfsc_class_leaf, |
1790 | .qlen_notify = hfsc_qlen_notify, | ||
1780 | .get = hfsc_get_class, | 1791 | .get = hfsc_get_class, |
1781 | .put = hfsc_put_class, | 1792 | .put = hfsc_put_class, |
1782 | .bind_tcf = hfsc_bind_tcf, | 1793 | .bind_tcf = hfsc_bind_tcf, |
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 4b52fa78935a..215e68c2b615 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c | |||
@@ -1223,17 +1223,14 @@ static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, | |||
1223 | struct htb_class *cl = (struct htb_class *)arg; | 1223 | struct htb_class *cl = (struct htb_class *)arg; |
1224 | 1224 | ||
1225 | if (cl && !cl->level) { | 1225 | if (cl && !cl->level) { |
1226 | if (new == NULL && (new = qdisc_create_dflt(sch->dev, | 1226 | if (new == NULL && |
1227 | &pfifo_qdisc_ops)) | 1227 | (new = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, |
1228 | cl->classid)) | ||
1228 | == NULL) | 1229 | == NULL) |
1229 | return -ENOBUFS; | 1230 | return -ENOBUFS; |
1230 | sch_tree_lock(sch); | 1231 | sch_tree_lock(sch); |
1231 | if ((*old = xchg(&cl->un.leaf.q, new)) != NULL) { | 1232 | if ((*old = xchg(&cl->un.leaf.q, new)) != NULL) { |
1232 | if (cl->prio_activity) | 1233 | qdisc_tree_decrease_qlen(*old, (*old)->q.qlen); |
1233 | htb_deactivate(qdisc_priv(sch), cl); | ||
1234 | |||
1235 | /* TODO: is it correct ? Why CBQ doesn't do it ? */ | ||
1236 | sch->q.qlen -= (*old)->q.qlen; | ||
1237 | qdisc_reset(*old); | 1234 | qdisc_reset(*old); |
1238 | } | 1235 | } |
1239 | sch_tree_unlock(sch); | 1236 | sch_tree_unlock(sch); |
@@ -1248,6 +1245,14 @@ static struct Qdisc *htb_leaf(struct Qdisc *sch, unsigned long arg) | |||
1248 | return (cl && !cl->level) ? cl->un.leaf.q : NULL; | 1245 | return (cl && !cl->level) ? cl->un.leaf.q : NULL; |
1249 | } | 1246 | } |
1250 | 1247 | ||
1248 | static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg) | ||
1249 | { | ||
1250 | struct htb_class *cl = (struct htb_class *)arg; | ||
1251 | |||
1252 | if (cl->un.leaf.q->q.qlen == 0) | ||
1253 | htb_deactivate(qdisc_priv(sch), cl); | ||
1254 | } | ||
1255 | |||
1251 | static unsigned long htb_get(struct Qdisc *sch, u32 classid) | 1256 | static unsigned long htb_get(struct Qdisc *sch, u32 classid) |
1252 | { | 1257 | { |
1253 | struct htb_class *cl = htb_find(classid, sch); | 1258 | struct htb_class *cl = htb_find(classid, sch); |
@@ -1269,9 +1274,9 @@ static void htb_destroy_filters(struct tcf_proto **fl) | |||
1269 | static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl) | 1274 | static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl) |
1270 | { | 1275 | { |
1271 | struct htb_sched *q = qdisc_priv(sch); | 1276 | struct htb_sched *q = qdisc_priv(sch); |
1277 | |||
1272 | if (!cl->level) { | 1278 | if (!cl->level) { |
1273 | BUG_TRAP(cl->un.leaf.q); | 1279 | BUG_TRAP(cl->un.leaf.q); |
1274 | sch->q.qlen -= cl->un.leaf.q->q.qlen; | ||
1275 | qdisc_destroy(cl->un.leaf.q); | 1280 | qdisc_destroy(cl->un.leaf.q); |
1276 | } | 1281 | } |
1277 | qdisc_put_rtab(cl->rate); | 1282 | qdisc_put_rtab(cl->rate); |
@@ -1322,6 +1327,7 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg) | |||
1322 | { | 1327 | { |
1323 | struct htb_sched *q = qdisc_priv(sch); | 1328 | struct htb_sched *q = qdisc_priv(sch); |
1324 | struct htb_class *cl = (struct htb_class *)arg; | 1329 | struct htb_class *cl = (struct htb_class *)arg; |
1330 | unsigned int qlen; | ||
1325 | 1331 | ||
1326 | // TODO: why don't allow to delete subtree ? references ? does | 1332 | // TODO: why don't allow to delete subtree ? references ? does |
1327 | // tc subsys quarantee us that in htb_destroy it holds no class | 1333 | // tc subsys quarantee us that in htb_destroy it holds no class |
@@ -1334,6 +1340,12 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg) | |||
1334 | /* delete from hash and active; remainder in destroy_class */ | 1340 | /* delete from hash and active; remainder in destroy_class */ |
1335 | hlist_del_init(&cl->hlist); | 1341 | hlist_del_init(&cl->hlist); |
1336 | 1342 | ||
1343 | if (!cl->level) { | ||
1344 | qlen = cl->un.leaf.q->q.qlen; | ||
1345 | qdisc_reset(cl->un.leaf.q); | ||
1346 | qdisc_tree_decrease_qlen(cl->un.leaf.q, qlen); | ||
1347 | } | ||
1348 | |||
1337 | if (cl->prio_activity) | 1349 | if (cl->prio_activity) |
1338 | htb_deactivate(q, cl); | 1350 | htb_deactivate(q, cl); |
1339 | 1351 | ||
@@ -1410,11 +1422,14 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, | |||
1410 | /* create leaf qdisc early because it uses kmalloc(GFP_KERNEL) | 1422 | /* create leaf qdisc early because it uses kmalloc(GFP_KERNEL) |
1411 | so that can't be used inside of sch_tree_lock | 1423 | so that can't be used inside of sch_tree_lock |
1412 | -- thanks to Karlis Peisenieks */ | 1424 | -- thanks to Karlis Peisenieks */ |
1413 | new_q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops); | 1425 | new_q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, classid); |
1414 | sch_tree_lock(sch); | 1426 | sch_tree_lock(sch); |
1415 | if (parent && !parent->level) { | 1427 | if (parent && !parent->level) { |
1428 | unsigned int qlen = parent->un.leaf.q->q.qlen; | ||
1429 | |||
1416 | /* turn parent into inner node */ | 1430 | /* turn parent into inner node */ |
1417 | sch->q.qlen -= parent->un.leaf.q->q.qlen; | 1431 | qdisc_reset(parent->un.leaf.q); |
1432 | qdisc_tree_decrease_qlen(parent->un.leaf.q, qlen); | ||
1418 | qdisc_destroy(parent->un.leaf.q); | 1433 | qdisc_destroy(parent->un.leaf.q); |
1419 | if (parent->prio_activity) | 1434 | if (parent->prio_activity) |
1420 | htb_deactivate(q, parent); | 1435 | htb_deactivate(q, parent); |
@@ -1562,6 +1577,7 @@ static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg) | |||
1562 | static struct Qdisc_class_ops htb_class_ops = { | 1577 | static struct Qdisc_class_ops htb_class_ops = { |
1563 | .graft = htb_graft, | 1578 | .graft = htb_graft, |
1564 | .leaf = htb_leaf, | 1579 | .leaf = htb_leaf, |
1580 | .qlen_notify = htb_qlen_notify, | ||
1565 | .get = htb_get, | 1581 | .get = htb_get, |
1566 | .put = htb_put, | 1582 | .put = htb_put, |
1567 | .change = htb_change_class, | 1583 | .change = htb_change_class, |
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index 0441876aa1e7..79542af9dab1 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c | |||
@@ -287,13 +287,10 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch) | |||
287 | psched_tdiff_t delay = PSCHED_TDIFF(cb->time_to_send, now); | 287 | psched_tdiff_t delay = PSCHED_TDIFF(cb->time_to_send, now); |
288 | 288 | ||
289 | if (q->qdisc->ops->requeue(skb, q->qdisc) != NET_XMIT_SUCCESS) { | 289 | if (q->qdisc->ops->requeue(skb, q->qdisc) != NET_XMIT_SUCCESS) { |
290 | qdisc_tree_decrease_qlen(q->qdisc, 1); | ||
290 | sch->qstats.drops++; | 291 | sch->qstats.drops++; |
291 | |||
292 | /* After this qlen is confused */ | ||
293 | printk(KERN_ERR "netem: queue discpline %s could not requeue\n", | 292 | printk(KERN_ERR "netem: queue discpline %s could not requeue\n", |
294 | q->qdisc->ops->id); | 293 | q->qdisc->ops->id); |
295 | |||
296 | sch->q.qlen--; | ||
297 | } | 294 | } |
298 | 295 | ||
299 | mod_timer(&q->timer, jiffies + PSCHED_US2JIFFIE(delay)); | 296 | mod_timer(&q->timer, jiffies + PSCHED_US2JIFFIE(delay)); |
@@ -574,7 +571,8 @@ static int netem_init(struct Qdisc *sch, struct rtattr *opt) | |||
574 | q->timer.function = netem_watchdog; | 571 | q->timer.function = netem_watchdog; |
575 | q->timer.data = (unsigned long) sch; | 572 | q->timer.data = (unsigned long) sch; |
576 | 573 | ||
577 | q->qdisc = qdisc_create_dflt(sch->dev, &tfifo_qdisc_ops); | 574 | q->qdisc = qdisc_create_dflt(sch->dev, &tfifo_qdisc_ops, |
575 | TC_H_MAKE(sch->handle, 1)); | ||
578 | if (!q->qdisc) { | 576 | if (!q->qdisc) { |
579 | pr_debug("netem: qdisc create failed\n"); | 577 | pr_debug("netem: qdisc create failed\n"); |
580 | return -ENOMEM; | 578 | return -ENOMEM; |
@@ -661,8 +659,8 @@ static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, | |||
661 | 659 | ||
662 | sch_tree_lock(sch); | 660 | sch_tree_lock(sch); |
663 | *old = xchg(&q->qdisc, new); | 661 | *old = xchg(&q->qdisc, new); |
662 | qdisc_tree_decrease_qlen(*old, (*old)->q.qlen); | ||
664 | qdisc_reset(*old); | 663 | qdisc_reset(*old); |
665 | sch->q.qlen = 0; | ||
666 | sch_tree_unlock(sch); | 664 | sch_tree_unlock(sch); |
667 | 665 | ||
668 | return 0; | 666 | return 0; |
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c index a5fa03c0c19b..2567b4c96c1e 100644 --- a/net/sched/sch_prio.c +++ b/net/sched/sch_prio.c | |||
@@ -222,21 +222,27 @@ static int prio_tune(struct Qdisc *sch, struct rtattr *opt) | |||
222 | 222 | ||
223 | for (i=q->bands; i<TCQ_PRIO_BANDS; i++) { | 223 | for (i=q->bands; i<TCQ_PRIO_BANDS; i++) { |
224 | struct Qdisc *child = xchg(&q->queues[i], &noop_qdisc); | 224 | struct Qdisc *child = xchg(&q->queues[i], &noop_qdisc); |
225 | if (child != &noop_qdisc) | 225 | if (child != &noop_qdisc) { |
226 | qdisc_tree_decrease_qlen(child, child->q.qlen); | ||
226 | qdisc_destroy(child); | 227 | qdisc_destroy(child); |
228 | } | ||
227 | } | 229 | } |
228 | sch_tree_unlock(sch); | 230 | sch_tree_unlock(sch); |
229 | 231 | ||
230 | for (i=0; i<q->bands; i++) { | 232 | for (i=0; i<q->bands; i++) { |
231 | if (q->queues[i] == &noop_qdisc) { | 233 | if (q->queues[i] == &noop_qdisc) { |
232 | struct Qdisc *child; | 234 | struct Qdisc *child; |
233 | child = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops); | 235 | child = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, |
236 | TC_H_MAKE(sch->handle, i + 1)); | ||
234 | if (child) { | 237 | if (child) { |
235 | sch_tree_lock(sch); | 238 | sch_tree_lock(sch); |
236 | child = xchg(&q->queues[i], child); | 239 | child = xchg(&q->queues[i], child); |
237 | 240 | ||
238 | if (child != &noop_qdisc) | 241 | if (child != &noop_qdisc) { |
242 | qdisc_tree_decrease_qlen(child, | ||
243 | child->q.qlen); | ||
239 | qdisc_destroy(child); | 244 | qdisc_destroy(child); |
245 | } | ||
240 | sch_tree_unlock(sch); | 246 | sch_tree_unlock(sch); |
241 | } | 247 | } |
242 | } | 248 | } |
@@ -294,7 +300,7 @@ static int prio_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, | |||
294 | sch_tree_lock(sch); | 300 | sch_tree_lock(sch); |
295 | *old = q->queues[band]; | 301 | *old = q->queues[band]; |
296 | q->queues[band] = new; | 302 | q->queues[band] = new; |
297 | sch->q.qlen -= (*old)->q.qlen; | 303 | qdisc_tree_decrease_qlen(*old, (*old)->q.qlen); |
298 | qdisc_reset(*old); | 304 | qdisc_reset(*old); |
299 | sch_tree_unlock(sch); | 305 | sch_tree_unlock(sch); |
300 | 306 | ||
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c index d65cadddea69..acddad08850f 100644 --- a/net/sched/sch_red.c +++ b/net/sched/sch_red.c | |||
@@ -175,12 +175,14 @@ static void red_destroy(struct Qdisc *sch) | |||
175 | qdisc_destroy(q->qdisc); | 175 | qdisc_destroy(q->qdisc); |
176 | } | 176 | } |
177 | 177 | ||
178 | static struct Qdisc *red_create_dflt(struct net_device *dev, u32 limit) | 178 | static struct Qdisc *red_create_dflt(struct Qdisc *sch, u32 limit) |
179 | { | 179 | { |
180 | struct Qdisc *q = qdisc_create_dflt(dev, &bfifo_qdisc_ops); | 180 | struct Qdisc *q; |
181 | struct rtattr *rta; | 181 | struct rtattr *rta; |
182 | int ret; | 182 | int ret; |
183 | 183 | ||
184 | q = qdisc_create_dflt(sch->dev, &bfifo_qdisc_ops, | ||
185 | TC_H_MAKE(sch->handle, 1)); | ||
184 | if (q) { | 186 | if (q) { |
185 | rta = kmalloc(RTA_LENGTH(sizeof(struct tc_fifo_qopt)), | 187 | rta = kmalloc(RTA_LENGTH(sizeof(struct tc_fifo_qopt)), |
186 | GFP_KERNEL); | 188 | GFP_KERNEL); |
@@ -219,7 +221,7 @@ static int red_change(struct Qdisc *sch, struct rtattr *opt) | |||
219 | ctl = RTA_DATA(tb[TCA_RED_PARMS-1]); | 221 | ctl = RTA_DATA(tb[TCA_RED_PARMS-1]); |
220 | 222 | ||
221 | if (ctl->limit > 0) { | 223 | if (ctl->limit > 0) { |
222 | child = red_create_dflt(sch->dev, ctl->limit); | 224 | child = red_create_dflt(sch, ctl->limit); |
223 | if (child == NULL) | 225 | if (child == NULL) |
224 | return -ENOMEM; | 226 | return -ENOMEM; |
225 | } | 227 | } |
@@ -227,8 +229,10 @@ static int red_change(struct Qdisc *sch, struct rtattr *opt) | |||
227 | sch_tree_lock(sch); | 229 | sch_tree_lock(sch); |
228 | q->flags = ctl->flags; | 230 | q->flags = ctl->flags; |
229 | q->limit = ctl->limit; | 231 | q->limit = ctl->limit; |
230 | if (child) | 232 | if (child) { |
233 | qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen); | ||
231 | qdisc_destroy(xchg(&q->qdisc, child)); | 234 | qdisc_destroy(xchg(&q->qdisc, child)); |
235 | } | ||
232 | 236 | ||
233 | red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog, | 237 | red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog, |
234 | ctl->Plog, ctl->Scell_log, | 238 | ctl->Plog, ctl->Scell_log, |
@@ -306,8 +310,8 @@ static int red_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, | |||
306 | 310 | ||
307 | sch_tree_lock(sch); | 311 | sch_tree_lock(sch); |
308 | *old = xchg(&q->qdisc, new); | 312 | *old = xchg(&q->qdisc, new); |
313 | qdisc_tree_decrease_qlen(*old, (*old)->q.qlen); | ||
309 | qdisc_reset(*old); | 314 | qdisc_reset(*old); |
310 | sch->q.qlen = 0; | ||
311 | sch_tree_unlock(sch); | 315 | sch_tree_unlock(sch); |
312 | return 0; | 316 | return 0; |
313 | } | 317 | } |
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c index d0d6e595a78c..459cda258a5c 100644 --- a/net/sched/sch_sfq.c +++ b/net/sched/sch_sfq.c | |||
@@ -393,6 +393,7 @@ static int sfq_change(struct Qdisc *sch, struct rtattr *opt) | |||
393 | { | 393 | { |
394 | struct sfq_sched_data *q = qdisc_priv(sch); | 394 | struct sfq_sched_data *q = qdisc_priv(sch); |
395 | struct tc_sfq_qopt *ctl = RTA_DATA(opt); | 395 | struct tc_sfq_qopt *ctl = RTA_DATA(opt); |
396 | unsigned int qlen; | ||
396 | 397 | ||
397 | if (opt->rta_len < RTA_LENGTH(sizeof(*ctl))) | 398 | if (opt->rta_len < RTA_LENGTH(sizeof(*ctl))) |
398 | return -EINVAL; | 399 | return -EINVAL; |
@@ -403,8 +404,10 @@ static int sfq_change(struct Qdisc *sch, struct rtattr *opt) | |||
403 | if (ctl->limit) | 404 | if (ctl->limit) |
404 | q->limit = min_t(u32, ctl->limit, SFQ_DEPTH); | 405 | q->limit = min_t(u32, ctl->limit, SFQ_DEPTH); |
405 | 406 | ||
407 | qlen = sch->q.qlen; | ||
406 | while (sch->q.qlen >= q->limit-1) | 408 | while (sch->q.qlen >= q->limit-1) |
407 | sfq_drop(sch); | 409 | sfq_drop(sch); |
410 | qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen); | ||
408 | 411 | ||
409 | del_timer(&q->perturb_timer); | 412 | del_timer(&q->perturb_timer); |
410 | if (q->perturb_period) { | 413 | if (q->perturb_period) { |
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c index d9a5d298d755..ed9b6d938540 100644 --- a/net/sched/sch_tbf.c +++ b/net/sched/sch_tbf.c | |||
@@ -250,7 +250,7 @@ static struct sk_buff *tbf_dequeue(struct Qdisc* sch) | |||
250 | 250 | ||
251 | if (q->qdisc->ops->requeue(skb, q->qdisc) != NET_XMIT_SUCCESS) { | 251 | if (q->qdisc->ops->requeue(skb, q->qdisc) != NET_XMIT_SUCCESS) { |
252 | /* When requeue fails skb is dropped */ | 252 | /* When requeue fails skb is dropped */ |
253 | sch->q.qlen--; | 253 | qdisc_tree_decrease_qlen(q->qdisc, 1); |
254 | sch->qstats.drops++; | 254 | sch->qstats.drops++; |
255 | } | 255 | } |
256 | 256 | ||
@@ -273,12 +273,14 @@ static void tbf_reset(struct Qdisc* sch) | |||
273 | del_timer(&q->wd_timer); | 273 | del_timer(&q->wd_timer); |
274 | } | 274 | } |
275 | 275 | ||
276 | static struct Qdisc *tbf_create_dflt_qdisc(struct net_device *dev, u32 limit) | 276 | static struct Qdisc *tbf_create_dflt_qdisc(struct Qdisc *sch, u32 limit) |
277 | { | 277 | { |
278 | struct Qdisc *q = qdisc_create_dflt(dev, &bfifo_qdisc_ops); | 278 | struct Qdisc *q; |
279 | struct rtattr *rta; | 279 | struct rtattr *rta; |
280 | int ret; | 280 | int ret; |
281 | 281 | ||
282 | q = qdisc_create_dflt(sch->dev, &bfifo_qdisc_ops, | ||
283 | TC_H_MAKE(sch->handle, 1)); | ||
282 | if (q) { | 284 | if (q) { |
283 | rta = kmalloc(RTA_LENGTH(sizeof(struct tc_fifo_qopt)), GFP_KERNEL); | 285 | rta = kmalloc(RTA_LENGTH(sizeof(struct tc_fifo_qopt)), GFP_KERNEL); |
284 | if (rta) { | 286 | if (rta) { |
@@ -341,13 +343,15 @@ static int tbf_change(struct Qdisc* sch, struct rtattr *opt) | |||
341 | goto done; | 343 | goto done; |
342 | 344 | ||
343 | if (qopt->limit > 0) { | 345 | if (qopt->limit > 0) { |
344 | if ((child = tbf_create_dflt_qdisc(sch->dev, qopt->limit)) == NULL) | 346 | if ((child = tbf_create_dflt_qdisc(sch, qopt->limit)) == NULL) |
345 | goto done; | 347 | goto done; |
346 | } | 348 | } |
347 | 349 | ||
348 | sch_tree_lock(sch); | 350 | sch_tree_lock(sch); |
349 | if (child) | 351 | if (child) { |
352 | qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen); | ||
350 | qdisc_destroy(xchg(&q->qdisc, child)); | 353 | qdisc_destroy(xchg(&q->qdisc, child)); |
354 | } | ||
351 | q->limit = qopt->limit; | 355 | q->limit = qopt->limit; |
352 | q->mtu = qopt->mtu; | 356 | q->mtu = qopt->mtu; |
353 | q->max_size = max_size; | 357 | q->max_size = max_size; |
@@ -449,8 +453,8 @@ static int tbf_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, | |||
449 | 453 | ||
450 | sch_tree_lock(sch); | 454 | sch_tree_lock(sch); |
451 | *old = xchg(&q->qdisc, new); | 455 | *old = xchg(&q->qdisc, new); |
456 | qdisc_tree_decrease_qlen(*old, (*old)->q.qlen); | ||
452 | qdisc_reset(*old); | 457 | qdisc_reset(*old); |
453 | sch->q.qlen = 0; | ||
454 | sch_tree_unlock(sch); | 458 | sch_tree_unlock(sch); |
455 | 459 | ||
456 | return 0; | 460 | return 0; |
diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 88124696ba60..ad0057db0f91 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c | |||
@@ -486,7 +486,7 @@ void sctp_assoc_rm_peer(struct sctp_association *asoc, | |||
486 | " port: %d\n", | 486 | " port: %d\n", |
487 | asoc, | 487 | asoc, |
488 | (&peer->ipaddr), | 488 | (&peer->ipaddr), |
489 | peer->ipaddr.v4.sin_port); | 489 | ntohs(peer->ipaddr.v4.sin_port)); |
490 | 490 | ||
491 | /* If we are to remove the current retran_path, update it | 491 | /* If we are to remove the current retran_path, update it |
492 | * to the next peer before removing this peer from the list. | 492 | * to the next peer before removing this peer from the list. |
@@ -535,13 +535,13 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc, | |||
535 | sp = sctp_sk(asoc->base.sk); | 535 | sp = sctp_sk(asoc->base.sk); |
536 | 536 | ||
537 | /* AF_INET and AF_INET6 share common port field. */ | 537 | /* AF_INET and AF_INET6 share common port field. */ |
538 | port = addr->v4.sin_port; | 538 | port = ntohs(addr->v4.sin_port); |
539 | 539 | ||
540 | SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_add_peer:association %p addr: ", | 540 | SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_add_peer:association %p addr: ", |
541 | " port: %d state:%d\n", | 541 | " port: %d state:%d\n", |
542 | asoc, | 542 | asoc, |
543 | addr, | 543 | addr, |
544 | addr->v4.sin_port, | 544 | port, |
545 | peer_state); | 545 | peer_state); |
546 | 546 | ||
547 | /* Set the port if it has not been set yet. */ | 547 | /* Set the port if it has not been set yet. */ |
@@ -707,6 +707,7 @@ void sctp_assoc_control_transport(struct sctp_association *asoc, | |||
707 | struct sctp_transport *first; | 707 | struct sctp_transport *first; |
708 | struct sctp_transport *second; | 708 | struct sctp_transport *second; |
709 | struct sctp_ulpevent *event; | 709 | struct sctp_ulpevent *event; |
710 | struct sockaddr_storage addr; | ||
710 | struct list_head *pos; | 711 | struct list_head *pos; |
711 | int spc_state = 0; | 712 | int spc_state = 0; |
712 | 713 | ||
@@ -729,8 +730,9 @@ void sctp_assoc_control_transport(struct sctp_association *asoc, | |||
729 | /* Generate and send a SCTP_PEER_ADDR_CHANGE notification to the | 730 | /* Generate and send a SCTP_PEER_ADDR_CHANGE notification to the |
730 | * user. | 731 | * user. |
731 | */ | 732 | */ |
732 | event = sctp_ulpevent_make_peer_addr_change(asoc, | 733 | memset(&addr, 0, sizeof(struct sockaddr_storage)); |
733 | (struct sockaddr_storage *) &transport->ipaddr, | 734 | memcpy(&addr, &transport->ipaddr, transport->af_specific->sockaddr_len); |
735 | event = sctp_ulpevent_make_peer_addr_change(asoc, &addr, | ||
734 | 0, spc_state, error, GFP_ATOMIC); | 736 | 0, spc_state, error, GFP_ATOMIC); |
735 | if (event) | 737 | if (event) |
736 | sctp_ulpq_tail_event(&asoc->ulpq, event); | 738 | sctp_ulpq_tail_event(&asoc->ulpq, event); |
@@ -866,7 +868,7 @@ struct sctp_transport *sctp_assoc_lookup_tsn(struct sctp_association *asoc, | |||
866 | struct list_head *entry, *pos; | 868 | struct list_head *entry, *pos; |
867 | struct sctp_transport *transport; | 869 | struct sctp_transport *transport; |
868 | struct sctp_chunk *chunk; | 870 | struct sctp_chunk *chunk; |
869 | __u32 key = htonl(tsn); | 871 | __be32 key = htonl(tsn); |
870 | 872 | ||
871 | match = NULL; | 873 | match = NULL; |
872 | 874 | ||
@@ -924,8 +926,8 @@ struct sctp_transport *sctp_assoc_is_match(struct sctp_association *asoc, | |||
924 | 926 | ||
925 | sctp_read_lock(&asoc->base.addr_lock); | 927 | sctp_read_lock(&asoc->base.addr_lock); |
926 | 928 | ||
927 | if ((asoc->base.bind_addr.port == laddr->v4.sin_port) && | 929 | if ((htons(asoc->base.bind_addr.port) == laddr->v4.sin_port) && |
928 | (asoc->peer.port == paddr->v4.sin_port)) { | 930 | (htons(asoc->peer.port) == paddr->v4.sin_port)) { |
929 | transport = sctp_assoc_lookup_paddr(asoc, paddr); | 931 | transport = sctp_assoc_lookup_paddr(asoc, paddr); |
930 | if (!transport) | 932 | if (!transport) |
931 | goto out; | 933 | goto out; |
@@ -1136,7 +1138,7 @@ void sctp_assoc_update_retran_path(struct sctp_association *asoc) | |||
1136 | " port: %d\n", | 1138 | " port: %d\n", |
1137 | asoc, | 1139 | asoc, |
1138 | (&t->ipaddr), | 1140 | (&t->ipaddr), |
1139 | t->ipaddr.v4.sin_port); | 1141 | ntohs(t->ipaddr.v4.sin_port)); |
1140 | } | 1142 | } |
1141 | 1143 | ||
1142 | /* Choose the transport for sending a INIT packet. */ | 1144 | /* Choose the transport for sending a INIT packet. */ |
@@ -1161,7 +1163,7 @@ struct sctp_transport *sctp_assoc_choose_init_transport( | |||
1161 | " port: %d\n", | 1163 | " port: %d\n", |
1162 | asoc, | 1164 | asoc, |
1163 | (&t->ipaddr), | 1165 | (&t->ipaddr), |
1164 | t->ipaddr.v4.sin_port); | 1166 | ntohs(t->ipaddr.v4.sin_port)); |
1165 | 1167 | ||
1166 | return t; | 1168 | return t; |
1167 | } | 1169 | } |
diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c index 2b9c12a170e5..00994158e496 100644 --- a/net/sctp/bind_addr.c +++ b/net/sctp/bind_addr.c | |||
@@ -161,7 +161,7 @@ int sctp_add_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *new, | |||
161 | * Both v4 and v6 have the port at the same offset. | 161 | * Both v4 and v6 have the port at the same offset. |
162 | */ | 162 | */ |
163 | if (!addr->a.v4.sin_port) | 163 | if (!addr->a.v4.sin_port) |
164 | addr->a.v4.sin_port = bp->port; | 164 | addr->a.v4.sin_port = htons(bp->port); |
165 | 165 | ||
166 | addr->use_as_src = use_as_src; | 166 | addr->use_as_src = use_as_src; |
167 | 167 | ||
@@ -275,7 +275,7 @@ int sctp_raw_to_bind_addrs(struct sctp_bind_addr *bp, __u8 *raw_addr_list, | |||
275 | break; | 275 | break; |
276 | } | 276 | } |
277 | 277 | ||
278 | af->from_addr_param(&addr, rawaddr, port, 0); | 278 | af->from_addr_param(&addr, rawaddr, htons(port), 0); |
279 | retval = sctp_add_bind_addr(bp, &addr, 1, gfp); | 279 | retval = sctp_add_bind_addr(bp, &addr, 1, gfp); |
280 | if (retval) { | 280 | if (retval) { |
281 | /* Can't finish building the list, clean up. */ | 281 | /* Can't finish building the list, clean up. */ |
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c index a2b553721514..129756908da4 100644 --- a/net/sctp/endpointola.c +++ b/net/sctp/endpointola.c | |||
@@ -72,6 +72,10 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep, | |||
72 | { | 72 | { |
73 | memset(ep, 0, sizeof(struct sctp_endpoint)); | 73 | memset(ep, 0, sizeof(struct sctp_endpoint)); |
74 | 74 | ||
75 | ep->digest = kzalloc(SCTP_SIGNATURE_SIZE, gfp); | ||
76 | if (!ep->digest) | ||
77 | return NULL; | ||
78 | |||
75 | /* Initialize the base structure. */ | 79 | /* Initialize the base structure. */ |
76 | /* What type of endpoint are we? */ | 80 | /* What type of endpoint are we? */ |
77 | ep->base.type = SCTP_EP_TYPE_SOCKET; | 81 | ep->base.type = SCTP_EP_TYPE_SOCKET; |
@@ -181,6 +185,9 @@ static void sctp_endpoint_destroy(struct sctp_endpoint *ep) | |||
181 | /* Free up the HMAC transform. */ | 185 | /* Free up the HMAC transform. */ |
182 | crypto_free_hash(sctp_sk(ep->base.sk)->hmac); | 186 | crypto_free_hash(sctp_sk(ep->base.sk)->hmac); |
183 | 187 | ||
188 | /* Free the digest buffer */ | ||
189 | kfree(ep->digest); | ||
190 | |||
184 | /* Cleanup. */ | 191 | /* Cleanup. */ |
185 | sctp_inq_free(&ep->base.inqueue); | 192 | sctp_inq_free(&ep->base.inqueue); |
186 | sctp_bind_addr_free(&ep->base.bind_addr); | 193 | sctp_bind_addr_free(&ep->base.bind_addr); |
@@ -222,7 +229,7 @@ struct sctp_endpoint *sctp_endpoint_is_match(struct sctp_endpoint *ep, | |||
222 | struct sctp_endpoint *retval; | 229 | struct sctp_endpoint *retval; |
223 | 230 | ||
224 | sctp_read_lock(&ep->base.addr_lock); | 231 | sctp_read_lock(&ep->base.addr_lock); |
225 | if (ep->base.bind_addr.port == laddr->v4.sin_port) { | 232 | if (htons(ep->base.bind_addr.port) == laddr->v4.sin_port) { |
226 | if (sctp_bind_addr_match(&ep->base.bind_addr, laddr, | 233 | if (sctp_bind_addr_match(&ep->base.bind_addr, laddr, |
227 | sctp_sk(ep->base.sk))) { | 234 | sctp_sk(ep->base.sk))) { |
228 | retval = ep; | 235 | retval = ep; |
@@ -250,7 +257,7 @@ static struct sctp_association *__sctp_endpoint_lookup_assoc( | |||
250 | struct sctp_association *asoc; | 257 | struct sctp_association *asoc; |
251 | struct list_head *pos; | 258 | struct list_head *pos; |
252 | 259 | ||
253 | rport = paddr->v4.sin_port; | 260 | rport = ntohs(paddr->v4.sin_port); |
254 | 261 | ||
255 | list_for_each(pos, &ep->asocs) { | 262 | list_for_each(pos, &ep->asocs) { |
256 | asoc = list_entry(pos, struct sctp_association, asocs); | 263 | asoc = list_entry(pos, struct sctp_association, asocs); |
diff --git a/net/sctp/input.c b/net/sctp/input.c index 6d82f400d13c..33111873a488 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c | |||
@@ -726,7 +726,7 @@ static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(const union sctp_addr *l | |||
726 | struct sctp_endpoint *ep; | 726 | struct sctp_endpoint *ep; |
727 | int hash; | 727 | int hash; |
728 | 728 | ||
729 | hash = sctp_ep_hashfn(laddr->v4.sin_port); | 729 | hash = sctp_ep_hashfn(ntohs(laddr->v4.sin_port)); |
730 | head = &sctp_ep_hashtable[hash]; | 730 | head = &sctp_ep_hashtable[hash]; |
731 | read_lock(&head->lock); | 731 | read_lock(&head->lock); |
732 | for (epb = head->chain; epb; epb = epb->next) { | 732 | for (epb = head->chain; epb; epb = epb->next) { |
@@ -830,7 +830,7 @@ static struct sctp_association *__sctp_lookup_association( | |||
830 | /* Optimize here for direct hit, only listening connections can | 830 | /* Optimize here for direct hit, only listening connections can |
831 | * have wildcards anyways. | 831 | * have wildcards anyways. |
832 | */ | 832 | */ |
833 | hash = sctp_assoc_hashfn(local->v4.sin_port, peer->v4.sin_port); | 833 | hash = sctp_assoc_hashfn(ntohs(local->v4.sin_port), ntohs(peer->v4.sin_port)); |
834 | head = &sctp_assoc_hashtable[hash]; | 834 | head = &sctp_assoc_hashtable[hash]; |
835 | read_lock(&head->lock); | 835 | read_lock(&head->lock); |
836 | for (epb = head->chain; epb; epb = epb->next) { | 836 | for (epb = head->chain; epb; epb = epb->next) { |
@@ -957,7 +957,7 @@ static struct sctp_association *__sctp_rcv_init_lookup(struct sk_buff *skb, | |||
957 | if (!af) | 957 | if (!af) |
958 | continue; | 958 | continue; |
959 | 959 | ||
960 | af->from_addr_param(paddr, params.addr, ntohs(sh->source), 0); | 960 | af->from_addr_param(paddr, params.addr, sh->source, 0); |
961 | 961 | ||
962 | asoc = __sctp_lookup_association(laddr, paddr, &transport); | 962 | asoc = __sctp_lookup_association(laddr, paddr, &transport); |
963 | if (asoc) | 963 | if (asoc) |
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index 78071c6e6cf1..3c3e560087ca 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c | |||
@@ -84,7 +84,7 @@ static struct notifier_block sctp_inet6addr_notifier = { | |||
84 | 84 | ||
85 | /* ICMP error handler. */ | 85 | /* ICMP error handler. */ |
86 | SCTP_STATIC void sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | 86 | SCTP_STATIC void sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, |
87 | int type, int code, int offset, __u32 info) | 87 | int type, int code, int offset, __be32 info) |
88 | { | 88 | { |
89 | struct inet6_dev *idev; | 89 | struct inet6_dev *idev; |
90 | struct ipv6hdr *iph = (struct ipv6hdr *)skb->data; | 90 | struct ipv6hdr *iph = (struct ipv6hdr *)skb->data; |
@@ -170,8 +170,6 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport, | |||
170 | fl.oif = transport->saddr.v6.sin6_scope_id; | 170 | fl.oif = transport->saddr.v6.sin6_scope_id; |
171 | else | 171 | else |
172 | fl.oif = sk->sk_bound_dev_if; | 172 | fl.oif = sk->sk_bound_dev_if; |
173 | fl.fl_ip_sport = inet_sk(sk)->sport; | ||
174 | fl.fl_ip_dport = transport->ipaddr.v6.sin6_port; | ||
175 | 173 | ||
176 | if (np->opt && np->opt->srcrt) { | 174 | if (np->opt && np->opt->srcrt) { |
177 | struct rt0_hdr *rt0 = (struct rt0_hdr *) np->opt->srcrt; | 175 | struct rt0_hdr *rt0 = (struct rt0_hdr *) np->opt->srcrt; |
@@ -239,7 +237,7 @@ static inline int sctp_v6_addr_match_len(union sctp_addr *s1, | |||
239 | int i, j; | 237 | int i, j; |
240 | 238 | ||
241 | for (i = 0; i < 4 ; i++) { | 239 | for (i = 0; i < 4 ; i++) { |
242 | __u32 a1xora2; | 240 | __be32 a1xora2; |
243 | 241 | ||
244 | a1xora2 = a1->s6_addr32[i] ^ a2->s6_addr32[i]; | 242 | a1xora2 = a1->s6_addr32[i] ^ a2->s6_addr32[i]; |
245 | 243 | ||
@@ -350,7 +348,7 @@ static void sctp_v6_from_skb(union sctp_addr *addr,struct sk_buff *skb, | |||
350 | int is_saddr) | 348 | int is_saddr) |
351 | { | 349 | { |
352 | void *from; | 350 | void *from; |
353 | __u16 *port; | 351 | __be16 *port; |
354 | struct sctphdr *sh; | 352 | struct sctphdr *sh; |
355 | 353 | ||
356 | port = &addr->v6.sin6_port; | 354 | port = &addr->v6.sin6_port; |
@@ -360,10 +358,10 @@ static void sctp_v6_from_skb(union sctp_addr *addr,struct sk_buff *skb, | |||
360 | 358 | ||
361 | sh = (struct sctphdr *) skb->h.raw; | 359 | sh = (struct sctphdr *) skb->h.raw; |
362 | if (is_saddr) { | 360 | if (is_saddr) { |
363 | *port = ntohs(sh->source); | 361 | *port = sh->source; |
364 | from = &skb->nh.ipv6h->saddr; | 362 | from = &skb->nh.ipv6h->saddr; |
365 | } else { | 363 | } else { |
366 | *port = ntohs(sh->dest); | 364 | *port = sh->dest; |
367 | from = &skb->nh.ipv6h->daddr; | 365 | from = &skb->nh.ipv6h->daddr; |
368 | } | 366 | } |
369 | ipv6_addr_copy(&addr->v6.sin6_addr, from); | 367 | ipv6_addr_copy(&addr->v6.sin6_addr, from); |
@@ -373,7 +371,7 @@ static void sctp_v6_from_skb(union sctp_addr *addr,struct sk_buff *skb, | |||
373 | static void sctp_v6_from_sk(union sctp_addr *addr, struct sock *sk) | 371 | static void sctp_v6_from_sk(union sctp_addr *addr, struct sock *sk) |
374 | { | 372 | { |
375 | addr->v6.sin6_family = AF_INET6; | 373 | addr->v6.sin6_family = AF_INET6; |
376 | addr->v6.sin6_port = inet_sk(sk)->num; | 374 | addr->v6.sin6_port = 0; |
377 | addr->v6.sin6_addr = inet6_sk(sk)->rcv_saddr; | 375 | addr->v6.sin6_addr = inet6_sk(sk)->rcv_saddr; |
378 | } | 376 | } |
379 | 377 | ||
@@ -407,7 +405,7 @@ static void sctp_v6_to_sk_daddr(union sctp_addr *addr, struct sock *sk) | |||
407 | /* Initialize a sctp_addr from an address parameter. */ | 405 | /* Initialize a sctp_addr from an address parameter. */ |
408 | static void sctp_v6_from_addr_param(union sctp_addr *addr, | 406 | static void sctp_v6_from_addr_param(union sctp_addr *addr, |
409 | union sctp_addr_param *param, | 407 | union sctp_addr_param *param, |
410 | __u16 port, int iif) | 408 | __be16 port, int iif) |
411 | { | 409 | { |
412 | addr->v6.sin6_family = AF_INET6; | 410 | addr->v6.sin6_family = AF_INET6; |
413 | addr->v6.sin6_port = port; | 411 | addr->v6.sin6_port = port; |
@@ -425,7 +423,7 @@ static int sctp_v6_to_addr_param(const union sctp_addr *addr, | |||
425 | int length = sizeof(sctp_ipv6addr_param_t); | 423 | int length = sizeof(sctp_ipv6addr_param_t); |
426 | 424 | ||
427 | param->v6.param_hdr.type = SCTP_PARAM_IPV6_ADDRESS; | 425 | param->v6.param_hdr.type = SCTP_PARAM_IPV6_ADDRESS; |
428 | param->v6.param_hdr.length = ntohs(length); | 426 | param->v6.param_hdr.length = htons(length); |
429 | ipv6_addr_copy(¶m->v6.addr, &addr->v6.sin6_addr); | 427 | ipv6_addr_copy(¶m->v6.addr, &addr->v6.sin6_addr); |
430 | 428 | ||
431 | return length; | 429 | return length; |
@@ -433,7 +431,7 @@ static int sctp_v6_to_addr_param(const union sctp_addr *addr, | |||
433 | 431 | ||
434 | /* Initialize a sctp_addr from a dst_entry. */ | 432 | /* Initialize a sctp_addr from a dst_entry. */ |
435 | static void sctp_v6_dst_saddr(union sctp_addr *addr, struct dst_entry *dst, | 433 | static void sctp_v6_dst_saddr(union sctp_addr *addr, struct dst_entry *dst, |
436 | unsigned short port) | 434 | __be16 port) |
437 | { | 435 | { |
438 | struct rt6_info *rt = (struct rt6_info *)dst; | 436 | struct rt6_info *rt = (struct rt6_info *)dst; |
439 | addr->sa.sa_family = AF_INET6; | 437 | addr->sa.sa_family = AF_INET6; |
@@ -480,7 +478,7 @@ static int sctp_v6_cmp_addr(const union sctp_addr *addr1, | |||
480 | } | 478 | } |
481 | 479 | ||
482 | /* Initialize addr struct to INADDR_ANY. */ | 480 | /* Initialize addr struct to INADDR_ANY. */ |
483 | static void sctp_v6_inaddr_any(union sctp_addr *addr, unsigned short port) | 481 | static void sctp_v6_inaddr_any(union sctp_addr *addr, __be16 port) |
484 | { | 482 | { |
485 | memset(addr, 0x00, sizeof(union sctp_addr)); | 483 | memset(addr, 0x00, sizeof(union sctp_addr)); |
486 | addr->v6.sin6_family = AF_INET6; | 484 | addr->v6.sin6_family = AF_INET6; |
@@ -855,7 +853,7 @@ static int sctp_inet6_send_verify(struct sctp_sock *opt, union sctp_addr *addr) | |||
855 | * Returns number of addresses supported. | 853 | * Returns number of addresses supported. |
856 | */ | 854 | */ |
857 | static int sctp_inet6_supported_addrs(const struct sctp_sock *opt, | 855 | static int sctp_inet6_supported_addrs(const struct sctp_sock *opt, |
858 | __u16 *types) | 856 | __be16 *types) |
859 | { | 857 | { |
860 | types[0] = SCTP_PARAM_IPV4_ADDRESS; | 858 | types[0] = SCTP_PARAM_IPV4_ADDRESS; |
861 | types[1] = SCTP_PARAM_IPV6_ADDRESS; | 859 | types[1] = SCTP_PARAM_IPV6_ADDRESS; |
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c index 739582415bf6..fba567a7cb64 100644 --- a/net/sctp/outqueue.c +++ b/net/sctp/outqueue.c | |||
@@ -1065,7 +1065,7 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_sackhdr *sack) | |||
1065 | * A) Initialize the cacc_saw_newack to 0 for all destination | 1065 | * A) Initialize the cacc_saw_newack to 0 for all destination |
1066 | * addresses. | 1066 | * addresses. |
1067 | */ | 1067 | */ |
1068 | if (sack->num_gap_ack_blocks > 0 && | 1068 | if (sack->num_gap_ack_blocks && |
1069 | primary->cacc.changeover_active) { | 1069 | primary->cacc.changeover_active) { |
1070 | list_for_each(pos, transport_list) { | 1070 | list_for_each(pos, transport_list) { |
1071 | transport = list_entry(pos, struct sctp_transport, | 1071 | transport = list_entry(pos, struct sctp_transport, |
@@ -1632,7 +1632,7 @@ pass: | |||
1632 | } | 1632 | } |
1633 | 1633 | ||
1634 | static inline int sctp_get_skip_pos(struct sctp_fwdtsn_skip *skiplist, | 1634 | static inline int sctp_get_skip_pos(struct sctp_fwdtsn_skip *skiplist, |
1635 | int nskips, __u16 stream) | 1635 | int nskips, __be16 stream) |
1636 | { | 1636 | { |
1637 | int i; | 1637 | int i; |
1638 | 1638 | ||
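In the outqueue.c change, num_gap_ack_blocks is now carried in network byte order, so the ordered comparison "> 0" gives way to a plain truth test. A tiny userspace sketch with illustrative values:

/*
 * Sketch: only "any gap ack blocks at all?" matters here, and a plain
 * truth test answers that without interpreting the bytes, so it is both
 * byte-order independent and free of the sparse restricted-type warning
 * that an ordered comparison against a __be16 would raise.
 */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

int main(void)
{
	uint16_t num_gap_ack_blocks = htons(1);	/* as carried in the SACK header */

	if (num_gap_ack_blocks)			/* safe regardless of endianness */
		printf("%u gap ack block(s)\n", ntohs(num_gap_ack_blocks));
	return 0;
}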
diff --git a/net/sctp/proc.c b/net/sctp/proc.c index 7f49e769080e..b3493bdbcacb 100644 --- a/net/sctp/proc.c +++ b/net/sctp/proc.c | |||
@@ -160,7 +160,7 @@ static void sctp_seq_dump_local_addrs(struct seq_file *seq, struct sctp_ep_commo | |||
160 | 160 | ||
161 | list_for_each(pos, &epb->bind_addr.address_list) { | 161 | list_for_each(pos, &epb->bind_addr.address_list) { |
162 | laddr = list_entry(pos, struct sctp_sockaddr_entry, list); | 162 | laddr = list_entry(pos, struct sctp_sockaddr_entry, list); |
163 | addr = (union sctp_addr *)&laddr->a; | 163 | addr = &laddr->a; |
164 | af = sctp_get_af_specific(addr->sa.sa_family); | 164 | af = sctp_get_af_specific(addr->sa.sa_family); |
165 | if (primary && af->cmp_addr(addr, primary)) { | 165 | if (primary && af->cmp_addr(addr, primary)) { |
166 | seq_printf(seq, "*"); | 166 | seq_printf(seq, "*"); |
@@ -177,10 +177,10 @@ static void sctp_seq_dump_remote_addrs(struct seq_file *seq, struct sctp_associa | |||
177 | union sctp_addr *addr, *primary; | 177 | union sctp_addr *addr, *primary; |
178 | struct sctp_af *af; | 178 | struct sctp_af *af; |
179 | 179 | ||
180 | primary = &(assoc->peer.primary_addr); | 180 | primary = &assoc->peer.primary_addr; |
181 | list_for_each(pos, &assoc->peer.transport_addr_list) { | 181 | list_for_each(pos, &assoc->peer.transport_addr_list) { |
182 | transport = list_entry(pos, struct sctp_transport, transports); | 182 | transport = list_entry(pos, struct sctp_transport, transports); |
183 | addr = (union sctp_addr *)&transport->ipaddr; | 183 | addr = &transport->ipaddr; |
184 | af = sctp_get_af_specific(addr->sa.sa_family); | 184 | af = sctp_get_af_specific(addr->sa.sa_family); |
185 | if (af->cmp_addr(addr, primary)) { | 185 | if (af->cmp_addr(addr, primary)) { |
186 | seq_printf(seq, "*"); | 186 | seq_printf(seq, "*"); |
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index 5b4f82fd98f8..11f3b549f4a4 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c | |||
@@ -251,7 +251,7 @@ static void sctp_v4_from_skb(union sctp_addr *addr, struct sk_buff *skb, | |||
251 | int is_saddr) | 251 | int is_saddr) |
252 | { | 252 | { |
253 | void *from; | 253 | void *from; |
254 | __u16 *port; | 254 | __be16 *port; |
255 | struct sctphdr *sh; | 255 | struct sctphdr *sh; |
256 | 256 | ||
257 | port = &addr->v4.sin_port; | 257 | port = &addr->v4.sin_port; |
@@ -259,10 +259,10 @@ static void sctp_v4_from_skb(union sctp_addr *addr, struct sk_buff *skb, | |||
259 | 259 | ||
260 | sh = (struct sctphdr *) skb->h.raw; | 260 | sh = (struct sctphdr *) skb->h.raw; |
261 | if (is_saddr) { | 261 | if (is_saddr) { |
262 | *port = ntohs(sh->source); | 262 | *port = sh->source; |
263 | from = &skb->nh.iph->saddr; | 263 | from = &skb->nh.iph->saddr; |
264 | } else { | 264 | } else { |
265 | *port = ntohs(sh->dest); | 265 | *port = sh->dest; |
266 | from = &skb->nh.iph->daddr; | 266 | from = &skb->nh.iph->daddr; |
267 | } | 267 | } |
268 | memcpy(&addr->v4.sin_addr.s_addr, from, sizeof(struct in_addr)); | 268 | memcpy(&addr->v4.sin_addr.s_addr, from, sizeof(struct in_addr)); |
@@ -272,7 +272,7 @@ static void sctp_v4_from_skb(union sctp_addr *addr, struct sk_buff *skb, | |||
272 | static void sctp_v4_from_sk(union sctp_addr *addr, struct sock *sk) | 272 | static void sctp_v4_from_sk(union sctp_addr *addr, struct sock *sk) |
273 | { | 273 | { |
274 | addr->v4.sin_family = AF_INET; | 274 | addr->v4.sin_family = AF_INET; |
275 | addr->v4.sin_port = inet_sk(sk)->num; | 275 | addr->v4.sin_port = 0; |
276 | addr->v4.sin_addr.s_addr = inet_sk(sk)->rcv_saddr; | 276 | addr->v4.sin_addr.s_addr = inet_sk(sk)->rcv_saddr; |
277 | } | 277 | } |
278 | 278 | ||
@@ -291,7 +291,7 @@ static void sctp_v4_to_sk_daddr(union sctp_addr *addr, struct sock *sk) | |||
291 | /* Initialize a sctp_addr from an address parameter. */ | 291 | /* Initialize a sctp_addr from an address parameter. */ |
292 | static void sctp_v4_from_addr_param(union sctp_addr *addr, | 292 | static void sctp_v4_from_addr_param(union sctp_addr *addr, |
293 | union sctp_addr_param *param, | 293 | union sctp_addr_param *param, |
294 | __u16 port, int iif) | 294 | __be16 port, int iif) |
295 | { | 295 | { |
296 | addr->v4.sin_family = AF_INET; | 296 | addr->v4.sin_family = AF_INET; |
297 | addr->v4.sin_port = port; | 297 | addr->v4.sin_port = port; |
@@ -307,7 +307,7 @@ static int sctp_v4_to_addr_param(const union sctp_addr *addr, | |||
307 | int length = sizeof(sctp_ipv4addr_param_t); | 307 | int length = sizeof(sctp_ipv4addr_param_t); |
308 | 308 | ||
309 | param->v4.param_hdr.type = SCTP_PARAM_IPV4_ADDRESS; | 309 | param->v4.param_hdr.type = SCTP_PARAM_IPV4_ADDRESS; |
310 | param->v4.param_hdr.length = ntohs(length); | 310 | param->v4.param_hdr.length = htons(length); |
311 | param->v4.addr.s_addr = addr->v4.sin_addr.s_addr; | 311 | param->v4.addr.s_addr = addr->v4.sin_addr.s_addr; |
312 | 312 | ||
313 | return length; | 313 | return length; |
@@ -315,7 +315,7 @@ static int sctp_v4_to_addr_param(const union sctp_addr *addr, | |||
315 | 315 | ||
316 | /* Initialize a sctp_addr from a dst_entry. */ | 316 | /* Initialize a sctp_addr from a dst_entry. */ |
317 | static void sctp_v4_dst_saddr(union sctp_addr *saddr, struct dst_entry *dst, | 317 | static void sctp_v4_dst_saddr(union sctp_addr *saddr, struct dst_entry *dst, |
318 | unsigned short port) | 318 | __be16 port) |
319 | { | 319 | { |
320 | struct rtable *rt = (struct rtable *)dst; | 320 | struct rtable *rt = (struct rtable *)dst; |
321 | saddr->v4.sin_family = AF_INET; | 321 | saddr->v4.sin_family = AF_INET; |
@@ -338,7 +338,7 @@ static int sctp_v4_cmp_addr(const union sctp_addr *addr1, | |||
338 | } | 338 | } |
339 | 339 | ||
340 | /* Initialize addr struct to INADDR_ANY. */ | 340 | /* Initialize addr struct to INADDR_ANY. */ |
341 | static void sctp_v4_inaddr_any(union sctp_addr *addr, unsigned short port) | 341 | static void sctp_v4_inaddr_any(union sctp_addr *addr, __be16 port) |
342 | { | 342 | { |
343 | addr->v4.sin_family = AF_INET; | 343 | addr->v4.sin_family = AF_INET; |
344 | addr->v4.sin_addr.s_addr = INADDR_ANY; | 344 | addr->v4.sin_addr.s_addr = INADDR_ANY; |
@@ -481,7 +481,7 @@ static struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc, | |||
481 | list); | 481 | list); |
482 | if (!laddr->use_as_src) | 482 | if (!laddr->use_as_src) |
483 | continue; | 483 | continue; |
484 | sctp_v4_dst_saddr(&dst_saddr, dst, bp->port); | 484 | sctp_v4_dst_saddr(&dst_saddr, dst, htons(bp->port)); |
485 | if (sctp_v4_cmp_addr(&dst_saddr, &laddr->a)) | 485 | if (sctp_v4_cmp_addr(&dst_saddr, &laddr->a)) |
486 | goto out_unlock; | 486 | goto out_unlock; |
487 | } | 487 | } |
@@ -538,7 +538,7 @@ static void sctp_v4_get_saddr(struct sctp_association *asoc, | |||
538 | 538 | ||
539 | if (rt) { | 539 | if (rt) { |
540 | saddr->v4.sin_family = AF_INET; | 540 | saddr->v4.sin_family = AF_INET; |
541 | saddr->v4.sin_port = asoc->base.bind_addr.port; | 541 | saddr->v4.sin_port = htons(asoc->base.bind_addr.port); |
542 | saddr->v4.sin_addr.s_addr = rt->rt_src; | 542 | saddr->v4.sin_addr.s_addr = rt->rt_src; |
543 | } | 543 | } |
544 | } | 544 | } |
@@ -791,7 +791,7 @@ static int sctp_inet_send_verify(struct sctp_sock *opt, union sctp_addr *addr) | |||
791 | * chunks. Returns number of addresses supported. | 791 | * chunks. Returns number of addresses supported. |
792 | */ | 792 | */ |
793 | static int sctp_inet_supported_addrs(const struct sctp_sock *opt, | 793 | static int sctp_inet_supported_addrs(const struct sctp_sock *opt, |
794 | __u16 *types) | 794 | __be16 *types) |
795 | { | 795 | { |
796 | types[0] = SCTP_PARAM_IPV4_ADDRESS; | 796 | types[0] = SCTP_PARAM_IPV4_ADDRESS; |
797 | return 1; | 797 | return 1; |
@@ -808,7 +808,7 @@ static inline int sctp_v4_xmit(struct sk_buff *skb, | |||
808 | NIPQUAD(((struct rtable *)skb->dst)->rt_dst)); | 808 | NIPQUAD(((struct rtable *)skb->dst)->rt_dst)); |
809 | 809 | ||
810 | SCTP_INC_STATS(SCTP_MIB_OUTSCTPPACKS); | 810 | SCTP_INC_STATS(SCTP_MIB_OUTSCTPPACKS); |
811 | return ip_queue_xmit(skb, ipfragok); | 811 | return ip_queue_xmit(skb, skb->sk, ipfragok); |
812 | } | 812 | } |
813 | 813 | ||
814 | static struct sctp_af sctp_ipv4_specific; | 814 | static struct sctp_af sctp_ipv4_specific; |
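The protocol.c hunks keep ports that live in host order (bp->port, the bind-address port) out of __be16 fields until they cross the boundary, converting with htons() exactly once, while ports taken from the SCTP header (sh->source, sh->dest) are copied as-is because they are already big-endian. A hedged userspace sketch of the same boundary rule, using the ordinary sockaddr_in API rather than SCTP internals:

/*
 * Sketch, not the kernel code: a port kept in host order is converted
 * exactly once, at the point where it enters a wire/sockaddr-format
 * structure whose fields are big-endian.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>
#include <netinet/in.h>

static void fill_sockaddr(struct sockaddr_in *sin, uint32_t addr_host, uint16_t port_host)
{
	memset(sin, 0, sizeof(*sin));
	sin->sin_family = AF_INET;
	sin->sin_addr.s_addr = htonl(addr_host);	/* host -> network */
	sin->sin_port = htons(port_host);		/* host -> network, once */
}

int main(void)
{
	struct sockaddr_in sin;

	fill_sockaddr(&sin, INADDR_LOOPBACK, 9899);	/* 9899: arbitrary example port */
	printf("port on the wire: 0x%04x, on the host: %u\n",
	       (unsigned)sin.sin_port, (unsigned)ntohs(sin.sin_port));
	return 0;
}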
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 507dff72c585..04954e5f6846 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c | |||
@@ -111,7 +111,7 @@ static const struct sctp_paramhdr prsctp_param = { | |||
111 | * provided chunk, as most cause codes will be embedded inside an | 111 | * provided chunk, as most cause codes will be embedded inside an |
112 | * abort chunk. | 112 | * abort chunk. |
113 | */ | 113 | */ |
114 | void sctp_init_cause(struct sctp_chunk *chunk, __u16 cause_code, | 114 | void sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code, |
115 | const void *payload, size_t paylen) | 115 | const void *payload, size_t paylen) |
116 | { | 116 | { |
117 | sctp_errhdr_t err; | 117 | sctp_errhdr_t err; |
@@ -183,7 +183,7 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc, | |||
183 | int num_types, addrs_len = 0; | 183 | int num_types, addrs_len = 0; |
184 | struct sctp_sock *sp; | 184 | struct sctp_sock *sp; |
185 | sctp_supported_addrs_param_t sat; | 185 | sctp_supported_addrs_param_t sat; |
186 | __u16 types[2]; | 186 | __be16 types[2]; |
187 | sctp_adaption_ind_param_t aiparam; | 187 | sctp_adaption_ind_param_t aiparam; |
188 | 188 | ||
189 | /* RFC 2960 3.3.2 Initiation (INIT) (1) | 189 | /* RFC 2960 3.3.2 Initiation (INIT) (1) |
@@ -775,7 +775,7 @@ struct sctp_chunk *sctp_make_abort_no_data( | |||
775 | const struct sctp_chunk *chunk, __u32 tsn) | 775 | const struct sctp_chunk *chunk, __u32 tsn) |
776 | { | 776 | { |
777 | struct sctp_chunk *retval; | 777 | struct sctp_chunk *retval; |
778 | __u32 payload; | 778 | __be32 payload; |
779 | 779 | ||
780 | retval = sctp_make_abort(asoc, chunk, sizeof(sctp_errhdr_t) | 780 | retval = sctp_make_abort(asoc, chunk, sizeof(sctp_errhdr_t) |
781 | + sizeof(tsn)); | 781 | + sizeof(tsn)); |
@@ -951,7 +951,7 @@ nodata: | |||
951 | /* Create an Operation Error chunk. */ | 951 | /* Create an Operation Error chunk. */ |
952 | struct sctp_chunk *sctp_make_op_error(const struct sctp_association *asoc, | 952 | struct sctp_chunk *sctp_make_op_error(const struct sctp_association *asoc, |
953 | const struct sctp_chunk *chunk, | 953 | const struct sctp_chunk *chunk, |
954 | __u16 cause_code, const void *payload, | 954 | __be16 cause_code, const void *payload, |
955 | size_t paylen) | 955 | size_t paylen) |
956 | { | 956 | { |
957 | struct sctp_chunk *retval; | 957 | struct sctp_chunk *retval; |
@@ -1190,15 +1190,14 @@ void sctp_chunk_assign_ssn(struct sctp_chunk *chunk) | |||
1190 | if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) { | 1190 | if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) { |
1191 | ssn = 0; | 1191 | ssn = 0; |
1192 | } else { | 1192 | } else { |
1193 | sid = htons(chunk->subh.data_hdr->stream); | 1193 | sid = ntohs(chunk->subh.data_hdr->stream); |
1194 | if (chunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG) | 1194 | if (chunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG) |
1195 | ssn = sctp_ssn_next(&chunk->asoc->ssnmap->out, sid); | 1195 | ssn = sctp_ssn_next(&chunk->asoc->ssnmap->out, sid); |
1196 | else | 1196 | else |
1197 | ssn = sctp_ssn_peek(&chunk->asoc->ssnmap->out, sid); | 1197 | ssn = sctp_ssn_peek(&chunk->asoc->ssnmap->out, sid); |
1198 | ssn = htons(ssn); | ||
1199 | } | 1198 | } |
1200 | 1199 | ||
1201 | chunk->subh.data_hdr->ssn = ssn; | 1200 | chunk->subh.data_hdr->ssn = htons(ssn); |
1202 | chunk->has_ssn = 1; | 1201 | chunk->has_ssn = 1; |
1203 | } | 1202 | } |
1204 | 1203 | ||
@@ -1280,15 +1279,13 @@ static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep, | |||
1280 | - (bodysize % SCTP_COOKIE_MULTIPLE); | 1279 | - (bodysize % SCTP_COOKIE_MULTIPLE); |
1281 | *cookie_len = headersize + bodysize; | 1280 | *cookie_len = headersize + bodysize; |
1282 | 1281 | ||
1283 | retval = kmalloc(*cookie_len, GFP_ATOMIC); | ||
1284 | |||
1285 | if (!retval) | ||
1286 | goto nodata; | ||
1287 | |||
1288 | /* Clear this memory since we are sending this data structure | 1282 | /* Clear this memory since we are sending this data structure |
1289 | * out on the network. | 1283 | * out on the network. |
1290 | */ | 1284 | */ |
1291 | memset(retval, 0x00, *cookie_len); | 1285 | retval = kzalloc(*cookie_len, GFP_ATOMIC); |
1286 | if (!retval) | ||
1287 | goto nodata; | ||
1288 | |||
1292 | cookie = (struct sctp_signed_cookie *) retval->body; | 1289 | cookie = (struct sctp_signed_cookie *) retval->body; |
1293 | 1290 | ||
1294 | /* Set up the parameter header. */ | 1291 | /* Set up the parameter header. */ |
@@ -1438,7 +1435,7 @@ no_hmac: | |||
1438 | goto fail; | 1435 | goto fail; |
1439 | } | 1436 | } |
1440 | 1437 | ||
1441 | if (ntohs(chunk->sctp_hdr->source) != bear_cookie->peer_addr.v4.sin_port || | 1438 | if (chunk->sctp_hdr->source != bear_cookie->peer_addr.v4.sin_port || |
1442 | ntohs(chunk->sctp_hdr->dest) != bear_cookie->my_port) { | 1439 | ntohs(chunk->sctp_hdr->dest) != bear_cookie->my_port) { |
1443 | *error = -SCTP_IERROR_BAD_PORTS; | 1440 | *error = -SCTP_IERROR_BAD_PORTS; |
1444 | goto fail; | 1441 | goto fail; |
@@ -1473,10 +1470,10 @@ no_hmac: | |||
1473 | suseconds_t usecs = (tv.tv_sec - | 1470 | suseconds_t usecs = (tv.tv_sec - |
1474 | bear_cookie->expiration.tv_sec) * 1000000L + | 1471 | bear_cookie->expiration.tv_sec) * 1000000L + |
1475 | tv.tv_usec - bear_cookie->expiration.tv_usec; | 1472 | tv.tv_usec - bear_cookie->expiration.tv_usec; |
1473 | __be32 n = htonl(usecs); | ||
1476 | 1474 | ||
1477 | usecs = htonl(usecs); | ||
1478 | sctp_init_cause(*errp, SCTP_ERROR_STALE_COOKIE, | 1475 | sctp_init_cause(*errp, SCTP_ERROR_STALE_COOKIE, |
1479 | &usecs, sizeof(usecs)); | 1476 | &n, sizeof(n)); |
1480 | *error = -SCTP_IERROR_STALE_COOKIE; | 1477 | *error = -SCTP_IERROR_STALE_COOKIE; |
1481 | } else | 1478 | } else |
1482 | *error = -SCTP_IERROR_NOMEM; | 1479 | *error = -SCTP_IERROR_NOMEM; |
@@ -1539,8 +1536,8 @@ malformed: | |||
1539 | ********************************************************************/ | 1536 | ********************************************************************/ |
1540 | 1537 | ||
1541 | struct __sctp_missing { | 1538 | struct __sctp_missing { |
1542 | __u32 num_missing; | 1539 | __be32 num_missing; |
1543 | __u16 type; | 1540 | __be16 type; |
1544 | } __attribute__((packed)); | 1541 | } __attribute__((packed)); |
1545 | 1542 | ||
1546 | /* | 1543 | /* |
@@ -1852,9 +1849,10 @@ int sctp_process_init(struct sctp_association *asoc, sctp_cid_t cid, | |||
1852 | * added as the primary transport. The source address seems to | 1849 | * added as the primary transport. The source address seems to |
1855 | * be a better choice than any of the embedded addresses. | 1852 |
1854 | */ | 1851 | */ |
1855 | if (peer_addr) | 1852 | if (peer_addr) { |
1856 | if(!sctp_assoc_add_peer(asoc, peer_addr, gfp, SCTP_ACTIVE)) | 1853 | if(!sctp_assoc_add_peer(asoc, peer_addr, gfp, SCTP_ACTIVE)) |
1857 | goto nomem; | 1854 | goto nomem; |
1855 | } | ||
1858 | 1856 | ||
1859 | /* Process the initialization parameters. */ | 1857 | /* Process the initialization parameters. */ |
1860 | 1858 | ||
@@ -1910,10 +1908,9 @@ int sctp_process_init(struct sctp_association *asoc, sctp_cid_t cid, | |||
1910 | /* Copy cookie in case we need to resend COOKIE-ECHO. */ | 1908 | /* Copy cookie in case we need to resend COOKIE-ECHO. */ |
1911 | cookie = asoc->peer.cookie; | 1909 | cookie = asoc->peer.cookie; |
1912 | if (cookie) { | 1910 | if (cookie) { |
1913 | asoc->peer.cookie = kmalloc(asoc->peer.cookie_len, gfp); | 1911 | asoc->peer.cookie = kmemdup(cookie, asoc->peer.cookie_len, gfp); |
1914 | if (!asoc->peer.cookie) | 1912 | if (!asoc->peer.cookie) |
1915 | goto clean_up; | 1913 | goto clean_up; |
1916 | memcpy(asoc->peer.cookie, cookie, asoc->peer.cookie_len); | ||
1917 | } | 1914 | } |
1918 | 1915 | ||
1919 | /* RFC 2960 7.2.1 The initial value of ssthresh MAY be arbitrarily | 1916 | /* RFC 2960 7.2.1 The initial value of ssthresh MAY be arbitrarily |
@@ -2027,7 +2024,7 @@ static int sctp_process_param(struct sctp_association *asoc, | |||
2027 | /* Fall through. */ | 2024 | /* Fall through. */ |
2028 | case SCTP_PARAM_IPV4_ADDRESS: | 2025 | case SCTP_PARAM_IPV4_ADDRESS: |
2029 | af = sctp_get_af_specific(param_type2af(param.p->type)); | 2026 | af = sctp_get_af_specific(param_type2af(param.p->type)); |
2030 | af->from_addr_param(&addr, param.addr, asoc->peer.port, 0); | 2027 | af->from_addr_param(&addr, param.addr, htons(asoc->peer.port), 0); |
2031 | scope = sctp_scope(peer_addr); | 2028 | scope = sctp_scope(peer_addr); |
2032 | if (sctp_in_scope(&addr, scope)) | 2029 | if (sctp_in_scope(&addr, scope)) |
2033 | if (!sctp_assoc_add_peer(asoc, &addr, gfp, SCTP_UNCONFIRMED)) | 2030 | if (!sctp_assoc_add_peer(asoc, &addr, gfp, SCTP_UNCONFIRMED)) |
@@ -2230,7 +2227,7 @@ struct sctp_chunk *sctp_make_asconf_update_ip(struct sctp_association *asoc, | |||
2230 | union sctp_addr *laddr, | 2227 | union sctp_addr *laddr, |
2231 | struct sockaddr *addrs, | 2228 | struct sockaddr *addrs, |
2232 | int addrcnt, | 2229 | int addrcnt, |
2233 | __u16 flags) | 2230 | __be16 flags) |
2234 | { | 2231 | { |
2235 | sctp_addip_param_t param; | 2232 | sctp_addip_param_t param; |
2236 | struct sctp_chunk *retval; | 2233 | struct sctp_chunk *retval; |
@@ -2363,14 +2360,14 @@ static struct sctp_chunk *sctp_make_asconf_ack(const struct sctp_association *as | |||
2363 | } | 2360 | } |
2364 | 2361 | ||
2365 | /* Add response parameters to an ASCONF_ACK chunk. */ | 2362 | /* Add response parameters to an ASCONF_ACK chunk. */ |
2366 | static void sctp_add_asconf_response(struct sctp_chunk *chunk, __u32 crr_id, | 2363 | static void sctp_add_asconf_response(struct sctp_chunk *chunk, __be32 crr_id, |
2367 | __u16 err_code, sctp_addip_param_t *asconf_param) | 2364 | __be16 err_code, sctp_addip_param_t *asconf_param) |
2368 | { | 2365 | { |
2369 | sctp_addip_param_t ack_param; | 2366 | sctp_addip_param_t ack_param; |
2370 | sctp_errhdr_t err_param; | 2367 | sctp_errhdr_t err_param; |
2371 | int asconf_param_len = 0; | 2368 | int asconf_param_len = 0; |
2372 | int err_param_len = 0; | 2369 | int err_param_len = 0; |
2373 | __u16 response_type; | 2370 | __be16 response_type; |
2374 | 2371 | ||
2375 | if (SCTP_ERROR_NO_ERROR == err_code) { | 2372 | if (SCTP_ERROR_NO_ERROR == err_code) { |
2376 | response_type = SCTP_PARAM_SUCCESS_REPORT; | 2373 | response_type = SCTP_PARAM_SUCCESS_REPORT; |
@@ -2404,7 +2401,7 @@ static void sctp_add_asconf_response(struct sctp_chunk *chunk, __u32 crr_id, | |||
2404 | } | 2401 | } |
2405 | 2402 | ||
2406 | /* Process a asconf parameter. */ | 2403 | /* Process a asconf parameter. */ |
2407 | static __u16 sctp_process_asconf_param(struct sctp_association *asoc, | 2404 | static __be16 sctp_process_asconf_param(struct sctp_association *asoc, |
2408 | struct sctp_chunk *asconf, | 2405 | struct sctp_chunk *asconf, |
2409 | sctp_addip_param_t *asconf_param) | 2406 | sctp_addip_param_t *asconf_param) |
2410 | { | 2407 | { |
@@ -2413,7 +2410,7 @@ static __u16 sctp_process_asconf_param(struct sctp_association *asoc, | |||
2413 | union sctp_addr addr; | 2410 | union sctp_addr addr; |
2414 | struct list_head *pos; | 2411 | struct list_head *pos; |
2415 | union sctp_addr_param *addr_param; | 2412 | union sctp_addr_param *addr_param; |
2416 | 2413 | ||
2417 | addr_param = (union sctp_addr_param *) | 2414 | addr_param = (union sctp_addr_param *) |
2418 | ((void *)asconf_param + sizeof(sctp_addip_param_t)); | 2415 | ((void *)asconf_param + sizeof(sctp_addip_param_t)); |
2419 | 2416 | ||
@@ -2421,7 +2418,7 @@ static __u16 sctp_process_asconf_param(struct sctp_association *asoc, | |||
2421 | if (unlikely(!af)) | 2418 | if (unlikely(!af)) |
2422 | return SCTP_ERROR_INV_PARAM; | 2419 | return SCTP_ERROR_INV_PARAM; |
2423 | 2420 | ||
2424 | af->from_addr_param(&addr, addr_param, asoc->peer.port, 0); | 2421 | af->from_addr_param(&addr, addr_param, htons(asoc->peer.port), 0); |
2425 | switch (asconf_param->param_hdr.type) { | 2422 | switch (asconf_param->param_hdr.type) { |
2426 | case SCTP_PARAM_ADD_IP: | 2423 | case SCTP_PARAM_ADD_IP: |
2427 | /* ADDIP 4.3 D9) If an endpoint receives an ADD IP address | 2424 | /* ADDIP 4.3 D9) If an endpoint receives an ADD IP address |
@@ -2487,7 +2484,7 @@ struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc, | |||
2487 | sctp_addip_param_t *asconf_param; | 2484 | sctp_addip_param_t *asconf_param; |
2488 | struct sctp_chunk *asconf_ack; | 2485 | struct sctp_chunk *asconf_ack; |
2489 | 2486 | ||
2490 | __u16 err_code; | 2487 | __be16 err_code; |
2491 | int length = 0; | 2488 | int length = 0; |
2492 | int chunk_len = asconf->skb->len; | 2489 | int chunk_len = asconf->skb->len; |
2493 | __u32 serial; | 2490 | __u32 serial; |
@@ -2586,7 +2583,7 @@ static int sctp_asconf_param_success(struct sctp_association *asoc, | |||
2586 | 2583 | ||
2587 | /* We have checked the packet before, so we do not check again. */ | 2584 | /* We have checked the packet before, so we do not check again. */ |
2588 | af = sctp_get_af_specific(param_type2af(addr_param->v4.param_hdr.type)); | 2585 | af = sctp_get_af_specific(param_type2af(addr_param->v4.param_hdr.type)); |
2589 | af->from_addr_param(&addr, addr_param, bp->port, 0); | 2586 | af->from_addr_param(&addr, addr_param, htons(bp->port), 0); |
2590 | 2587 | ||
2591 | switch (asconf_param->param_hdr.type) { | 2588 | switch (asconf_param->param_hdr.type) { |
2592 | case SCTP_PARAM_ADD_IP: | 2589 | case SCTP_PARAM_ADD_IP: |
@@ -2630,7 +2627,7 @@ static int sctp_asconf_param_success(struct sctp_association *asoc, | |||
2630 | * All TLVs after the failed response are considered unsuccessful unless a | 2627 | * All TLVs after the failed response are considered unsuccessful unless a |
2631 | * specific success indication is present for the parameter. | 2628 | * specific success indication is present for the parameter. |
2632 | */ | 2629 | */ |
2633 | static __u16 sctp_get_asconf_response(struct sctp_chunk *asconf_ack, | 2630 | static __be16 sctp_get_asconf_response(struct sctp_chunk *asconf_ack, |
2634 | sctp_addip_param_t *asconf_param, | 2631 | sctp_addip_param_t *asconf_param, |
2635 | int no_err) | 2632 | int no_err) |
2636 | { | 2633 | { |
@@ -2638,7 +2635,7 @@ static __u16 sctp_get_asconf_response(struct sctp_chunk *asconf_ack, | |||
2638 | sctp_errhdr_t *err_param; | 2635 | sctp_errhdr_t *err_param; |
2639 | int length; | 2636 | int length; |
2640 | int asconf_ack_len = asconf_ack->skb->len; | 2637 | int asconf_ack_len = asconf_ack->skb->len; |
2641 | __u16 err_code; | 2638 | __be16 err_code; |
2642 | 2639 | ||
2643 | if (no_err) | 2640 | if (no_err) |
2644 | err_code = SCTP_ERROR_NO_ERROR; | 2641 | err_code = SCTP_ERROR_NO_ERROR; |
@@ -2694,7 +2691,7 @@ int sctp_process_asconf_ack(struct sctp_association *asoc, | |||
2694 | int all_param_pass = 0; | 2691 | int all_param_pass = 0; |
2695 | int no_err = 1; | 2692 | int no_err = 1; |
2696 | int retval = 0; | 2693 | int retval = 0; |
2697 | __u16 err_code = SCTP_ERROR_NO_ERROR; | 2694 | __be16 err_code = SCTP_ERROR_NO_ERROR; |
2698 | 2695 | ||
2699 | /* Skip the chunkhdr and addiphdr from the last asconf sent and store | 2696 | /* Skip the chunkhdr and addiphdr from the last asconf sent and store |
2700 | * a pointer to address parameter. | 2697 | * a pointer to address parameter. |
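Two allocation sites in sm_make_chunk.c are collapsed into single calls: kmalloc()+memset() becomes kzalloc(), and kmalloc()+memcpy() becomes kmemdup(), both real kernel helpers with these argument orders. A userspace analogue of the same idiom (memdup() below is a hypothetical helper, not libc):

/*
 * Userspace analogue of the kzalloc()/kmemdup() cleanups above: prefer
 * one call that states the intent (zeroed buffer, duplicated buffer)
 * over an allocate-then-memset or allocate-then-memcpy pair.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void *memdup(const void *src, size_t len)	/* hypothetical helper */
{
	void *p = malloc(len);

	if (p)
		memcpy(p, src, len);
	return p;
}

int main(void)
{
	const char cookie[] = "opaque-cookie-bytes";
	char *zeroed = calloc(1, 64);			/* like kzalloc(64, GFP_ATOMIC) */
	char *copy = memdup(cookie, sizeof(cookie));	/* like kmemdup(cookie, len, gfp) */

	if (!zeroed || !copy)
		return 1;
	printf("copy: %s\n", copy);
	free(zeroed);
	free(copy);
	return 0;
}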
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index 9c10bdec1afe..7bbc6156e455 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c | |||
@@ -442,7 +442,7 @@ static void sctp_do_8_2_transport_strike(struct sctp_association *asoc, | |||
442 | " transport IP: port:%d failed.\n", | 442 | " transport IP: port:%d failed.\n", |
443 | asoc, | 443 | asoc, |
444 | (&transport->ipaddr), | 444 | (&transport->ipaddr), |
445 | transport->ipaddr.v4.sin_port); | 445 | ntohs(transport->ipaddr.v4.sin_port)); |
446 | sctp_assoc_control_transport(asoc, transport, | 446 | sctp_assoc_control_transport(asoc, transport, |
447 | SCTP_TRANSPORT_DOWN, | 447 | SCTP_TRANSPORT_DOWN, |
448 | SCTP_FAILED_THRESHOLD); | 448 | SCTP_FAILED_THRESHOLD); |
@@ -1360,12 +1360,12 @@ static int sctp_cmd_interpreter(sctp_event_t event_type, | |||
1360 | break; | 1360 | break; |
1361 | 1361 | ||
1362 | case SCTP_CMD_INIT_FAILED: | 1362 | case SCTP_CMD_INIT_FAILED: |
1363 | sctp_cmd_init_failed(commands, asoc, cmd->obj.u32); | 1363 | sctp_cmd_init_failed(commands, asoc, cmd->obj.err); |
1364 | break; | 1364 | break; |
1365 | 1365 | ||
1366 | case SCTP_CMD_ASSOC_FAILED: | 1366 | case SCTP_CMD_ASSOC_FAILED: |
1367 | sctp_cmd_assoc_failed(commands, asoc, event_type, | 1367 | sctp_cmd_assoc_failed(commands, asoc, event_type, |
1368 | subtype, chunk, cmd->obj.u32); | 1368 | subtype, chunk, cmd->obj.err); |
1369 | break; | 1369 | break; |
1370 | 1370 | ||
1371 | case SCTP_CMD_INIT_COUNTER_INC: | 1371 | case SCTP_CMD_INIT_COUNTER_INC: |
@@ -1420,7 +1420,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type, | |||
1420 | 1420 | ||
1421 | case SCTP_CMD_PROCESS_CTSN: | 1421 | case SCTP_CMD_PROCESS_CTSN: |
1422 | /* Dummy up a SACK for processing. */ | 1422 | /* Dummy up a SACK for processing. */ |
1423 | sackh.cum_tsn_ack = cmd->obj.u32; | 1423 | sackh.cum_tsn_ack = cmd->obj.be32; |
1424 | sackh.a_rwnd = 0; | 1424 | sackh.a_rwnd = 0; |
1425 | sackh.num_gap_ack_blocks = 0; | 1425 | sackh.num_gap_ack_blocks = 0; |
1426 | sackh.num_dup_tsns = 0; | 1426 | sackh.num_dup_tsns = 0; |
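sm_sideeffect.c stops funnelling everything through cmd->obj.u32 and instead picks the union member that matches the value's width and byte order (obj.err for protocol error codes, obj.be32 for big-endian TSNs), and converts the port with ntohs() before printing it. A sketch of the underlying idea, with made-up member names rather than the kernel's command argument union:

/*
 * Illustrative only: give host-order and big-endian values their own
 * union members, so the member name itself records whether a conversion
 * is still owed.  Names are invented for the sketch.
 */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

union cmd_arg {
	uint32_t u32;	/* host byte order */
	uint32_t be32;	/* network byte order; convert with ntohl() before use */
	uint16_t err;	/* 16-bit protocol error code, network byte order */
};

int main(void)
{
	union cmd_arg arg;

	arg.be32 = htonl(0x12345678);	/* stored exactly as received */
	printf("cum_tsn_ack = 0x%08x\n", (unsigned)ntohl(arg.be32));
	return 0;
}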
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 1c42fe983a5b..27cc444aaf11 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c | |||
@@ -93,7 +93,7 @@ static sctp_disposition_t sctp_sf_shut_8_4_5(const struct sctp_endpoint *ep, | |||
93 | static struct sctp_sackhdr *sctp_sm_pull_sack(struct sctp_chunk *chunk); | 93 | static struct sctp_sackhdr *sctp_sm_pull_sack(struct sctp_chunk *chunk); |
94 | 94 | ||
95 | static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands, | 95 | static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands, |
96 | __u16 error, int sk_err, | 96 | __be16 error, int sk_err, |
97 | const struct sctp_association *asoc, | 97 | const struct sctp_association *asoc, |
98 | struct sctp_transport *transport); | 98 | struct sctp_transport *transport); |
99 | 99 | ||
@@ -443,7 +443,7 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep, | |||
443 | __u32 init_tag; | 443 | __u32 init_tag; |
444 | struct sctp_chunk *err_chunk; | 444 | struct sctp_chunk *err_chunk; |
445 | struct sctp_packet *packet; | 445 | struct sctp_packet *packet; |
446 | __u16 error; | 446 | sctp_error_t error; |
447 | 447 | ||
448 | if (!sctp_vtag_verify(chunk, asoc)) | 448 | if (!sctp_vtag_verify(chunk, asoc)) |
449 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); | 449 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); |
@@ -886,7 +886,7 @@ sctp_disposition_t sctp_sf_sendbeat_8_3(const struct sctp_endpoint *ep, | |||
886 | SCTP_ERROR(ETIMEDOUT)); | 886 | SCTP_ERROR(ETIMEDOUT)); |
887 | /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */ | 887 | /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */ |
888 | sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, | 888 | sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, |
889 | SCTP_U32(SCTP_ERROR_NO_ERROR)); | 889 | SCTP_PERR(SCTP_ERROR_NO_ERROR)); |
890 | SCTP_INC_STATS(SCTP_MIB_ABORTEDS); | 890 | SCTP_INC_STATS(SCTP_MIB_ABORTEDS); |
891 | SCTP_DEC_STATS(SCTP_MIB_CURRESTAB); | 891 | SCTP_DEC_STATS(SCTP_MIB_CURRESTAB); |
892 | return SCTP_DISPOSITION_DELETE_TCB; | 892 | return SCTP_DISPOSITION_DELETE_TCB; |
@@ -2138,7 +2138,7 @@ static sctp_disposition_t sctp_sf_do_5_2_6_stale(const struct sctp_endpoint *ep, | |||
2138 | sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, | 2138 | sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, |
2139 | SCTP_ERROR(ETIMEDOUT)); | 2139 | SCTP_ERROR(ETIMEDOUT)); |
2140 | sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED, | 2140 | sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED, |
2141 | SCTP_U32(SCTP_ERROR_STALE_COOKIE)); | 2141 | SCTP_PERR(SCTP_ERROR_STALE_COOKIE)); |
2142 | return SCTP_DISPOSITION_DELETE_TCB; | 2142 | return SCTP_DISPOSITION_DELETE_TCB; |
2143 | } | 2143 | } |
2144 | 2144 | ||
@@ -2158,7 +2158,7 @@ static sctp_disposition_t sctp_sf_do_5_2_6_stale(const struct sctp_endpoint *ep, | |||
2158 | * to give ample time to retransmit the new cookie and thus | 2158 | * to give ample time to retransmit the new cookie and thus |
2159 | * yield a higher probability of success on the reattempt. | 2159 | * yield a higher probability of success on the reattempt. |
2160 | */ | 2160 | */ |
2161 | stale = ntohl(*(suseconds_t *)((u8 *)err + sizeof(sctp_errhdr_t))); | 2161 | stale = ntohl(*(__be32 *)((u8 *)err + sizeof(sctp_errhdr_t))); |
2162 | stale = (stale * 2) / 1000; | 2162 | stale = (stale * 2) / 1000; |
2163 | 2163 | ||
2164 | bht.param_hdr.type = SCTP_PARAM_COOKIE_PRESERVATIVE; | 2164 | bht.param_hdr.type = SCTP_PARAM_COOKIE_PRESERVATIVE; |
@@ -2250,7 +2250,7 @@ sctp_disposition_t sctp_sf_do_9_1_abort(const struct sctp_endpoint *ep, | |||
2250 | { | 2250 | { |
2251 | struct sctp_chunk *chunk = arg; | 2251 | struct sctp_chunk *chunk = arg; |
2252 | unsigned len; | 2252 | unsigned len; |
2253 | __u16 error = SCTP_ERROR_NO_ERROR; | 2253 | __be16 error = SCTP_ERROR_NO_ERROR; |
2254 | 2254 | ||
2255 | if (!sctp_vtag_verify_either(chunk, asoc)) | 2255 | if (!sctp_vtag_verify_either(chunk, asoc)) |
2256 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); | 2256 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); |
@@ -2275,7 +2275,7 @@ sctp_disposition_t sctp_sf_do_9_1_abort(const struct sctp_endpoint *ep, | |||
2275 | 2275 | ||
2276 | sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNRESET)); | 2276 | sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNRESET)); |
2277 | /* ASSOC_FAILED will DELETE_TCB. */ | 2277 | /* ASSOC_FAILED will DELETE_TCB. */ |
2278 | sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_U32(error)); | 2278 | sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_PERR(error)); |
2279 | SCTP_INC_STATS(SCTP_MIB_ABORTEDS); | 2279 | SCTP_INC_STATS(SCTP_MIB_ABORTEDS); |
2280 | SCTP_DEC_STATS(SCTP_MIB_CURRESTAB); | 2280 | SCTP_DEC_STATS(SCTP_MIB_CURRESTAB); |
2281 | 2281 | ||
@@ -2295,7 +2295,7 @@ sctp_disposition_t sctp_sf_cookie_wait_abort(const struct sctp_endpoint *ep, | |||
2295 | { | 2295 | { |
2296 | struct sctp_chunk *chunk = arg; | 2296 | struct sctp_chunk *chunk = arg; |
2297 | unsigned len; | 2297 | unsigned len; |
2298 | __u16 error = SCTP_ERROR_NO_ERROR; | 2298 | __be16 error = SCTP_ERROR_NO_ERROR; |
2299 | 2299 | ||
2300 | if (!sctp_vtag_verify_either(chunk, asoc)) | 2300 | if (!sctp_vtag_verify_either(chunk, asoc)) |
2301 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); | 2301 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); |
@@ -2357,7 +2357,7 @@ sctp_disposition_t sctp_sf_cookie_echoed_abort(const struct sctp_endpoint *ep, | |||
2357 | * This is common code called by several sctp_sf_*_abort() functions above. | 2357 | * This is common code called by several sctp_sf_*_abort() functions above. |
2358 | */ | 2358 | */ |
2359 | static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands, | 2359 | static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands, |
2360 | __u16 error, int sk_err, | 2360 | __be16 error, int sk_err, |
2361 | const struct sctp_association *asoc, | 2361 | const struct sctp_association *asoc, |
2362 | struct sctp_transport *transport) | 2362 | struct sctp_transport *transport) |
2363 | { | 2363 | { |
@@ -2370,7 +2370,7 @@ static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands, | |||
2370 | sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(sk_err)); | 2370 | sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(sk_err)); |
2371 | /* CMD_INIT_FAILED will DELETE_TCB. */ | 2371 | /* CMD_INIT_FAILED will DELETE_TCB. */ |
2372 | sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED, | 2372 | sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED, |
2373 | SCTP_U32(error)); | 2373 | SCTP_PERR(error)); |
2374 | return SCTP_DISPOSITION_ABORT; | 2374 | return SCTP_DISPOSITION_ABORT; |
2375 | } | 2375 | } |
2376 | 2376 | ||
@@ -2466,7 +2466,7 @@ sctp_disposition_t sctp_sf_do_9_2_shutdown(const struct sctp_endpoint *ep, | |||
2466 | * received by the SHUTDOWN sender. | 2466 | * received by the SHUTDOWN sender. |
2467 | */ | 2467 | */ |
2468 | sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_CTSN, | 2468 | sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_CTSN, |
2469 | SCTP_U32(chunk->subh.shutdown_hdr->cum_tsn_ack)); | 2469 | SCTP_BE32(chunk->subh.shutdown_hdr->cum_tsn_ack)); |
2470 | 2470 | ||
2471 | out: | 2471 | out: |
2472 | return disposition; | 2472 | return disposition; |
@@ -2545,6 +2545,7 @@ sctp_disposition_t sctp_sf_do_ecn_cwr(const struct sctp_endpoint *ep, | |||
2545 | { | 2545 | { |
2546 | sctp_cwrhdr_t *cwr; | 2546 | sctp_cwrhdr_t *cwr; |
2547 | struct sctp_chunk *chunk = arg; | 2547 | struct sctp_chunk *chunk = arg; |
2548 | u32 lowest_tsn; | ||
2548 | 2549 | ||
2549 | if (!sctp_vtag_verify(chunk, asoc)) | 2550 | if (!sctp_vtag_verify(chunk, asoc)) |
2550 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); | 2551 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); |
@@ -2556,14 +2557,14 @@ sctp_disposition_t sctp_sf_do_ecn_cwr(const struct sctp_endpoint *ep, | |||
2556 | cwr = (sctp_cwrhdr_t *) chunk->skb->data; | 2557 | cwr = (sctp_cwrhdr_t *) chunk->skb->data; |
2557 | skb_pull(chunk->skb, sizeof(sctp_cwrhdr_t)); | 2558 | skb_pull(chunk->skb, sizeof(sctp_cwrhdr_t)); |
2558 | 2559 | ||
2559 | cwr->lowest_tsn = ntohl(cwr->lowest_tsn); | 2560 | lowest_tsn = ntohl(cwr->lowest_tsn); |
2560 | 2561 | ||
2561 | /* Does this CWR ack the last sent congestion notification? */ | 2562 | /* Does this CWR ack the last sent congestion notification? */ |
2562 | if (TSN_lte(asoc->last_ecne_tsn, cwr->lowest_tsn)) { | 2563 | if (TSN_lte(asoc->last_ecne_tsn, lowest_tsn)) { |
2563 | /* Stop sending ECNE. */ | 2564 | /* Stop sending ECNE. */ |
2564 | sctp_add_cmd_sf(commands, | 2565 | sctp_add_cmd_sf(commands, |
2565 | SCTP_CMD_ECN_CWR, | 2566 | SCTP_CMD_ECN_CWR, |
2566 | SCTP_U32(cwr->lowest_tsn)); | 2567 | SCTP_U32(lowest_tsn)); |
2567 | } | 2568 | } |
2568 | return SCTP_DISPOSITION_CONSUME; | 2569 | return SCTP_DISPOSITION_CONSUME; |
2569 | } | 2570 | } |
@@ -3360,7 +3361,7 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep, | |||
3360 | sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, | 3361 | sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, |
3361 | SCTP_ERROR(ECONNABORTED)); | 3362 | SCTP_ERROR(ECONNABORTED)); |
3362 | sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, | 3363 | sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, |
3363 | SCTP_U32(SCTP_ERROR_ASCONF_ACK)); | 3364 | SCTP_PERR(SCTP_ERROR_ASCONF_ACK)); |
3364 | SCTP_INC_STATS(SCTP_MIB_ABORTEDS); | 3365 | SCTP_INC_STATS(SCTP_MIB_ABORTEDS); |
3365 | SCTP_DEC_STATS(SCTP_MIB_CURRESTAB); | 3366 | SCTP_DEC_STATS(SCTP_MIB_CURRESTAB); |
3366 | return SCTP_DISPOSITION_ABORT; | 3367 | return SCTP_DISPOSITION_ABORT; |
@@ -3388,7 +3389,7 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep, | |||
3388 | sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, | 3389 | sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, |
3389 | SCTP_ERROR(ECONNABORTED)); | 3390 | SCTP_ERROR(ECONNABORTED)); |
3390 | sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, | 3391 | sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, |
3391 | SCTP_U32(SCTP_ERROR_ASCONF_ACK)); | 3392 | SCTP_PERR(SCTP_ERROR_ASCONF_ACK)); |
3392 | SCTP_INC_STATS(SCTP_MIB_ABORTEDS); | 3393 | SCTP_INC_STATS(SCTP_MIB_ABORTEDS); |
3393 | SCTP_DEC_STATS(SCTP_MIB_CURRESTAB); | 3394 | SCTP_DEC_STATS(SCTP_MIB_CURRESTAB); |
3394 | return SCTP_DISPOSITION_ABORT; | 3395 | return SCTP_DISPOSITION_ABORT; |
@@ -3743,12 +3744,12 @@ static sctp_disposition_t sctp_sf_violation_chunklen( | |||
3743 | sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, | 3744 | sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, |
3744 | SCTP_ERROR(ECONNREFUSED)); | 3745 | SCTP_ERROR(ECONNREFUSED)); |
3745 | sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED, | 3746 | sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED, |
3746 | SCTP_U32(SCTP_ERROR_PROTO_VIOLATION)); | 3747 | SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION)); |
3747 | } else { | 3748 | } else { |
3748 | sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, | 3749 | sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, |
3749 | SCTP_ERROR(ECONNABORTED)); | 3750 | SCTP_ERROR(ECONNABORTED)); |
3750 | sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, | 3751 | sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, |
3751 | SCTP_U32(SCTP_ERROR_PROTO_VIOLATION)); | 3752 | SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION)); |
3752 | SCTP_DEC_STATS(SCTP_MIB_CURRESTAB); | 3753 | SCTP_DEC_STATS(SCTP_MIB_CURRESTAB); |
3753 | } | 3754 | } |
3754 | 3755 | ||
@@ -4062,7 +4063,7 @@ sctp_disposition_t sctp_sf_do_9_1_prm_abort( | |||
4062 | SCTP_ERROR(ECONNABORTED)); | 4063 | SCTP_ERROR(ECONNABORTED)); |
4063 | /* Delete the established association. */ | 4064 | /* Delete the established association. */ |
4064 | sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, | 4065 | sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, |
4065 | SCTP_U32(SCTP_ERROR_USER_ABORT)); | 4066 | SCTP_PERR(SCTP_ERROR_USER_ABORT)); |
4066 | 4067 | ||
4067 | SCTP_INC_STATS(SCTP_MIB_ABORTEDS); | 4068 | SCTP_INC_STATS(SCTP_MIB_ABORTEDS); |
4068 | SCTP_DEC_STATS(SCTP_MIB_CURRESTAB); | 4069 | SCTP_DEC_STATS(SCTP_MIB_CURRESTAB); |
@@ -4199,7 +4200,7 @@ sctp_disposition_t sctp_sf_cookie_wait_prm_abort( | |||
4199 | SCTP_ERROR(ECONNREFUSED)); | 4200 | SCTP_ERROR(ECONNREFUSED)); |
4200 | /* Delete the established association. */ | 4201 | /* Delete the established association. */ |
4201 | sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED, | 4202 | sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED, |
4202 | SCTP_U32(SCTP_ERROR_USER_ABORT)); | 4203 | SCTP_PERR(SCTP_ERROR_USER_ABORT)); |
4203 | 4204 | ||
4204 | return retval; | 4205 | return retval; |
4205 | } | 4206 | } |
@@ -4571,7 +4572,7 @@ sctp_disposition_t sctp_sf_do_6_3_3_rtx(const struct sctp_endpoint *ep, | |||
4571 | SCTP_ERROR(ETIMEDOUT)); | 4572 | SCTP_ERROR(ETIMEDOUT)); |
4572 | /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */ | 4573 | /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */ |
4573 | sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, | 4574 | sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, |
4574 | SCTP_U32(SCTP_ERROR_NO_ERROR)); | 4575 | SCTP_PERR(SCTP_ERROR_NO_ERROR)); |
4575 | SCTP_INC_STATS(SCTP_MIB_ABORTEDS); | 4576 | SCTP_INC_STATS(SCTP_MIB_ABORTEDS); |
4576 | SCTP_DEC_STATS(SCTP_MIB_CURRESTAB); | 4577 | SCTP_DEC_STATS(SCTP_MIB_CURRESTAB); |
4577 | return SCTP_DISPOSITION_DELETE_TCB; | 4578 | return SCTP_DISPOSITION_DELETE_TCB; |
@@ -4693,7 +4694,7 @@ sctp_disposition_t sctp_sf_t1_init_timer_expire(const struct sctp_endpoint *ep, | |||
4693 | sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, | 4694 | sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, |
4694 | SCTP_ERROR(ETIMEDOUT)); | 4695 | SCTP_ERROR(ETIMEDOUT)); |
4695 | sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED, | 4696 | sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED, |
4696 | SCTP_U32(SCTP_ERROR_NO_ERROR)); | 4697 | SCTP_PERR(SCTP_ERROR_NO_ERROR)); |
4697 | return SCTP_DISPOSITION_DELETE_TCB; | 4698 | return SCTP_DISPOSITION_DELETE_TCB; |
4698 | } | 4699 | } |
4699 | 4700 | ||
@@ -4745,7 +4746,7 @@ sctp_disposition_t sctp_sf_t1_cookie_timer_expire(const struct sctp_endpoint *ep | |||
4745 | sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, | 4746 | sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, |
4746 | SCTP_ERROR(ETIMEDOUT)); | 4747 | SCTP_ERROR(ETIMEDOUT)); |
4747 | sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED, | 4748 | sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED, |
4748 | SCTP_U32(SCTP_ERROR_NO_ERROR)); | 4749 | SCTP_PERR(SCTP_ERROR_NO_ERROR)); |
4749 | return SCTP_DISPOSITION_DELETE_TCB; | 4750 | return SCTP_DISPOSITION_DELETE_TCB; |
4750 | } | 4751 | } |
4751 | 4752 | ||
@@ -4781,7 +4782,7 @@ sctp_disposition_t sctp_sf_t2_timer_expire(const struct sctp_endpoint *ep, | |||
4781 | SCTP_ERROR(ETIMEDOUT)); | 4782 | SCTP_ERROR(ETIMEDOUT)); |
4782 | /* Note: CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */ | 4783 | /* Note: CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */ |
4783 | sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, | 4784 | sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, |
4784 | SCTP_U32(SCTP_ERROR_NO_ERROR)); | 4785 | SCTP_PERR(SCTP_ERROR_NO_ERROR)); |
4785 | SCTP_INC_STATS(SCTP_MIB_ABORTEDS); | 4786 | SCTP_INC_STATS(SCTP_MIB_ABORTEDS); |
4786 | SCTP_DEC_STATS(SCTP_MIB_CURRESTAB); | 4787 | SCTP_DEC_STATS(SCTP_MIB_CURRESTAB); |
4787 | return SCTP_DISPOSITION_DELETE_TCB; | 4788 | return SCTP_DISPOSITION_DELETE_TCB; |
@@ -4859,7 +4860,7 @@ sctp_disposition_t sctp_sf_t4_timer_expire( | |||
4859 | sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, | 4860 | sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, |
4860 | SCTP_ERROR(ETIMEDOUT)); | 4861 | SCTP_ERROR(ETIMEDOUT)); |
4861 | sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, | 4862 | sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, |
4862 | SCTP_U32(SCTP_ERROR_NO_ERROR)); | 4863 | SCTP_PERR(SCTP_ERROR_NO_ERROR)); |
4863 | SCTP_INC_STATS(SCTP_MIB_ABORTEDS); | 4864 | SCTP_INC_STATS(SCTP_MIB_ABORTEDS); |
4864 | SCTP_INC_STATS(SCTP_MIB_CURRESTAB); | 4865 | SCTP_INC_STATS(SCTP_MIB_CURRESTAB); |
4865 | return SCTP_DISPOSITION_ABORT; | 4866 | return SCTP_DISPOSITION_ABORT; |
@@ -4915,7 +4916,7 @@ sctp_disposition_t sctp_sf_t5_timer_expire(const struct sctp_endpoint *ep, | |||
4915 | sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, | 4916 | sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, |
4916 | SCTP_ERROR(ETIMEDOUT)); | 4917 | SCTP_ERROR(ETIMEDOUT)); |
4917 | sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, | 4918 | sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, |
4918 | SCTP_U32(SCTP_ERROR_NO_ERROR)); | 4919 | SCTP_PERR(SCTP_ERROR_NO_ERROR)); |
4919 | 4920 | ||
4920 | return SCTP_DISPOSITION_DELETE_TCB; | 4921 | return SCTP_DISPOSITION_DELETE_TCB; |
4921 | nomem: | 4922 | nomem: |
@@ -5365,7 +5366,7 @@ static int sctp_eat_data(const struct sctp_association *asoc, | |||
5365 | sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, | 5366 | sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, |
5366 | SCTP_ERROR(ECONNABORTED)); | 5367 | SCTP_ERROR(ECONNABORTED)); |
5367 | sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, | 5368 | sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, |
5368 | SCTP_U32(SCTP_ERROR_NO_DATA)); | 5369 | SCTP_PERR(SCTP_ERROR_NO_DATA)); |
5369 | SCTP_INC_STATS(SCTP_MIB_ABORTEDS); | 5370 | SCTP_INC_STATS(SCTP_MIB_ABORTEDS); |
5370 | SCTP_DEC_STATS(SCTP_MIB_CURRESTAB); | 5371 | SCTP_DEC_STATS(SCTP_MIB_CURRESTAB); |
5371 | return SCTP_IERROR_NO_DATA; | 5372 | return SCTP_IERROR_NO_DATA; |
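In sm_statefuns.c the CWR handler no longer byte-swaps cwr->lowest_tsn inside the received chunk; it converts into a local variable and leaves the buffer in network order, while protocol error codes are passed through the SCTP_PERR()/SCTP_BE32() wrappers so their byte order is tracked by type. A sketch of the "convert into a local, do not mutate the buffer" pattern (a plain comparison stands in for the kernel's TSN_lte() serial-number arithmetic; the layout is illustrative):

/*
 * Sketch: read the big-endian field into a local instead of overwriting
 * it in place, so later readers of the packet still see wire format.
 */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

struct cwr_hdr {
	uint32_t lowest_tsn;	/* big-endian, exactly as received */
};

static void handle_cwr(const struct cwr_hdr *cwr, uint32_t last_ecne_tsn)
{
	uint32_t lowest_tsn = ntohl(cwr->lowest_tsn);	/* convert, do not mutate */

	if (last_ecne_tsn <= lowest_tsn)		/* kernel uses TSN_lte() here */
		printf("CWR acks TSN %u; stop sending ECNE\n", (unsigned)lowest_tsn);
}

int main(void)
{
	struct cwr_hdr cwr = { .lowest_tsn = htonl(42) };

	handle_cwr(&cwr, 40);
	return 0;
}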
diff --git a/net/sctp/sm_statetable.c b/net/sctp/sm_statetable.c index 8bcca5676151..733dd87b3a7d 100644 --- a/net/sctp/sm_statetable.c +++ b/net/sctp/sm_statetable.c | |||
@@ -104,325 +104,322 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type, | |||
104 | }; | 104 | }; |
105 | } | 105 | } |
106 | 106 | ||
107 | #define TYPE_SCTP_FUNC(func) {.fn = func, .name = #func} | ||
108 | |||
107 | #define TYPE_SCTP_DATA { \ | 109 | #define TYPE_SCTP_DATA { \ |
108 | /* SCTP_STATE_EMPTY */ \ | 110 | /* SCTP_STATE_EMPTY */ \ |
109 | {.fn = sctp_sf_ootb, .name = "sctp_sf_ootb"}, \ | 111 | TYPE_SCTP_FUNC(sctp_sf_ootb), \ |
110 | /* SCTP_STATE_CLOSED */ \ | 112 | /* SCTP_STATE_CLOSED */ \ |
111 | {.fn = sctp_sf_tabort_8_4_8, .name = "sctp_sf_tabort_8_4_8"}, \ | 113 | TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \ |
112 | /* SCTP_STATE_COOKIE_WAIT */ \ | 114 | /* SCTP_STATE_COOKIE_WAIT */ \ |
113 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | 115 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
114 | /* SCTP_STATE_COOKIE_ECHOED */ \ | 116 | /* SCTP_STATE_COOKIE_ECHOED */ \ |
115 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | 117 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
116 | /* SCTP_STATE_ESTABLISHED */ \ | 118 | /* SCTP_STATE_ESTABLISHED */ \ |
117 | {.fn = sctp_sf_eat_data_6_2, .name = "sctp_sf_eat_data_6_2"}, \ | 119 | TYPE_SCTP_FUNC(sctp_sf_eat_data_6_2), \ |
118 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | 120 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ |
119 | {.fn = sctp_sf_eat_data_6_2, .name = "sctp_sf_eat_data_6_2"}, \ | 121 | TYPE_SCTP_FUNC(sctp_sf_eat_data_6_2), \ |
120 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | 122 | /* SCTP_STATE_SHUTDOWN_SENT */ \ |
121 | {.fn = sctp_sf_eat_data_fast_4_4, .name = "sctp_sf_eat_data_fast_4_4"}, \ | 123 | TYPE_SCTP_FUNC(sctp_sf_eat_data_fast_4_4), \ |
122 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | 124 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ |
123 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | 125 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
124 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | 126 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ |
125 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | 127 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
126 | } /* TYPE_SCTP_DATA */ | 128 | } /* TYPE_SCTP_DATA */ |
127 | 129 | ||
128 | #define TYPE_SCTP_INIT { \ | 130 | #define TYPE_SCTP_INIT { \ |
129 | /* SCTP_STATE_EMPTY */ \ | 131 | /* SCTP_STATE_EMPTY */ \ |
130 | {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ | 132 | TYPE_SCTP_FUNC(sctp_sf_bug), \ |
131 | /* SCTP_STATE_CLOSED */ \ | 133 | /* SCTP_STATE_CLOSED */ \ |
132 | {.fn = sctp_sf_do_5_1B_init, .name = "sctp_sf_do_5_1B_init"}, \ | 134 | TYPE_SCTP_FUNC(sctp_sf_do_5_1B_init), \ |
133 | /* SCTP_STATE_COOKIE_WAIT */ \ | 135 | /* SCTP_STATE_COOKIE_WAIT */ \ |
134 | {.fn = sctp_sf_do_5_2_1_siminit, .name = "sctp_sf_do_5_2_1_siminit"}, \ | 136 | TYPE_SCTP_FUNC(sctp_sf_do_5_2_1_siminit), \ |
135 | /* SCTP_STATE_COOKIE_ECHOED */ \ | 137 | /* SCTP_STATE_COOKIE_ECHOED */ \ |
136 | {.fn = sctp_sf_do_5_2_1_siminit, .name = "sctp_sf_do_5_2_1_siminit"}, \ | 138 | TYPE_SCTP_FUNC(sctp_sf_do_5_2_1_siminit), \ |
137 | /* SCTP_STATE_ESTABLISHED */ \ | 139 | /* SCTP_STATE_ESTABLISHED */ \ |
138 | {.fn = sctp_sf_do_5_2_2_dupinit, .name = "sctp_sf_do_5_2_2_dupinit"}, \ | 140 | TYPE_SCTP_FUNC(sctp_sf_do_5_2_2_dupinit), \ |
139 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | 141 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ |
140 | {.fn = sctp_sf_do_5_2_2_dupinit, .name = "sctp_sf_do_5_2_2_dupinit"}, \ | 142 | TYPE_SCTP_FUNC(sctp_sf_do_5_2_2_dupinit), \ |
141 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | 143 | /* SCTP_STATE_SHUTDOWN_SENT */ \ |
142 | {.fn = sctp_sf_do_5_2_2_dupinit, .name = "sctp_sf_do_5_2_2_dupinit"}, \ | 144 | TYPE_SCTP_FUNC(sctp_sf_do_5_2_2_dupinit), \ |
143 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | 145 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ |
144 | {.fn = sctp_sf_do_5_2_2_dupinit, .name = "sctp_sf_do_5_2_2_dupinit"}, \ | 146 | TYPE_SCTP_FUNC(sctp_sf_do_5_2_2_dupinit), \ |
145 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | 147 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ |
146 | {.fn = sctp_sf_do_9_2_reshutack, .name = "sctp_sf_do_9_2_reshutack"}, \ | 148 | TYPE_SCTP_FUNC(sctp_sf_do_9_2_reshutack), \ |
147 | } /* TYPE_SCTP_INIT */ | 149 | } /* TYPE_SCTP_INIT */ |
148 | 150 | ||
149 | #define TYPE_SCTP_INIT_ACK { \ | 151 | #define TYPE_SCTP_INIT_ACK { \ |
150 | /* SCTP_STATE_EMPTY */ \ | 152 | /* SCTP_STATE_EMPTY */ \ |
151 | {.fn = sctp_sf_ootb, .name = "sctp_sf_ootb"}, \ | 153 | TYPE_SCTP_FUNC(sctp_sf_ootb), \ |
152 | /* SCTP_STATE_CLOSED */ \ | 154 | /* SCTP_STATE_CLOSED */ \ |
153 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | 155 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
154 | /* SCTP_STATE_COOKIE_WAIT */ \ | 156 | /* SCTP_STATE_COOKIE_WAIT */ \ |
155 | {.fn = sctp_sf_do_5_1C_ack, .name = "sctp_sf_do_5_1C_ack"}, \ | 157 | TYPE_SCTP_FUNC(sctp_sf_do_5_1C_ack), \ |
156 | /* SCTP_STATE_COOKIE_ECHOED */ \ | 158 | /* SCTP_STATE_COOKIE_ECHOED */ \ |
157 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | 159 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
158 | /* SCTP_STATE_ESTABLISHED */ \ | 160 | /* SCTP_STATE_ESTABLISHED */ \ |
159 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | 161 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
160 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | 162 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ |
161 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | 163 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
162 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | 164 | /* SCTP_STATE_SHUTDOWN_SENT */ \ |
163 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | 165 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
164 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | 166 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ |
165 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | 167 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
166 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | 168 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ |
167 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | 169 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
168 | } /* TYPE_SCTP_INIT_ACK */ | 170 | } /* TYPE_SCTP_INIT_ACK */ |
169 | 171 | ||
170 | #define TYPE_SCTP_SACK { \ | 172 | #define TYPE_SCTP_SACK { \ |
171 | /* SCTP_STATE_EMPTY */ \ | 173 | /* SCTP_STATE_EMPTY */ \ |
172 | {.fn = sctp_sf_ootb, .name = "sctp_sf_ootb"}, \ | 174 | TYPE_SCTP_FUNC(sctp_sf_ootb), \ |
173 | /* SCTP_STATE_CLOSED */ \ | 175 | /* SCTP_STATE_CLOSED */ \ |
174 | {.fn = sctp_sf_tabort_8_4_8, .name = "sctp_sf_tabort_8_4_8"}, \ | 176 | TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \ |
175 | /* SCTP_STATE_COOKIE_WAIT */ \ | 177 | /* SCTP_STATE_COOKIE_WAIT */ \ |
176 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | 178 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
177 | /* SCTP_STATE_COOKIE_ECHOED */ \ | 179 | /* SCTP_STATE_COOKIE_ECHOED */ \ |
178 | {.fn = sctp_sf_eat_sack_6_2, .name = "sctp_sf_eat_sack_6_2"}, \ | 180 | TYPE_SCTP_FUNC(sctp_sf_eat_sack_6_2), \ |
179 | /* SCTP_STATE_ESTABLISHED */ \ | 181 | /* SCTP_STATE_ESTABLISHED */ \ |
180 | {.fn = sctp_sf_eat_sack_6_2, .name = "sctp_sf_eat_sack_6_2"}, \ | 182 | TYPE_SCTP_FUNC(sctp_sf_eat_sack_6_2), \ |
181 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | 183 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ |
182 | {.fn = sctp_sf_eat_sack_6_2, .name = "sctp_sf_eat_sack_6_2"}, \ | 184 | TYPE_SCTP_FUNC(sctp_sf_eat_sack_6_2), \ |
183 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | 185 | /* SCTP_STATE_SHUTDOWN_SENT */ \ |
184 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | 186 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
185 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | 187 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ |
186 | {.fn = sctp_sf_eat_sack_6_2, .name = "sctp_sf_eat_sack_6_2"}, \ | 188 | TYPE_SCTP_FUNC(sctp_sf_eat_sack_6_2), \ |
187 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | 189 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ |
188 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | 190 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
189 | } /* TYPE_SCTP_SACK */ | 191 | } /* TYPE_SCTP_SACK */ |
190 | 192 | ||
191 | #define TYPE_SCTP_HEARTBEAT { \ | 193 | #define TYPE_SCTP_HEARTBEAT { \ |
192 | /* SCTP_STATE_EMPTY */ \ | 194 | /* SCTP_STATE_EMPTY */ \ |
193 | {.fn = sctp_sf_ootb, .name = "sctp_sf_ootb"}, \ | 195 | TYPE_SCTP_FUNC(sctp_sf_ootb), \ |
194 | /* SCTP_STATE_CLOSED */ \ | 196 | /* SCTP_STATE_CLOSED */ \ |
195 | {.fn = sctp_sf_tabort_8_4_8, .name = "sctp_sf_tabort_8_4_8"}, \ | 197 | TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \ |
196 | /* SCTP_STATE_COOKIE_WAIT */ \ | 198 | /* SCTP_STATE_COOKIE_WAIT */ \ |
197 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | 199 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
198 | /* SCTP_STATE_COOKIE_ECHOED */ \ | 200 | /* SCTP_STATE_COOKIE_ECHOED */ \ |
199 | {.fn = sctp_sf_beat_8_3, .name = "sctp_sf_beat_8_3"}, \ | 201 | TYPE_SCTP_FUNC(sctp_sf_beat_8_3), \ |
200 | /* SCTP_STATE_ESTABLISHED */ \ | 202 | /* SCTP_STATE_ESTABLISHED */ \ |
201 | {.fn = sctp_sf_beat_8_3, .name = "sctp_sf_beat_8_3"}, \ | 203 | TYPE_SCTP_FUNC(sctp_sf_beat_8_3), \ |
202 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | 204 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ |
203 | {.fn = sctp_sf_beat_8_3, .name = "sctp_sf_beat_8_3"}, \ | 205 | TYPE_SCTP_FUNC(sctp_sf_beat_8_3), \ |
204 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | 206 | /* SCTP_STATE_SHUTDOWN_SENT */ \ |
205 | {.fn = sctp_sf_beat_8_3, .name = "sctp_sf_beat_8_3"}, \ | 207 | TYPE_SCTP_FUNC(sctp_sf_beat_8_3), \ |
206 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | 208 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ |
207 | {.fn = sctp_sf_beat_8_3, .name = "sctp_sf_beat_8_3"}, \ | 209 | TYPE_SCTP_FUNC(sctp_sf_beat_8_3), \ |
208 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | 210 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ |
209 | /* This should not happen, but we are nice. */ \ | 211 | /* This should not happen, but we are nice. */ \ |
210 | {.fn = sctp_sf_beat_8_3, .name = "sctp_sf_beat_8_3"}, \ | 212 | TYPE_SCTP_FUNC(sctp_sf_beat_8_3), \ |
211 | } /* TYPE_SCTP_HEARTBEAT */ | 213 | } /* TYPE_SCTP_HEARTBEAT */ |
212 | 214 | ||
213 | #define TYPE_SCTP_HEARTBEAT_ACK { \ | 215 | #define TYPE_SCTP_HEARTBEAT_ACK { \ |
214 | /* SCTP_STATE_EMPTY */ \ | 216 | /* SCTP_STATE_EMPTY */ \ |
215 | {.fn = sctp_sf_ootb, .name = "sctp_sf_ootb"}, \ | 217 | TYPE_SCTP_FUNC(sctp_sf_ootb), \ |
216 | /* SCTP_STATE_CLOSED */ \ | 218 | /* SCTP_STATE_CLOSED */ \ |
217 | {.fn = sctp_sf_tabort_8_4_8, .name = "sctp_sf_tabort_8_4_8"}, \ | 219 | TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \ |
218 | /* SCTP_STATE_COOKIE_WAIT */ \ | 220 | /* SCTP_STATE_COOKIE_WAIT */ \ |
219 | {.fn = sctp_sf_violation, .name = "sctp_sf_violation"}, \ | 221 | TYPE_SCTP_FUNC(sctp_sf_violation), \ |
220 | /* SCTP_STATE_COOKIE_ECHOED */ \ | 222 | /* SCTP_STATE_COOKIE_ECHOED */ \ |
221 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | 223 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
222 | /* SCTP_STATE_ESTABLISHED */ \ | 224 | /* SCTP_STATE_ESTABLISHED */ \ |
223 | {.fn = sctp_sf_backbeat_8_3, .name = "sctp_sf_backbeat_8_3"}, \ | 225 | TYPE_SCTP_FUNC(sctp_sf_backbeat_8_3), \ |
224 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | 226 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ |
225 | {.fn = sctp_sf_backbeat_8_3, .name = "sctp_sf_backbeat_8_3"}, \ | 227 | TYPE_SCTP_FUNC(sctp_sf_backbeat_8_3), \ |
226 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | 228 | /* SCTP_STATE_SHUTDOWN_SENT */ \ |
227 | {.fn = sctp_sf_backbeat_8_3, .name = "sctp_sf_backbeat_8_3"}, \ | 229 | TYPE_SCTP_FUNC(sctp_sf_backbeat_8_3), \ |
228 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | 230 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ |
229 | {.fn = sctp_sf_backbeat_8_3, .name = "sctp_sf_backbeat_8_3"}, \ | 231 | TYPE_SCTP_FUNC(sctp_sf_backbeat_8_3), \ |
230 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | 232 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ |
231 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | 233 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
232 | } /* TYPE_SCTP_HEARTBEAT_ACK */ | 234 | } /* TYPE_SCTP_HEARTBEAT_ACK */ |
233 | 235 | ||
234 | #define TYPE_SCTP_ABORT { \ | 236 | #define TYPE_SCTP_ABORT { \ |
235 | /* SCTP_STATE_EMPTY */ \ | 237 | /* SCTP_STATE_EMPTY */ \ |
236 | {.fn = sctp_sf_ootb, .name = "sctp_sf_ootb"}, \ | 238 | TYPE_SCTP_FUNC(sctp_sf_ootb), \ |
237 | /* SCTP_STATE_CLOSED */ \ | 239 | /* SCTP_STATE_CLOSED */ \ |
238 | {.fn = sctp_sf_pdiscard, .name = "sctp_sf_pdiscard"}, \ | 240 | TYPE_SCTP_FUNC(sctp_sf_pdiscard), \ |
239 | /* SCTP_STATE_COOKIE_WAIT */ \ | 241 | /* SCTP_STATE_COOKIE_WAIT */ \ |
240 | {.fn = sctp_sf_cookie_wait_abort, .name = "sctp_sf_cookie_wait_abort"}, \ | 242 | TYPE_SCTP_FUNC(sctp_sf_cookie_wait_abort), \ |
241 | /* SCTP_STATE_COOKIE_ECHOED */ \ | 243 | /* SCTP_STATE_COOKIE_ECHOED */ \ |
242 | {.fn = sctp_sf_cookie_echoed_abort, \ | 244 | TYPE_SCTP_FUNC(sctp_sf_cookie_echoed_abort), \ |
243 | .name = "sctp_sf_cookie_echoed_abort"}, \ | ||
244 | /* SCTP_STATE_ESTABLISHED */ \ | 245 | /* SCTP_STATE_ESTABLISHED */ \ |
245 | {.fn = sctp_sf_do_9_1_abort, .name = "sctp_sf_do_9_1_abort"}, \ | 246 | TYPE_SCTP_FUNC(sctp_sf_do_9_1_abort), \ |
246 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | 247 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ |
247 | {.fn = sctp_sf_shutdown_pending_abort, \ | 248 | TYPE_SCTP_FUNC(sctp_sf_shutdown_pending_abort), \ |
248 | .name = "sctp_sf_shutdown_pending_abort"}, \ | ||
249 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | 249 | /* SCTP_STATE_SHUTDOWN_SENT */ \ |
250 | {.fn = sctp_sf_shutdown_sent_abort, \ | 250 | TYPE_SCTP_FUNC(sctp_sf_shutdown_sent_abort), \ |
251 | .name = "sctp_sf_shutdown_sent_abort"}, \ | ||
252 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | 251 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ |
253 | {.fn = sctp_sf_do_9_1_abort, .name = "sctp_sf_do_9_1_abort"}, \ | 252 | TYPE_SCTP_FUNC(sctp_sf_do_9_1_abort), \ |
254 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | 253 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ |
255 | {.fn = sctp_sf_shutdown_ack_sent_abort, \ | 254 | TYPE_SCTP_FUNC(sctp_sf_shutdown_ack_sent_abort), \ |
256 | .name = "sctp_sf_shutdown_ack_sent_abort"}, \ | ||
257 | } /* TYPE_SCTP_ABORT */ | 255 | } /* TYPE_SCTP_ABORT */ |
258 | 256 | ||
259 | #define TYPE_SCTP_SHUTDOWN { \ | 257 | #define TYPE_SCTP_SHUTDOWN { \ |
260 | /* SCTP_STATE_EMPTY */ \ | 258 | /* SCTP_STATE_EMPTY */ \ |
261 | {.fn = sctp_sf_ootb, .name = "sctp_sf_ootb"}, \ | 259 | TYPE_SCTP_FUNC(sctp_sf_ootb), \ |
262 | /* SCTP_STATE_CLOSED */ \ | 260 | /* SCTP_STATE_CLOSED */ \ |
263 | {.fn = sctp_sf_tabort_8_4_8, .name = "sctp_sf_tabort_8_4_8"}, \ | 261 | TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \ |
264 | /* SCTP_STATE_COOKIE_WAIT */ \ | 262 | /* SCTP_STATE_COOKIE_WAIT */ \ |
265 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | 263 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
266 | /* SCTP_STATE_COOKIE_ECHOED */ \ | 264 | /* SCTP_STATE_COOKIE_ECHOED */ \ |
267 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | 265 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
268 | /* SCTP_STATE_ESTABLISHED */ \ | 266 | /* SCTP_STATE_ESTABLISHED */ \ |
269 | {.fn = sctp_sf_do_9_2_shutdown, .name = "sctp_sf_do_9_2_shutdown"}, \ | 267 | TYPE_SCTP_FUNC(sctp_sf_do_9_2_shutdown), \ |
270 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | 268 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ |
271 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | 269 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
272 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | 270 | /* SCTP_STATE_SHUTDOWN_SENT */ \ |
273 | {.fn = sctp_sf_do_9_2_shutdown_ack, \ | 271 | TYPE_SCTP_FUNC(sctp_sf_do_9_2_shutdown_ack), \ |
274 | .name = "sctp_sf_do_9_2_shutdown_ack"}, \ | ||
275 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | 272 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ |
276 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | 273 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
277 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | 274 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ |
278 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | 275 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
279 | } /* TYPE_SCTP_SHUTDOWN */ | 276 | } /* TYPE_SCTP_SHUTDOWN */ |
280 | 277 | ||
281 | #define TYPE_SCTP_SHUTDOWN_ACK { \ | 278 | #define TYPE_SCTP_SHUTDOWN_ACK { \ |
282 | /* SCTP_STATE_EMPTY */ \ | 279 | /* SCTP_STATE_EMPTY */ \ |
283 | {.fn = sctp_sf_ootb, .name = "sctp_sf_ootb"}, \ | 280 | TYPE_SCTP_FUNC(sctp_sf_ootb), \ |
284 | /* SCTP_STATE_CLOSED */ \ | 281 | /* SCTP_STATE_CLOSED */ \ |
285 | {.fn = sctp_sf_ootb, .name = "sctp_sf_ootb"}, \ | 282 | TYPE_SCTP_FUNC(sctp_sf_ootb), \ |
286 | /* SCTP_STATE_COOKIE_WAIT */ \ | 283 | /* SCTP_STATE_COOKIE_WAIT */ \ |
287 | {.fn = sctp_sf_do_8_5_1_E_sa, .name = "sctp_sf_do_8_5_1_E_sa"}, \ | 284 | TYPE_SCTP_FUNC(sctp_sf_do_8_5_1_E_sa), \ |
288 | /* SCTP_STATE_COOKIE_ECHOED */ \ | 285 | /* SCTP_STATE_COOKIE_ECHOED */ \ |
289 | {.fn = sctp_sf_do_8_5_1_E_sa, .name = "sctp_sf_do_8_5_1_E_sa"}, \ | 286 | TYPE_SCTP_FUNC(sctp_sf_do_8_5_1_E_sa), \ |
290 | /* SCTP_STATE_ESTABLISHED */ \ | 287 | /* SCTP_STATE_ESTABLISHED */ \ |
291 | {.fn = sctp_sf_violation, .name = "sctp_sf_violation"}, \ | 288 | TYPE_SCTP_FUNC(sctp_sf_violation), \ |
292 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | 289 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ |
293 | {.fn = sctp_sf_violation, .name = "sctp_sf_violation"}, \ | 290 | TYPE_SCTP_FUNC(sctp_sf_violation), \ |
294 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | 291 | /* SCTP_STATE_SHUTDOWN_SENT */ \ |
295 | {.fn = sctp_sf_do_9_2_final, .name = "sctp_sf_do_9_2_final"}, \ | 292 | TYPE_SCTP_FUNC(sctp_sf_do_9_2_final), \ |
296 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | 293 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ |
297 | {.fn = sctp_sf_violation, .name = "sctp_sf_violation"}, \ | 294 | TYPE_SCTP_FUNC(sctp_sf_violation), \ |
298 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | 295 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ |
299 | {.fn = sctp_sf_do_9_2_final, .name = "sctp_sf_do_9_2_final"}, \ | 296 | TYPE_SCTP_FUNC(sctp_sf_do_9_2_final), \ |
300 | } /* TYPE_SCTP_SHUTDOWN_ACK */ | 297 | } /* TYPE_SCTP_SHUTDOWN_ACK */ |
301 | 298 | ||
302 | #define TYPE_SCTP_ERROR { \ | 299 | #define TYPE_SCTP_ERROR { \ |
303 | /* SCTP_STATE_EMPTY */ \ | 300 | /* SCTP_STATE_EMPTY */ \ |
304 | {.fn = sctp_sf_ootb, .name = "sctp_sf_ootb"}, \ | 301 | TYPE_SCTP_FUNC(sctp_sf_ootb), \ |
305 | /* SCTP_STATE_CLOSED */ \ | 302 | /* SCTP_STATE_CLOSED */ \ |
306 | {.fn = sctp_sf_tabort_8_4_8, .name = "sctp_sf_tabort_8_4_8"}, \ | 303 | TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \ |
307 | /* SCTP_STATE_COOKIE_WAIT */ \ | 304 | /* SCTP_STATE_COOKIE_WAIT */ \ |
308 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | 305 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
309 | /* SCTP_STATE_COOKIE_ECHOED */ \ | 306 | /* SCTP_STATE_COOKIE_ECHOED */ \ |
310 | {.fn = sctp_sf_cookie_echoed_err, .name = "sctp_sf_cookie_echoed_err"}, \ | 307 | TYPE_SCTP_FUNC(sctp_sf_cookie_echoed_err), \ |
311 | /* SCTP_STATE_ESTABLISHED */ \ | 308 | /* SCTP_STATE_ESTABLISHED */ \ |
312 | {.fn = sctp_sf_operr_notify, .name = "sctp_sf_operr_notify"}, \ | 309 | TYPE_SCTP_FUNC(sctp_sf_operr_notify), \ |
313 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | 310 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ |
314 | {.fn = sctp_sf_operr_notify, .name = "sctp_sf_operr_notify"}, \ | 311 | TYPE_SCTP_FUNC(sctp_sf_operr_notify), \ |
315 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | 312 | /* SCTP_STATE_SHUTDOWN_SENT */ \ |
316 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | 313 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
317 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | 314 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ |
318 | {.fn = sctp_sf_operr_notify, .name = "sctp_sf_operr_notify"}, \ | 315 | TYPE_SCTP_FUNC(sctp_sf_operr_notify), \ |
319 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | 316 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ |
320 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | 317 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
321 | } /* TYPE_SCTP_ERROR */ | 318 | } /* TYPE_SCTP_ERROR */ |
322 | 319 | ||
323 | #define TYPE_SCTP_COOKIE_ECHO { \ | 320 | #define TYPE_SCTP_COOKIE_ECHO { \ |
324 | /* SCTP_STATE_EMPTY */ \ | 321 | /* SCTP_STATE_EMPTY */ \ |
325 | {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ | 322 | TYPE_SCTP_FUNC(sctp_sf_bug), \ |
326 | /* SCTP_STATE_CLOSED */ \ | 323 | /* SCTP_STATE_CLOSED */ \ |
327 | {.fn = sctp_sf_do_5_1D_ce, .name = "sctp_sf_do_5_1D_ce"}, \ | 324 | TYPE_SCTP_FUNC(sctp_sf_do_5_1D_ce), \ |
328 | /* SCTP_STATE_COOKIE_WAIT */ \ | 325 | /* SCTP_STATE_COOKIE_WAIT */ \ |
329 | {.fn = sctp_sf_do_5_2_4_dupcook, .name = "sctp_sf_do_5_2_4_dupcook"}, \ | 326 | TYPE_SCTP_FUNC(sctp_sf_do_5_2_4_dupcook), \ |
330 | /* SCTP_STATE_COOKIE_ECHOED */ \ | 327 | /* SCTP_STATE_COOKIE_ECHOED */ \ |
331 | {.fn = sctp_sf_do_5_2_4_dupcook, .name = "sctp_sf_do_5_2_4_dupcook"}, \ | 328 | TYPE_SCTP_FUNC(sctp_sf_do_5_2_4_dupcook), \ |
332 | /* SCTP_STATE_ESTABLISHED */ \ | 329 | /* SCTP_STATE_ESTABLISHED */ \ |
333 | {.fn = sctp_sf_do_5_2_4_dupcook, .name = "sctp_sf_do_5_2_4_dupcook"}, \ | 330 | TYPE_SCTP_FUNC(sctp_sf_do_5_2_4_dupcook), \ |
334 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | 331 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ |
335 | {.fn = sctp_sf_do_5_2_4_dupcook, .name = "sctp_sf_do_5_2_4_dupcook"}, \ | 332 | TYPE_SCTP_FUNC(sctp_sf_do_5_2_4_dupcook), \ |
336 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | 333 | /* SCTP_STATE_SHUTDOWN_SENT */ \ |
337 | {.fn = sctp_sf_do_5_2_4_dupcook, .name = "sctp_sf_do_5_2_4_dupcook"}, \ | 334 | TYPE_SCTP_FUNC(sctp_sf_do_5_2_4_dupcook), \ |
338 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | 335 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ |
339 | {.fn = sctp_sf_do_5_2_4_dupcook, .name = "sctp_sf_do_5_2_4_dupcook"}, \ | 336 | TYPE_SCTP_FUNC(sctp_sf_do_5_2_4_dupcook), \ |
340 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | 337 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ |
341 | {.fn = sctp_sf_do_5_2_4_dupcook, .name = "sctp_sf_do_5_2_4_dupcook"}, \ | 338 | TYPE_SCTP_FUNC(sctp_sf_do_5_2_4_dupcook), \ |
342 | } /* TYPE_SCTP_COOKIE_ECHO */ | 339 | } /* TYPE_SCTP_COOKIE_ECHO */ |
343 | 340 | ||
344 | #define TYPE_SCTP_COOKIE_ACK { \ | 341 | #define TYPE_SCTP_COOKIE_ACK { \ |
345 | /* SCTP_STATE_EMPTY */ \ | 342 | /* SCTP_STATE_EMPTY */ \ |
346 | {.fn = sctp_sf_ootb, .name = "sctp_sf_ootb"}, \ | 343 | TYPE_SCTP_FUNC(sctp_sf_ootb), \ |
347 | /* SCTP_STATE_CLOSED */ \ | 344 | /* SCTP_STATE_CLOSED */ \ |
348 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | 345 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
349 | /* SCTP_STATE_COOKIE_WAIT */ \ | 346 | /* SCTP_STATE_COOKIE_WAIT */ \ |
350 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | 347 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
351 | /* SCTP_STATE_COOKIE_ECHOED */ \ | 348 | /* SCTP_STATE_COOKIE_ECHOED */ \ |
352 | {.fn = sctp_sf_do_5_1E_ca, .name = "sctp_sf_do_5_1E_ca"}, \ | 349 | TYPE_SCTP_FUNC(sctp_sf_do_5_1E_ca), \ |
353 | /* SCTP_STATE_ESTABLISHED */ \ | 350 | /* SCTP_STATE_ESTABLISHED */ \ |
354 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | 351 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
355 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | 352 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ |
356 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | 353 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
357 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | 354 | /* SCTP_STATE_SHUTDOWN_SENT */ \ |
358 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | 355 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
359 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | 356 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ |
360 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | 357 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
361 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | 358 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ |
362 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | 359 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
363 | } /* TYPE_SCTP_COOKIE_ACK */ | 360 | } /* TYPE_SCTP_COOKIE_ACK */ |
364 | 361 | ||
365 | #define TYPE_SCTP_ECN_ECNE { \ | 362 | #define TYPE_SCTP_ECN_ECNE { \ |
366 | /* SCTP_STATE_EMPTY */ \ | 363 | /* SCTP_STATE_EMPTY */ \ |
367 | {.fn = sctp_sf_ootb, .name = "sctp_sf_ootb"}, \ | 364 | TYPE_SCTP_FUNC(sctp_sf_ootb), \ |
368 | /* SCTP_STATE_CLOSED */ \ | 365 | /* SCTP_STATE_CLOSED */ \ |
369 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | 366 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
370 | /* SCTP_STATE_COOKIE_WAIT */ \ | 367 | /* SCTP_STATE_COOKIE_WAIT */ \ |
371 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | 368 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
372 | /* SCTP_STATE_COOKIE_ECHOED */ \ | 369 | /* SCTP_STATE_COOKIE_ECHOED */ \ |
373 | {.fn = sctp_sf_do_ecne, .name = "sctp_sf_do_ecne"}, \ | 370 | TYPE_SCTP_FUNC(sctp_sf_do_ecne), \ |
374 | /* SCTP_STATE_ESTABLISHED */ \ | 371 | /* SCTP_STATE_ESTABLISHED */ \ |
375 | {.fn = sctp_sf_do_ecne, .name = "sctp_sf_do_ecne"}, \ | 372 | TYPE_SCTP_FUNC(sctp_sf_do_ecne), \ |
376 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | 373 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ |
377 | {.fn = sctp_sf_do_ecne, .name = "sctp_sf_do_ecne"}, \ | 374 | TYPE_SCTP_FUNC(sctp_sf_do_ecne), \ |
378 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | 375 | /* SCTP_STATE_SHUTDOWN_SENT */ \ |
379 | {.fn = sctp_sf_do_ecne, .name = "sctp_sf_do_ecne"}, \ | 376 | TYPE_SCTP_FUNC(sctp_sf_do_ecne), \ |
380 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | 377 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ |
381 | {.fn = sctp_sf_do_ecne, .name = "sctp_sf_do_ecne"}, \ | 378 | TYPE_SCTP_FUNC(sctp_sf_do_ecne), \ |
382 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | 379 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ |
383 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | 380 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
384 | } /* TYPE_SCTP_ECN_ECNE */ | 381 | } /* TYPE_SCTP_ECN_ECNE */ |
385 | 382 | ||
386 | #define TYPE_SCTP_ECN_CWR { \ | 383 | #define TYPE_SCTP_ECN_CWR { \ |
387 | /* SCTP_STATE_EMPTY */ \ | 384 | /* SCTP_STATE_EMPTY */ \ |
388 | {.fn = sctp_sf_ootb, .name = "sctp_sf_ootb"}, \ | 385 | TYPE_SCTP_FUNC(sctp_sf_ootb), \ |
389 | /* SCTP_STATE_CLOSED */ \ | 386 | /* SCTP_STATE_CLOSED */ \ |
390 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | 387 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
391 | /* SCTP_STATE_COOKIE_WAIT */ \ | 388 | /* SCTP_STATE_COOKIE_WAIT */ \ |
392 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | 389 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
393 | /* SCTP_STATE_COOKIE_ECHOED */ \ | 390 | /* SCTP_STATE_COOKIE_ECHOED */ \ |
394 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | 391 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
395 | /* SCTP_STATE_ESTABLISHED */ \ | 392 | /* SCTP_STATE_ESTABLISHED */ \ |
396 | {.fn = sctp_sf_do_ecn_cwr, .name = "sctp_sf_do_ecn_cwr"}, \ | 393 | TYPE_SCTP_FUNC(sctp_sf_do_ecn_cwr), \ |
397 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | 394 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ |
398 | {.fn = sctp_sf_do_ecn_cwr, .name = "sctp_sf_do_ecn_cwr"}, \ | 395 | TYPE_SCTP_FUNC(sctp_sf_do_ecn_cwr), \ |
399 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | 396 | /* SCTP_STATE_SHUTDOWN_SENT */ \ |
400 | {.fn = sctp_sf_do_ecn_cwr, .name = "sctp_sf_do_ecn_cwr"}, \ | 397 | TYPE_SCTP_FUNC(sctp_sf_do_ecn_cwr), \ |
401 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | 398 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ |
402 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | 399 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
403 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | 400 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ |
404 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | 401 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
405 | } /* TYPE_SCTP_ECN_CWR */ | 402 | } /* TYPE_SCTP_ECN_CWR */ |
406 | 403 | ||
407 | #define TYPE_SCTP_SHUTDOWN_COMPLETE { \ | 404 | #define TYPE_SCTP_SHUTDOWN_COMPLETE { \ |
408 | /* SCTP_STATE_EMPTY */ \ | 405 | /* SCTP_STATE_EMPTY */ \ |
409 | {.fn = sctp_sf_ootb, .name = "sctp_sf_ootb"}, \ | 406 | TYPE_SCTP_FUNC(sctp_sf_ootb), \ |
410 | /* SCTP_STATE_CLOSED */ \ | 407 | /* SCTP_STATE_CLOSED */ \ |
411 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | 408 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
412 | /* SCTP_STATE_COOKIE_WAIT */ \ | 409 | /* SCTP_STATE_COOKIE_WAIT */ \ |
413 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | 410 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
414 | /* SCTP_STATE_COOKIE_ECHOED */ \ | 411 | /* SCTP_STATE_COOKIE_ECHOED */ \ |
415 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | 412 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
416 | /* SCTP_STATE_ESTABLISHED */ \ | 413 | /* SCTP_STATE_ESTABLISHED */ \ |
417 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | 414 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
418 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | 415 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ |
419 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | 416 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
420 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | 417 | /* SCTP_STATE_SHUTDOWN_SENT */ \ |
421 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | 418 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
422 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | 419 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ |
423 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | 420 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
424 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | 421 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ |
425 | {.fn = sctp_sf_do_4_C, .name = "sctp_sf_do_4_C"}, \ | 422 | TYPE_SCTP_FUNC(sctp_sf_do_4_C), \ |
426 | } /* TYPE_SCTP_SHUTDOWN_COMPLETE */ | 423 | } /* TYPE_SCTP_SHUTDOWN_COMPLETE */ |
427 | 424 | ||
428 | /* The primary index for this table is the chunk type. | 425 | /* The primary index for this table is the chunk type. |
@@ -450,44 +447,44 @@ static const sctp_sm_table_entry_t chunk_event_table[SCTP_NUM_BASE_CHUNK_TYPES][ | |||
450 | 447 | ||
451 | #define TYPE_SCTP_ASCONF { \ | 448 | #define TYPE_SCTP_ASCONF { \ |
452 | /* SCTP_STATE_EMPTY */ \ | 449 | /* SCTP_STATE_EMPTY */ \ |
453 | {.fn = sctp_sf_ootb, .name = "sctp_sf_ootb"}, \ | 450 | TYPE_SCTP_FUNC(sctp_sf_ootb), \ |
454 | /* SCTP_STATE_CLOSED */ \ | 451 | /* SCTP_STATE_CLOSED */ \ |
455 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | 452 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
456 | /* SCTP_STATE_COOKIE_WAIT */ \ | 453 | /* SCTP_STATE_COOKIE_WAIT */ \ |
457 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | 454 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
458 | /* SCTP_STATE_COOKIE_ECHOED */ \ | 455 | /* SCTP_STATE_COOKIE_ECHOED */ \ |
459 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | 456 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
460 | /* SCTP_STATE_ESTABLISHED */ \ | 457 | /* SCTP_STATE_ESTABLISHED */ \ |
461 | {.fn = sctp_sf_do_asconf, .name = "sctp_sf_do_asconf"}, \ | 458 | TYPE_SCTP_FUNC(sctp_sf_do_asconf), \ |
462 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | 459 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ |
463 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | 460 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
464 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | 461 | /* SCTP_STATE_SHUTDOWN_SENT */ \ |
465 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | 462 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
466 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | 463 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ |
467 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | 464 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
468 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | 465 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ |
469 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | 466 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
470 | } /* TYPE_SCTP_ASCONF */ | 467 | } /* TYPE_SCTP_ASCONF */ |
471 | 468 | ||
472 | #define TYPE_SCTP_ASCONF_ACK { \ | 469 | #define TYPE_SCTP_ASCONF_ACK { \ |
473 | /* SCTP_STATE_EMPTY */ \ | 470 | /* SCTP_STATE_EMPTY */ \ |
474 | {.fn = sctp_sf_ootb, .name = "sctp_sf_ootb"}, \ | 471 | TYPE_SCTP_FUNC(sctp_sf_ootb), \ |
475 | /* SCTP_STATE_CLOSED */ \ | 472 | /* SCTP_STATE_CLOSED */ \ |
476 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | 473 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
477 | /* SCTP_STATE_COOKIE_WAIT */ \ | 474 | /* SCTP_STATE_COOKIE_WAIT */ \ |
478 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | 475 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
479 | /* SCTP_STATE_COOKIE_ECHOED */ \ | 476 | /* SCTP_STATE_COOKIE_ECHOED */ \ |
480 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | 477 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
481 | /* SCTP_STATE_ESTABLISHED */ \ | 478 | /* SCTP_STATE_ESTABLISHED */ \ |
482 | {.fn = sctp_sf_do_asconf_ack, .name = "sctp_sf_do_asconf_ack"}, \ | 479 | TYPE_SCTP_FUNC(sctp_sf_do_asconf_ack), \ |
483 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | 480 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ |
484 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | 481 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
485 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | 482 | /* SCTP_STATE_SHUTDOWN_SENT */ \ |
486 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | 483 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
487 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | 484 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ |
488 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | 485 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
489 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | 486 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ |
490 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | 487 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
491 | } /* TYPE_SCTP_ASCONF_ACK */ | 488 | } /* TYPE_SCTP_ASCONF_ACK */ |
492 | 489 | ||
493 | /* The primary index for this table is the chunk type. | 490 | /* The primary index for this table is the chunk type. |
@@ -500,23 +497,23 @@ static const sctp_sm_table_entry_t addip_chunk_event_table[SCTP_NUM_ADDIP_CHUNK_ | |||
500 | 497 | ||
501 | #define TYPE_SCTP_FWD_TSN { \ | 498 | #define TYPE_SCTP_FWD_TSN { \ |
502 | /* SCTP_STATE_EMPTY */ \ | 499 | /* SCTP_STATE_EMPTY */ \ |
503 | {.fn = sctp_sf_ootb, .name = "sctp_sf_ootb"}, \ | 500 | TYPE_SCTP_FUNC(sctp_sf_ootb), \ |
504 | /* SCTP_STATE_CLOSED */ \ | 501 | /* SCTP_STATE_CLOSED */ \ |
505 | {.fn = sctp_sf_tabort_8_4_8, .name = "sctp_sf_tabort_8_4_8"}, \ | 502 | TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), \ |
506 | /* SCTP_STATE_COOKIE_WAIT */ \ | 503 | /* SCTP_STATE_COOKIE_WAIT */ \ |
507 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | 504 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
508 | /* SCTP_STATE_COOKIE_ECHOED */ \ | 505 | /* SCTP_STATE_COOKIE_ECHOED */ \ |
509 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | 506 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
510 | /* SCTP_STATE_ESTABLISHED */ \ | 507 | /* SCTP_STATE_ESTABLISHED */ \ |
511 | {.fn = sctp_sf_eat_fwd_tsn, .name = "sctp_sf_eat_fwd_tsn"}, \ | 508 | TYPE_SCTP_FUNC(sctp_sf_eat_fwd_tsn), \ |
512 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | 509 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ |
513 | {.fn = sctp_sf_eat_fwd_tsn, .name = "sctp_sf_eat_fwd_tsn"}, \ | 510 | TYPE_SCTP_FUNC(sctp_sf_eat_fwd_tsn), \ |
514 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | 511 | /* SCTP_STATE_SHUTDOWN_SENT */ \ |
515 | {.fn = sctp_sf_eat_fwd_tsn_fast, .name = "sctp_sf_eat_fwd_tsn_fast"}, \ | 512 | TYPE_SCTP_FUNC(sctp_sf_eat_fwd_tsn_fast), \ |
516 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | 513 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ |
517 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | 514 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
518 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | 515 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ |
519 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | 516 | TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ |
520 | } /* TYPE_SCTP_FWD_TSN */ | 517 | } /* TYPE_SCTP_FWD_TSN */ |
521 | 518 | ||
522 | /* The primary index for this table is the chunk type. | 519 | /* The primary index for this table is the chunk type. |
@@ -529,167 +526,150 @@ static const sctp_sm_table_entry_t prsctp_chunk_event_table[SCTP_NUM_PRSCTP_CHUN | |||
529 | static const sctp_sm_table_entry_t | 526 | static const sctp_sm_table_entry_t |
530 | chunk_event_table_unknown[SCTP_STATE_NUM_STATES] = { | 527 | chunk_event_table_unknown[SCTP_STATE_NUM_STATES] = { |
531 | /* SCTP_STATE_EMPTY */ | 528 | /* SCTP_STATE_EMPTY */ |
532 | {.fn = sctp_sf_ootb, .name = "sctp_sf_ootb"}, | 529 | TYPE_SCTP_FUNC(sctp_sf_ootb), |
533 | /* SCTP_STATE_CLOSED */ | 530 | /* SCTP_STATE_CLOSED */ |
534 | {.fn = sctp_sf_tabort_8_4_8, .name = "sctp_sf_tabort_8_4_8"}, | 531 | TYPE_SCTP_FUNC(sctp_sf_tabort_8_4_8), |
535 | /* SCTP_STATE_COOKIE_WAIT */ | 532 | /* SCTP_STATE_COOKIE_WAIT */ |
536 | {.fn = sctp_sf_unk_chunk, .name = "sctp_sf_unk_chunk"}, | 533 | TYPE_SCTP_FUNC(sctp_sf_unk_chunk), |
537 | /* SCTP_STATE_COOKIE_ECHOED */ | 534 | /* SCTP_STATE_COOKIE_ECHOED */ |
538 | {.fn = sctp_sf_unk_chunk, .name = "sctp_sf_unk_chunk"}, | 535 | TYPE_SCTP_FUNC(sctp_sf_unk_chunk), |
539 | /* SCTP_STATE_ESTABLISHED */ | 536 | /* SCTP_STATE_ESTABLISHED */ |
540 | {.fn = sctp_sf_unk_chunk, .name = "sctp_sf_unk_chunk"}, | 537 | TYPE_SCTP_FUNC(sctp_sf_unk_chunk), |
541 | /* SCTP_STATE_SHUTDOWN_PENDING */ | 538 | /* SCTP_STATE_SHUTDOWN_PENDING */ |
542 | {.fn = sctp_sf_unk_chunk, .name = "sctp_sf_unk_chunk"}, | 539 | TYPE_SCTP_FUNC(sctp_sf_unk_chunk), |
543 | /* SCTP_STATE_SHUTDOWN_SENT */ | 540 | /* SCTP_STATE_SHUTDOWN_SENT */ |
544 | {.fn = sctp_sf_unk_chunk, .name = "sctp_sf_unk_chunk"}, | 541 | TYPE_SCTP_FUNC(sctp_sf_unk_chunk), |
545 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ | 542 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ |
546 | {.fn = sctp_sf_unk_chunk, .name = "sctp_sf_unk_chunk"}, | 543 | TYPE_SCTP_FUNC(sctp_sf_unk_chunk), |
547 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ | 544 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ |
548 | {.fn = sctp_sf_unk_chunk, .name = "sctp_sf_unk_chunk"}, | 545 | TYPE_SCTP_FUNC(sctp_sf_unk_chunk), |
549 | }; /* chunk unknown */ | 546 | }; /* chunk unknown */ |
550 | 547 | ||
551 | 548 | ||
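Each TYPE_SCTP_* block above is one row of a two-dimensional dispatch table: the primary index is the event subtype (chunk type, primitive, timeout, ...) and the secondary index is the association state. A hedged sketch of how such a table might be consulted (the helper name below is illustrative; the kernel's actual lookup routine and its fallback handling are not shown in this diff):

	/* Illustrative lookup: unknown chunk types fall back to the
	 * catch-all chunk_event_table_unknown row. */
	static const sctp_sm_table_entry_t *
	lookup_chunk_entry(int cid, int state)
	{
		if (state < 0 || state >= SCTP_STATE_NUM_STATES)
			return NULL;
		if (cid >= 0 && cid < SCTP_NUM_BASE_CHUNK_TYPES)
			return &chunk_event_table[cid][state];
		return &chunk_event_table_unknown[state];
	}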
552 | #define TYPE_SCTP_PRIMITIVE_ASSOCIATE { \ | 549 | #define TYPE_SCTP_PRIMITIVE_ASSOCIATE { \ |
553 | /* SCTP_STATE_EMPTY */ \ | 550 | /* SCTP_STATE_EMPTY */ \ |
554 | {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ | 551 | TYPE_SCTP_FUNC(sctp_sf_bug), \ |
555 | /* SCTP_STATE_CLOSED */ \ | 552 | /* SCTP_STATE_CLOSED */ \ |
556 | {.fn = sctp_sf_do_prm_asoc, .name = "sctp_sf_do_prm_asoc"}, \ | 553 | TYPE_SCTP_FUNC(sctp_sf_do_prm_asoc), \ |
557 | /* SCTP_STATE_COOKIE_WAIT */ \ | 554 | /* SCTP_STATE_COOKIE_WAIT */ \ |
558 | {.fn = sctp_sf_not_impl, .name = "sctp_sf_not_impl"}, \ | 555 | TYPE_SCTP_FUNC(sctp_sf_not_impl), \ |
559 | /* SCTP_STATE_COOKIE_ECHOED */ \ | 556 | /* SCTP_STATE_COOKIE_ECHOED */ \ |
560 | {.fn = sctp_sf_not_impl, .name = "sctp_sf_not_impl"}, \ | 557 | TYPE_SCTP_FUNC(sctp_sf_not_impl), \ |
561 | /* SCTP_STATE_ESTABLISHED */ \ | 558 | /* SCTP_STATE_ESTABLISHED */ \ |
562 | {.fn = sctp_sf_not_impl, .name = "sctp_sf_not_impl"}, \ | 559 | TYPE_SCTP_FUNC(sctp_sf_not_impl), \ |
563 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | 560 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ |
564 | {.fn = sctp_sf_not_impl, .name = "sctp_sf_not_impl"}, \ | 561 | TYPE_SCTP_FUNC(sctp_sf_not_impl), \ |
565 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | 562 | /* SCTP_STATE_SHUTDOWN_SENT */ \ |
566 | {.fn = sctp_sf_not_impl, .name = "sctp_sf_not_impl"}, \ | 563 | TYPE_SCTP_FUNC(sctp_sf_not_impl), \ |
567 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | 564 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ |
568 | {.fn = sctp_sf_not_impl, .name = "sctp_sf_not_impl"}, \ | 565 | TYPE_SCTP_FUNC(sctp_sf_not_impl), \ |
569 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | 566 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ |
570 | {.fn = sctp_sf_not_impl, .name = "sctp_sf_not_impl"}, \ | 567 | TYPE_SCTP_FUNC(sctp_sf_not_impl), \ |
571 | } /* TYPE_SCTP_PRIMITIVE_ASSOCIATE */ | 568 | } /* TYPE_SCTP_PRIMITIVE_ASSOCIATE */ |
572 | 569 | ||
573 | #define TYPE_SCTP_PRIMITIVE_SHUTDOWN { \ | 570 | #define TYPE_SCTP_PRIMITIVE_SHUTDOWN { \ |
574 | /* SCTP_STATE_EMPTY */ \ | 571 | /* SCTP_STATE_EMPTY */ \ |
575 | {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ | 572 | TYPE_SCTP_FUNC(sctp_sf_bug), \ |
576 | /* SCTP_STATE_CLOSED */ \ | 573 | /* SCTP_STATE_CLOSED */ \ |
577 | {.fn = sctp_sf_error_closed, .name = "sctp_sf_error_closed"}, \ | 574 | TYPE_SCTP_FUNC(sctp_sf_error_closed), \ |
578 | /* SCTP_STATE_COOKIE_WAIT */ \ | 575 | /* SCTP_STATE_COOKIE_WAIT */ \ |
579 | {.fn = sctp_sf_cookie_wait_prm_shutdown, \ | 576 | TYPE_SCTP_FUNC(sctp_sf_cookie_wait_prm_shutdown), \ |
580 | .name = "sctp_sf_cookie_wait_prm_shutdown"}, \ | ||
581 | /* SCTP_STATE_COOKIE_ECHOED */ \ | 577 | /* SCTP_STATE_COOKIE_ECHOED */ \ |
582 | {.fn = sctp_sf_cookie_echoed_prm_shutdown, \ | 578 | TYPE_SCTP_FUNC(sctp_sf_cookie_echoed_prm_shutdown),\ |
583 | .name = "sctp_sf_cookie_echoed_prm_shutdown"},\ | ||
584 | /* SCTP_STATE_ESTABLISHED */ \ | 579 | /* SCTP_STATE_ESTABLISHED */ \ |
585 | {.fn = sctp_sf_do_9_2_prm_shutdown, \ | 580 | TYPE_SCTP_FUNC(sctp_sf_do_9_2_prm_shutdown), \ |
586 | .name = "sctp_sf_do_9_2_prm_shutdown"}, \ | ||
587 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | 581 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ |
588 | {.fn = sctp_sf_ignore_primitive, .name = "sctp_sf_ignore_primitive"}, \ | 582 | TYPE_SCTP_FUNC(sctp_sf_ignore_primitive), \ |
589 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | 583 | /* SCTP_STATE_SHUTDOWN_SENT */ \ |
590 | {.fn = sctp_sf_ignore_primitive, .name = "sctp_sf_ignore_primitive"}, \ | 584 | TYPE_SCTP_FUNC(sctp_sf_ignore_primitive), \ |
591 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | 585 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ |
592 | {.fn = sctp_sf_ignore_primitive, .name = "sctp_sf_ignore_primitive"}, \ | 586 | TYPE_SCTP_FUNC(sctp_sf_ignore_primitive), \ |
593 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | 587 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ |
594 | {.fn = sctp_sf_ignore_primitive, .name = "sctp_sf_ignore_primitive"}, \ | 588 | TYPE_SCTP_FUNC(sctp_sf_ignore_primitive), \ |
595 | } /* TYPE_SCTP_PRIMITIVE_SHUTDOWN */ | 589 | } /* TYPE_SCTP_PRIMITIVE_SHUTDOWN */ |
596 | 590 | ||
597 | #define TYPE_SCTP_PRIMITIVE_ABORT { \ | 591 | #define TYPE_SCTP_PRIMITIVE_ABORT { \ |
598 | /* SCTP_STATE_EMPTY */ \ | 592 | /* SCTP_STATE_EMPTY */ \ |
599 | {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ | 593 | TYPE_SCTP_FUNC(sctp_sf_bug), \ |
600 | /* SCTP_STATE_CLOSED */ \ | 594 | /* SCTP_STATE_CLOSED */ \ |
601 | {.fn = sctp_sf_error_closed, .name = "sctp_sf_error_closed"}, \ | 595 | TYPE_SCTP_FUNC(sctp_sf_error_closed), \ |
602 | /* SCTP_STATE_COOKIE_WAIT */ \ | 596 | /* SCTP_STATE_COOKIE_WAIT */ \ |
603 | {.fn = sctp_sf_cookie_wait_prm_abort, \ | 597 | TYPE_SCTP_FUNC(sctp_sf_cookie_wait_prm_abort), \ |
604 | .name = "sctp_sf_cookie_wait_prm_abort"}, \ | ||
605 | /* SCTP_STATE_COOKIE_ECHOED */ \ | 598 | /* SCTP_STATE_COOKIE_ECHOED */ \ |
606 | {.fn = sctp_sf_cookie_echoed_prm_abort, \ | 599 | TYPE_SCTP_FUNC(sctp_sf_cookie_echoed_prm_abort), \ |
607 | .name = "sctp_sf_cookie_echoed_prm_abort"}, \ | ||
608 | /* SCTP_STATE_ESTABLISHED */ \ | 600 | /* SCTP_STATE_ESTABLISHED */ \ |
609 | {.fn = sctp_sf_do_9_1_prm_abort, \ | 601 | TYPE_SCTP_FUNC(sctp_sf_do_9_1_prm_abort), \ |
610 | .name = "sctp_sf_do_9_1_prm_abort"}, \ | ||
611 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | 602 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ |
612 | {.fn = sctp_sf_shutdown_pending_prm_abort, \ | 603 | TYPE_SCTP_FUNC(sctp_sf_shutdown_pending_prm_abort), \ |
613 | .name = "sctp_sf_shutdown_pending_prm_abort"}, \ | ||
614 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | 604 | /* SCTP_STATE_SHUTDOWN_SENT */ \ |
615 | {.fn = sctp_sf_shutdown_sent_prm_abort, \ | 605 | TYPE_SCTP_FUNC(sctp_sf_shutdown_sent_prm_abort), \ |
616 | .name = "sctp_sf_shutdown_sent_prm_abort"}, \ | ||
617 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | 606 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ |
618 | {.fn = sctp_sf_do_9_1_prm_abort, \ | 607 | TYPE_SCTP_FUNC(sctp_sf_do_9_1_prm_abort), \ |
619 | .name = "sctp_sf_do_9_1_prm_abort"}, \ | ||
620 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | 608 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ |
621 | {.fn = sctp_sf_shutdown_ack_sent_prm_abort, \ | 609 | TYPE_SCTP_FUNC(sctp_sf_shutdown_ack_sent_prm_abort), \ |
622 | .name = "sctp_sf_shutdown_ack_sent_prm_abort"}, \ | ||
623 | } /* TYPE_SCTP_PRIMITIVE_ABORT */ | 610 | } /* TYPE_SCTP_PRIMITIVE_ABORT */ |
624 | 611 | ||
625 | #define TYPE_SCTP_PRIMITIVE_SEND { \ | 612 | #define TYPE_SCTP_PRIMITIVE_SEND { \ |
626 | /* SCTP_STATE_EMPTY */ \ | 613 | /* SCTP_STATE_EMPTY */ \ |
627 | {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ | 614 | TYPE_SCTP_FUNC(sctp_sf_bug), \ |
628 | /* SCTP_STATE_CLOSED */ \ | 615 | /* SCTP_STATE_CLOSED */ \ |
629 | {.fn = sctp_sf_error_closed, .name = "sctp_sf_error_closed"}, \ | 616 | TYPE_SCTP_FUNC(sctp_sf_error_closed), \ |
630 | /* SCTP_STATE_COOKIE_WAIT */ \ | 617 | /* SCTP_STATE_COOKIE_WAIT */ \ |
631 | {.fn = sctp_sf_do_prm_send, .name = "sctp_sf_do_prm_send"}, \ | 618 | TYPE_SCTP_FUNC(sctp_sf_do_prm_send), \ |
632 | /* SCTP_STATE_COOKIE_ECHOED */ \ | 619 | /* SCTP_STATE_COOKIE_ECHOED */ \ |
633 | {.fn = sctp_sf_do_prm_send, .name = "sctp_sf_do_prm_send"}, \ | 620 | TYPE_SCTP_FUNC(sctp_sf_do_prm_send), \ |
634 | /* SCTP_STATE_ESTABLISHED */ \ | 621 | /* SCTP_STATE_ESTABLISHED */ \ |
635 | {.fn = sctp_sf_do_prm_send, .name = "sctp_sf_do_prm_send"}, \ | 622 | TYPE_SCTP_FUNC(sctp_sf_do_prm_send), \ |
636 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | 623 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ |
637 | {.fn = sctp_sf_error_shutdown, .name = "sctp_sf_error_shutdown"}, \ | 624 | TYPE_SCTP_FUNC(sctp_sf_error_shutdown), \ |
638 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | 625 | /* SCTP_STATE_SHUTDOWN_SENT */ \ |
639 | {.fn = sctp_sf_error_shutdown, .name = "sctp_sf_error_shutdown"}, \ | 626 | TYPE_SCTP_FUNC(sctp_sf_error_shutdown), \ |
640 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | 627 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ |
641 | {.fn = sctp_sf_error_shutdown, .name = "sctp_sf_error_shutdown"}, \ | 628 | TYPE_SCTP_FUNC(sctp_sf_error_shutdown), \ |
642 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | 629 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ |
643 | {.fn = sctp_sf_error_shutdown, .name = "sctp_sf_error_shutdown"}, \ | 630 | TYPE_SCTP_FUNC(sctp_sf_error_shutdown), \ |
644 | } /* TYPE_SCTP_PRIMITIVE_SEND */ | 631 | } /* TYPE_SCTP_PRIMITIVE_SEND */ |
645 | 632 | ||
646 | #define TYPE_SCTP_PRIMITIVE_REQUESTHEARTBEAT { \ | 633 | #define TYPE_SCTP_PRIMITIVE_REQUESTHEARTBEAT { \ |
647 | /* SCTP_STATE_EMPTY */ \ | 634 | /* SCTP_STATE_EMPTY */ \ |
648 | {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ | 635 | TYPE_SCTP_FUNC(sctp_sf_bug), \ |
649 | /* SCTP_STATE_CLOSED */ \ | 636 | /* SCTP_STATE_CLOSED */ \ |
650 | {.fn = sctp_sf_error_closed, .name = "sctp_sf_error_closed"}, \ | 637 | TYPE_SCTP_FUNC(sctp_sf_error_closed), \ |
651 | /* SCTP_STATE_COOKIE_WAIT */ \ | 638 | /* SCTP_STATE_COOKIE_WAIT */ \ |
652 | {.fn = sctp_sf_do_prm_requestheartbeat, \ | 639 | TYPE_SCTP_FUNC(sctp_sf_do_prm_requestheartbeat), \ |
653 | .name = "sctp_sf_do_prm_requestheartbeat"}, \ | ||
654 | /* SCTP_STATE_COOKIE_ECHOED */ \ | 640 | /* SCTP_STATE_COOKIE_ECHOED */ \ |
655 | {.fn = sctp_sf_do_prm_requestheartbeat, \ | 641 | TYPE_SCTP_FUNC(sctp_sf_do_prm_requestheartbeat), \ |
656 | .name = "sctp_sf_do_prm_requestheartbeat"}, \ | ||
657 | /* SCTP_STATE_ESTABLISHED */ \ | 642 | /* SCTP_STATE_ESTABLISHED */ \ |
658 | {.fn = sctp_sf_do_prm_requestheartbeat, \ | 643 | TYPE_SCTP_FUNC(sctp_sf_do_prm_requestheartbeat), \ |
659 | .name = "sctp_sf_do_prm_requestheartbeat"}, \ | ||
660 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | 644 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ |
661 | {.fn = sctp_sf_do_prm_requestheartbeat, \ | 645 | TYPE_SCTP_FUNC(sctp_sf_do_prm_requestheartbeat), \ |
662 | .name = "sctp_sf_do_prm_requestheartbeat"}, \ | ||
663 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | 646 | /* SCTP_STATE_SHUTDOWN_SENT */ \ |
664 | {.fn = sctp_sf_do_prm_requestheartbeat, \ | 647 | TYPE_SCTP_FUNC(sctp_sf_do_prm_requestheartbeat), \ |
665 | .name = "sctp_sf_do_prm_requestheartbeat"}, \ | ||
666 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | 648 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ |
667 | {.fn = sctp_sf_do_prm_requestheartbeat, \ | 649 | TYPE_SCTP_FUNC(sctp_sf_do_prm_requestheartbeat), \ |
668 | .name = "sctp_sf_do_prm_requestheartbeat"}, \ | ||
669 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | 650 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ |
670 | {.fn = sctp_sf_do_prm_requestheartbeat, \ | 651 | TYPE_SCTP_FUNC(sctp_sf_do_prm_requestheartbeat), \ |
671 | .name = "sctp_sf_do_prm_requestheartbeat"}, \ | ||
672 | } /* TYPE_SCTP_PRIMITIVE_REQUESTHEARTBEAT */ | 652 | } /* TYPE_SCTP_PRIMITIVE_REQUESTHEARTBEAT */ |
673 | 653 | ||
674 | #define TYPE_SCTP_PRIMITIVE_ASCONF { \ | 654 | #define TYPE_SCTP_PRIMITIVE_ASCONF { \ |
675 | /* SCTP_STATE_EMPTY */ \ | 655 | /* SCTP_STATE_EMPTY */ \ |
676 | {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ | 656 | TYPE_SCTP_FUNC(sctp_sf_bug), \ |
677 | /* SCTP_STATE_CLOSED */ \ | 657 | /* SCTP_STATE_CLOSED */ \ |
678 | {.fn = sctp_sf_error_closed, .name = "sctp_sf_error_closed"}, \ | 658 | TYPE_SCTP_FUNC(sctp_sf_error_closed), \ |
679 | /* SCTP_STATE_COOKIE_WAIT */ \ | 659 | /* SCTP_STATE_COOKIE_WAIT */ \ |
680 | {.fn = sctp_sf_error_closed, .name = "sctp_sf_error_closed"}, \ | 660 | TYPE_SCTP_FUNC(sctp_sf_error_closed), \ |
681 | /* SCTP_STATE_COOKIE_ECHOED */ \ | 661 | /* SCTP_STATE_COOKIE_ECHOED */ \ |
682 | {.fn = sctp_sf_error_closed, .name = "sctp_sf_error_closed"}, \ | 662 | TYPE_SCTP_FUNC(sctp_sf_error_closed), \ |
683 | /* SCTP_STATE_ESTABLISHED */ \ | 663 | /* SCTP_STATE_ESTABLISHED */ \ |
684 | {.fn = sctp_sf_do_prm_asconf, .name = "sctp_sf_do_prm_asconf"}, \ | 664 | TYPE_SCTP_FUNC(sctp_sf_do_prm_asconf), \ |
685 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | 665 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ |
686 | {.fn = sctp_sf_error_shutdown, .name = "sctp_sf_error_shutdown"}, \ | 666 | TYPE_SCTP_FUNC(sctp_sf_error_shutdown), \ |
687 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | 667 | /* SCTP_STATE_SHUTDOWN_SENT */ \ |
688 | {.fn = sctp_sf_error_shutdown, .name = "sctp_sf_error_shutdown"}, \ | 668 | TYPE_SCTP_FUNC(sctp_sf_error_shutdown), \ |
689 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | 669 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ |
690 | {.fn = sctp_sf_error_shutdown, .name = "sctp_sf_error_shutdown"}, \ | 670 | TYPE_SCTP_FUNC(sctp_sf_error_shutdown), \ |
691 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | 671 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ |
692 | {.fn = sctp_sf_error_shutdown, .name = "sctp_sf_error_shutdown"}, \ | 672 | TYPE_SCTP_FUNC(sctp_sf_error_shutdown), \ |
693 | } /* TYPE_SCTP_PRIMITIVE_ASCONF */ | 673 | } /* TYPE_SCTP_PRIMITIVE_ASCONF */ |
694 | 674 | ||
695 | /* The primary index for this table is the primitive type. | 675 | /* The primary index for this table is the primitive type. |
@@ -706,47 +686,44 @@ static const sctp_sm_table_entry_t primitive_event_table[SCTP_NUM_PRIMITIVE_TYPE | |||
706 | 686 | ||
707 | #define TYPE_SCTP_OTHER_NO_PENDING_TSN { \ | 687 | #define TYPE_SCTP_OTHER_NO_PENDING_TSN { \ |
708 | /* SCTP_STATE_EMPTY */ \ | 688 | /* SCTP_STATE_EMPTY */ \ |
709 | {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ | 689 | TYPE_SCTP_FUNC(sctp_sf_bug), \ |
710 | /* SCTP_STATE_CLOSED */ \ | 690 | /* SCTP_STATE_CLOSED */ \ |
711 | {.fn = sctp_sf_ignore_other, .name = "sctp_sf_ignore_other"}, \ | 691 | TYPE_SCTP_FUNC(sctp_sf_ignore_other), \ |
712 | /* SCTP_STATE_COOKIE_WAIT */ \ | 692 | /* SCTP_STATE_COOKIE_WAIT */ \ |
713 | {.fn = sctp_sf_ignore_other, .name = "sctp_sf_ignore_other"}, \ | 693 | TYPE_SCTP_FUNC(sctp_sf_ignore_other), \ |
714 | /* SCTP_STATE_COOKIE_ECHOED */ \ | 694 | /* SCTP_STATE_COOKIE_ECHOED */ \ |
715 | {.fn = sctp_sf_ignore_other, .name = "sctp_sf_ignore_other"}, \ | 695 | TYPE_SCTP_FUNC(sctp_sf_ignore_other), \ |
716 | /* SCTP_STATE_ESTABLISHED */ \ | 696 | /* SCTP_STATE_ESTABLISHED */ \ |
717 | {.fn = sctp_sf_ignore_other, .name = "sctp_sf_ignore_other"}, \ | 697 | TYPE_SCTP_FUNC(sctp_sf_ignore_other), \ |
718 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | 698 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ |
719 | {.fn = sctp_sf_do_9_2_start_shutdown, \ | 699 | TYPE_SCTP_FUNC(sctp_sf_do_9_2_start_shutdown), \ |
720 | .name = "sctp_do_9_2_start_shutdown"}, \ | ||
721 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | 700 | /* SCTP_STATE_SHUTDOWN_SENT */ \ |
722 | {.fn = sctp_sf_ignore_other, .name = "sctp_sf_ignore_other"}, \ | 701 | TYPE_SCTP_FUNC(sctp_sf_ignore_other), \ |
723 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | 702 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ |
724 | {.fn = sctp_sf_do_9_2_shutdown_ack, \ | 703 | TYPE_SCTP_FUNC(sctp_sf_do_9_2_shutdown_ack), \ |
725 | .name = "sctp_sf_do_9_2_shutdown_ack"}, \ | ||
726 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | 704 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ |
727 | {.fn = sctp_sf_ignore_other, .name = "sctp_sf_ignore_other"}, \ | 705 | TYPE_SCTP_FUNC(sctp_sf_ignore_other), \ |
728 | } | 706 | } |
729 | 707 | ||
730 | #define TYPE_SCTP_OTHER_ICMP_PROTO_UNREACH { \ | 708 | #define TYPE_SCTP_OTHER_ICMP_PROTO_UNREACH { \ |
731 | /* SCTP_STATE_EMPTY */ \ | 709 | /* SCTP_STATE_EMPTY */ \ |
732 | {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ | 710 | TYPE_SCTP_FUNC(sctp_sf_bug), \ |
733 | /* SCTP_STATE_CLOSED */ \ | 711 | /* SCTP_STATE_CLOSED */ \ |
734 | {.fn = sctp_sf_ignore_other, .name = "sctp_sf_ignore_other"}, \ | 712 | TYPE_SCTP_FUNC(sctp_sf_ignore_other), \ |
735 | /* SCTP_STATE_COOKIE_WAIT */ \ | 713 | /* SCTP_STATE_COOKIE_WAIT */ \ |
736 | {.fn = sctp_sf_cookie_wait_icmp_abort, \ | 714 | TYPE_SCTP_FUNC(sctp_sf_cookie_wait_icmp_abort), \ |
737 | .name = "sctp_sf_cookie_wait_icmp_abort"}, \ | ||
738 | /* SCTP_STATE_COOKIE_ECHOED */ \ | 715 | /* SCTP_STATE_COOKIE_ECHOED */ \ |
739 | {.fn = sctp_sf_ignore_other, .name = "sctp_sf_ignore_other"}, \ | 716 | TYPE_SCTP_FUNC(sctp_sf_ignore_other), \ |
740 | /* SCTP_STATE_ESTABLISHED */ \ | 717 | /* SCTP_STATE_ESTABLISHED */ \ |
741 | {.fn = sctp_sf_ignore_other, .name = "sctp_sf_ignore_other"}, \ | 718 | TYPE_SCTP_FUNC(sctp_sf_ignore_other), \ |
742 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | 719 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ |
743 | {.fn = sctp_sf_ignore_other, .name = "sctp_sf_ignore_other"}, \ | 720 | TYPE_SCTP_FUNC(sctp_sf_ignore_other), \ |
744 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | 721 | /* SCTP_STATE_SHUTDOWN_SENT */ \ |
745 | {.fn = sctp_sf_ignore_other, .name = "sctp_sf_ignore_other"}, \ | 722 | TYPE_SCTP_FUNC(sctp_sf_ignore_other), \ |
746 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | 723 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ |
747 | {.fn = sctp_sf_ignore_other, .name = "sctp_sf_ignore_other"}, \ | 724 | TYPE_SCTP_FUNC(sctp_sf_ignore_other), \ |
748 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | 725 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ |
749 | {.fn = sctp_sf_ignore_other, .name = "sctp_sf_ignore_other"}, \ | 726 | TYPE_SCTP_FUNC(sctp_sf_ignore_other), \ |
750 | } | 727 | } |
751 | 728 | ||
752 | static const sctp_sm_table_entry_t other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_STATE_NUM_STATES] = { | 729 | static const sctp_sm_table_entry_t other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_STATE_NUM_STATES] = { |
@@ -756,215 +733,212 @@ static const sctp_sm_table_entry_t other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_ | |||
756 | 733 | ||
757 | #define TYPE_SCTP_EVENT_TIMEOUT_NONE { \ | 734 | #define TYPE_SCTP_EVENT_TIMEOUT_NONE { \ |
758 | /* SCTP_STATE_EMPTY */ \ | 735 | /* SCTP_STATE_EMPTY */ \ |
759 | {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ | 736 | TYPE_SCTP_FUNC(sctp_sf_bug), \ |
760 | /* SCTP_STATE_CLOSED */ \ | 737 | /* SCTP_STATE_CLOSED */ \ |
761 | {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ | 738 | TYPE_SCTP_FUNC(sctp_sf_bug), \ |
762 | /* SCTP_STATE_COOKIE_WAIT */ \ | 739 | /* SCTP_STATE_COOKIE_WAIT */ \ |
763 | {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ | 740 | TYPE_SCTP_FUNC(sctp_sf_bug), \ |
764 | /* SCTP_STATE_COOKIE_ECHOED */ \ | 741 | /* SCTP_STATE_COOKIE_ECHOED */ \ |
765 | {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ | 742 | TYPE_SCTP_FUNC(sctp_sf_bug), \ |
766 | /* SCTP_STATE_ESTABLISHED */ \ | 743 | /* SCTP_STATE_ESTABLISHED */ \ |
767 | {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ | 744 | TYPE_SCTP_FUNC(sctp_sf_bug), \ |
768 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | 745 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ |
769 | {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ | 746 | TYPE_SCTP_FUNC(sctp_sf_bug), \ |
770 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | 747 | /* SCTP_STATE_SHUTDOWN_SENT */ \ |
771 | {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ | 748 | TYPE_SCTP_FUNC(sctp_sf_bug), \ |
772 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | 749 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ |
773 | {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ | 750 | TYPE_SCTP_FUNC(sctp_sf_bug), \ |
774 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | 751 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ |
775 | {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ | 752 | TYPE_SCTP_FUNC(sctp_sf_bug), \ |
776 | } | 753 | } |
777 | 754 | ||
778 | #define TYPE_SCTP_EVENT_TIMEOUT_T1_COOKIE { \ | 755 | #define TYPE_SCTP_EVENT_TIMEOUT_T1_COOKIE { \ |
779 | /* SCTP_STATE_EMPTY */ \ | 756 | /* SCTP_STATE_EMPTY */ \ |
780 | {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ | 757 | TYPE_SCTP_FUNC(sctp_sf_bug), \ |
781 | /* SCTP_STATE_CLOSED */ \ | 758 | /* SCTP_STATE_CLOSED */ \ |
782 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | 759 | TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ |
783 | /* SCTP_STATE_COOKIE_WAIT */ \ | 760 | /* SCTP_STATE_COOKIE_WAIT */ \ |
784 | {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ | 761 | TYPE_SCTP_FUNC(sctp_sf_bug), \ |
785 | /* SCTP_STATE_COOKIE_ECHOED */ \ | 762 | /* SCTP_STATE_COOKIE_ECHOED */ \ |
786 | {.fn = sctp_sf_t1_cookie_timer_expire, \ | 763 | TYPE_SCTP_FUNC(sctp_sf_t1_cookie_timer_expire), \ |
787 | .name = "sctp_sf_t1_cookie_timer_expire"}, \ | ||
788 | /* SCTP_STATE_ESTABLISHED */ \ | 764 | /* SCTP_STATE_ESTABLISHED */ \ |
789 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | 765 | TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ |
790 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | 766 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ |
791 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | 767 | TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ |
792 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | 768 | /* SCTP_STATE_SHUTDOWN_SENT */ \ |
793 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | 769 | TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ |
794 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | 770 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ |
795 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | 771 | TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ |
796 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | 772 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ |
797 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | 773 | TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ |
798 | } | 774 | } |
799 | 775 | ||
800 | #define TYPE_SCTP_EVENT_TIMEOUT_T1_INIT { \ | 776 | #define TYPE_SCTP_EVENT_TIMEOUT_T1_INIT { \ |
801 | /* SCTP_STATE_EMPTY */ \ | 777 | /* SCTP_STATE_EMPTY */ \ |
802 | {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ | 778 | TYPE_SCTP_FUNC(sctp_sf_bug), \ |
803 | /* SCTP_STATE_CLOSED */ \ | 779 | /* SCTP_STATE_CLOSED */ \ |
804 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | 780 | TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ |
805 | /* SCTP_STATE_COOKIE_WAIT */ \ | 781 | /* SCTP_STATE_COOKIE_WAIT */ \ |
806 | {.fn = sctp_sf_t1_init_timer_expire, \ | 782 | TYPE_SCTP_FUNC(sctp_sf_t1_init_timer_expire), \ |
807 | .name = "sctp_sf_t1_init_timer_expire"}, \ | ||
808 | /* SCTP_STATE_COOKIE_ECHOED */ \ | 783 | /* SCTP_STATE_COOKIE_ECHOED */ \ |
809 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | 784 | TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ |
810 | /* SCTP_STATE_ESTABLISHED */ \ | 785 | /* SCTP_STATE_ESTABLISHED */ \ |
811 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | 786 | TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ |
812 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | 787 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ |
813 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | 788 | TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ |
814 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | 789 | /* SCTP_STATE_SHUTDOWN_SENT */ \ |
815 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | 790 | TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ |
816 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | 791 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ |
817 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | 792 | TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ |
818 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | 793 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ |
819 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | 794 | TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ |
820 | } | 795 | } |
821 | 796 | ||
822 | #define TYPE_SCTP_EVENT_TIMEOUT_T2_SHUTDOWN { \ | 797 | #define TYPE_SCTP_EVENT_TIMEOUT_T2_SHUTDOWN { \ |
823 | /* SCTP_STATE_EMPTY */ \ | 798 | /* SCTP_STATE_EMPTY */ \ |
824 | {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ | 799 | TYPE_SCTP_FUNC(sctp_sf_bug), \ |
825 | /* SCTP_STATE_CLOSED */ \ | 800 | /* SCTP_STATE_CLOSED */ \ |
826 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | 801 | TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ |
827 | /* SCTP_STATE_COOKIE_WAIT */ \ | 802 | /* SCTP_STATE_COOKIE_WAIT */ \ |
828 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | 803 | TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ |
829 | /* SCTP_STATE_COOKIE_ECHOED */ \ | 804 | /* SCTP_STATE_COOKIE_ECHOED */ \ |
830 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | 805 | TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ |
831 | /* SCTP_STATE_ESTABLISHED */ \ | 806 | /* SCTP_STATE_ESTABLISHED */ \ |
832 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | 807 | TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ |
833 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | 808 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ |
834 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | 809 | TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ |
835 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | 810 | /* SCTP_STATE_SHUTDOWN_SENT */ \ |
836 | {.fn = sctp_sf_t2_timer_expire, .name = "sctp_sf_t2_timer_expire"}, \ | 811 | TYPE_SCTP_FUNC(sctp_sf_t2_timer_expire), \ |
837 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | 812 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ |
838 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | 813 | TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ |
839 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | 814 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ |
840 | {.fn = sctp_sf_t2_timer_expire, .name = "sctp_sf_t2_timer_expire"}, \ | 815 | TYPE_SCTP_FUNC(sctp_sf_t2_timer_expire), \ |
841 | } | 816 | } |
842 | 817 | ||
843 | #define TYPE_SCTP_EVENT_TIMEOUT_T3_RTX { \ | 818 | #define TYPE_SCTP_EVENT_TIMEOUT_T3_RTX { \ |
844 | /* SCTP_STATE_EMPTY */ \ | 819 | /* SCTP_STATE_EMPTY */ \ |
845 | {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ | 820 | TYPE_SCTP_FUNC(sctp_sf_bug), \ |
846 | /* SCTP_STATE_CLOSED */ \ | 821 | /* SCTP_STATE_CLOSED */ \ |
847 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | 822 | TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ |
848 | /* SCTP_STATE_COOKIE_WAIT */ \ | 823 | /* SCTP_STATE_COOKIE_WAIT */ \ |
849 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | 824 | TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ |
850 | /* SCTP_STATE_COOKIE_ECHOED */ \ | 825 | /* SCTP_STATE_COOKIE_ECHOED */ \ |
851 | {.fn = sctp_sf_do_6_3_3_rtx, .name = "sctp_sf_do_6_3_3_rtx"}, \ | 826 | TYPE_SCTP_FUNC(sctp_sf_do_6_3_3_rtx), \ |
852 | /* SCTP_STATE_ESTABLISHED */ \ | 827 | /* SCTP_STATE_ESTABLISHED */ \ |
853 | {.fn = sctp_sf_do_6_3_3_rtx, .name = "sctp_sf_do_6_3_3_rtx"}, \ | 828 | TYPE_SCTP_FUNC(sctp_sf_do_6_3_3_rtx), \ |
854 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | 829 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ |
855 | {.fn = sctp_sf_do_6_3_3_rtx, .name = "sctp_sf_do_6_3_3_rtx"}, \ | 830 | TYPE_SCTP_FUNC(sctp_sf_do_6_3_3_rtx), \ |
856 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | 831 | /* SCTP_STATE_SHUTDOWN_SENT */ \ |
857 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | 832 | TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ |
858 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | 833 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ |
859 | {.fn = sctp_sf_do_6_3_3_rtx, .name = "sctp_sf_do_6_3_3_rtx"}, \ | 834 | TYPE_SCTP_FUNC(sctp_sf_do_6_3_3_rtx), \ |
860 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | 835 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ |
861 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | 836 | TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ |
862 | } | 837 | } |
863 | 838 | ||
864 | #define TYPE_SCTP_EVENT_TIMEOUT_T4_RTO { \ | 839 | #define TYPE_SCTP_EVENT_TIMEOUT_T4_RTO { \ |
865 | /* SCTP_STATE_EMPTY */ \ | 840 | /* SCTP_STATE_EMPTY */ \ |
866 | {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ | 841 | TYPE_SCTP_FUNC(sctp_sf_bug), \ |
867 | /* SCTP_STATE_CLOSED */ \ | 842 | /* SCTP_STATE_CLOSED */ \ |
868 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | 843 | TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ |
869 | /* SCTP_STATE_COOKIE_WAIT */ \ | 844 | /* SCTP_STATE_COOKIE_WAIT */ \ |
870 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | 845 | TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ |
871 | /* SCTP_STATE_COOKIE_ECHOED */ \ | 846 | /* SCTP_STATE_COOKIE_ECHOED */ \ |
872 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | 847 | TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ |
873 | /* SCTP_STATE_ESTABLISHED */ \ | 848 | /* SCTP_STATE_ESTABLISHED */ \ |
874 | {.fn = sctp_sf_t4_timer_expire, .name = "sctp_sf_t4_timer_expire"}, \ | 849 | TYPE_SCTP_FUNC(sctp_sf_t4_timer_expire), \ |
875 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | 850 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ |
876 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | 851 | TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ |
877 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | 852 | /* SCTP_STATE_SHUTDOWN_SENT */ \ |
878 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | 853 | TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ |
879 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | 854 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ |
880 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | 855 | TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ |
881 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | 856 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ |
882 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | 857 | TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ |
883 | } | 858 | } |
884 | 859 | ||
885 | #define TYPE_SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD { \ | 860 | #define TYPE_SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD { \ |
886 | /* SCTP_STATE_EMPTY */ \ | 861 | /* SCTP_STATE_EMPTY */ \ |
887 | {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ | 862 | TYPE_SCTP_FUNC(sctp_sf_bug), \ |
888 | /* SCTP_STATE_CLOSED */ \ | 863 | /* SCTP_STATE_CLOSED */ \ |
889 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | 864 | TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ |
890 | /* SCTP_STATE_COOKIE_WAIT */ \ | 865 | /* SCTP_STATE_COOKIE_WAIT */ \ |
891 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | 866 | TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ |
892 | /* SCTP_STATE_COOKIE_ECHOED */ \ | 867 | /* SCTP_STATE_COOKIE_ECHOED */ \ |
893 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | 868 | TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ |
894 | /* SCTP_STATE_ESTABLISHED */ \ | 869 | /* SCTP_STATE_ESTABLISHED */ \ |
895 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | 870 | TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ |
896 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | 871 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ |
897 | {.fn = sctp_sf_t5_timer_expire, .name = "sctp_sf_t5_timer_expire"}, \ | 872 | TYPE_SCTP_FUNC(sctp_sf_t5_timer_expire), \ |
898 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | 873 | /* SCTP_STATE_SHUTDOWN_SENT */ \ |
899 | {.fn = sctp_sf_t5_timer_expire, .name = "sctp_sf_t5_timer_expire"}, \ | 874 | TYPE_SCTP_FUNC(sctp_sf_t5_timer_expire), \ |
900 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | 875 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ |
901 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | 876 | TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ |
902 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | 877 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ |
903 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | 878 | TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ |
904 | } | 879 | } |
905 | 880 | ||
906 | #define TYPE_SCTP_EVENT_TIMEOUT_HEARTBEAT { \ | 881 | #define TYPE_SCTP_EVENT_TIMEOUT_HEARTBEAT { \ |
907 | /* SCTP_STATE_EMPTY */ \ | 882 | /* SCTP_STATE_EMPTY */ \ |
908 | {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ | 883 | TYPE_SCTP_FUNC(sctp_sf_bug), \ |
909 | /* SCTP_STATE_CLOSED */ \ | 884 | /* SCTP_STATE_CLOSED */ \ |
910 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | 885 | TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ |
911 | /* SCTP_STATE_COOKIE_WAIT */ \ | 886 | /* SCTP_STATE_COOKIE_WAIT */ \ |
912 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | 887 | TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ |
913 | /* SCTP_STATE_COOKIE_ECHOED */ \ | 888 | /* SCTP_STATE_COOKIE_ECHOED */ \ |
914 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | 889 | TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ |
915 | /* SCTP_STATE_ESTABLISHED */ \ | 890 | /* SCTP_STATE_ESTABLISHED */ \ |
916 | {.fn = sctp_sf_sendbeat_8_3, .name = "sctp_sf_sendbeat_8_3"}, \ | 891 | TYPE_SCTP_FUNC(sctp_sf_sendbeat_8_3), \ |
917 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | 892 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ |
918 | {.fn = sctp_sf_sendbeat_8_3, .name = "sctp_sf_sendbeat_8_3"}, \ | 893 | TYPE_SCTP_FUNC(sctp_sf_sendbeat_8_3), \ |
919 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | 894 | /* SCTP_STATE_SHUTDOWN_SENT */ \ |
920 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | 895 | TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ |
921 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | 896 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ |
922 | {.fn = sctp_sf_sendbeat_8_3, .name = "sctp_sf_sendbeat_8_3"}, \ | 897 | TYPE_SCTP_FUNC(sctp_sf_sendbeat_8_3), \ |
923 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | 898 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ |
924 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | 899 | TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ |
925 | } | 900 | } |
926 | 901 | ||
927 | #define TYPE_SCTP_EVENT_TIMEOUT_SACK { \ | 902 | #define TYPE_SCTP_EVENT_TIMEOUT_SACK { \ |
928 | /* SCTP_STATE_EMPTY */ \ | 903 | /* SCTP_STATE_EMPTY */ \ |
929 | {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ | 904 | TYPE_SCTP_FUNC(sctp_sf_bug), \ |
930 | /* SCTP_STATE_CLOSED */ \ | 905 | /* SCTP_STATE_CLOSED */ \ |
931 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | 906 | TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ |
932 | /* SCTP_STATE_COOKIE_WAIT */ \ | 907 | /* SCTP_STATE_COOKIE_WAIT */ \ |
933 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | 908 | TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ |
934 | /* SCTP_STATE_COOKIE_ECHOED */ \ | 909 | /* SCTP_STATE_COOKIE_ECHOED */ \ |
935 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | 910 | TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ |
936 | /* SCTP_STATE_ESTABLISHED */ \ | 911 | /* SCTP_STATE_ESTABLISHED */ \ |
937 | {.fn = sctp_sf_do_6_2_sack, .name = "sctp_sf_do_6_2_sack"}, \ | 912 | TYPE_SCTP_FUNC(sctp_sf_do_6_2_sack), \ |
938 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | 913 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ |
939 | {.fn = sctp_sf_do_6_2_sack, .name = "sctp_sf_do_6_2_sack"}, \ | 914 | TYPE_SCTP_FUNC(sctp_sf_do_6_2_sack), \ |
940 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | 915 | /* SCTP_STATE_SHUTDOWN_SENT */ \ |
941 | {.fn = sctp_sf_do_6_2_sack, .name = "sctp_sf_do_6_2_sack"}, \ | 916 | TYPE_SCTP_FUNC(sctp_sf_do_6_2_sack), \ |
942 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | 917 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ |
943 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | 918 | TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ |
944 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | 919 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ |
945 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | 920 | TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ |
946 | } | 921 | } |
947 | 922 | ||
948 | #define TYPE_SCTP_EVENT_TIMEOUT_AUTOCLOSE { \ | 923 | #define TYPE_SCTP_EVENT_TIMEOUT_AUTOCLOSE { \ |
949 | /* SCTP_STATE_EMPTY */ \ | 924 | /* SCTP_STATE_EMPTY */ \ |
950 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | 925 | TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ |
951 | /* SCTP_STATE_CLOSED */ \ | 926 | /* SCTP_STATE_CLOSED */ \ |
952 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | 927 | TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ |
953 | /* SCTP_STATE_COOKIE_WAIT */ \ | 928 | /* SCTP_STATE_COOKIE_WAIT */ \ |
954 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | 929 | TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ |
955 | /* SCTP_STATE_COOKIE_ECHOED */ \ | 930 | /* SCTP_STATE_COOKIE_ECHOED */ \ |
956 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | 931 | TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ |
957 | /* SCTP_STATE_ESTABLISHED */ \ | 932 | /* SCTP_STATE_ESTABLISHED */ \ |
958 | {.fn = sctp_sf_autoclose_timer_expire, \ | 933 | TYPE_SCTP_FUNC(sctp_sf_autoclose_timer_expire), \ |
959 | .name = "sctp_sf_autoclose_timer_expire"}, \ | ||
960 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | 934 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ |
961 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | 935 | TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ |
962 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | 936 | /* SCTP_STATE_SHUTDOWN_SENT */ \ |
963 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | 937 | TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ |
964 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | 938 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ |
965 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | 939 | TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ |
966 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | 940 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ |
967 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | 941 | TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ |
968 | } | 942 | } |
969 | 943 | ||
970 | static const sctp_sm_table_entry_t timeout_event_table[SCTP_NUM_TIMEOUT_TYPES][SCTP_STATE_NUM_STATES] = { | 944 | static const sctp_sm_table_entry_t timeout_event_table[SCTP_NUM_TIMEOUT_TYPES][SCTP_STATE_NUM_STATES] = { |
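The sm_statetable.c hunks above replace every open-coded {.fn = ..., .name = "..."} initializer with TYPE_SCTP_FUNC(). The macro's definition is not part of these hunks; a one-line stringifying macro of the following shape would reproduce the old initializers exactly (a sketch, not a quote of the file, which may differ):

#define TYPE_SCTP_FUNC(func)	{.fn = func, .name = #func}

With that definition, TYPE_SCTP_FUNC(sctp_sf_timer_ignore) expands to {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, so the state tables keep their debug names while each entry shrinks to a single token.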
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 935bc9187fd8..02b27145b279 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
@@ -229,11 +229,9 @@ static struct sctp_transport *sctp_addr_id2transport(struct sock *sk, | |||
229 | struct sctp_transport *transport; | 229 | struct sctp_transport *transport; |
230 | union sctp_addr *laddr = (union sctp_addr *)addr; | 230 | union sctp_addr *laddr = (union sctp_addr *)addr; |
231 | 231 | ||
232 | laddr->v4.sin_port = ntohs(laddr->v4.sin_port); | ||
233 | addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep, | 232 | addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep, |
234 | (union sctp_addr *)addr, | 233 | laddr, |
235 | &transport); | 234 | &transport); |
236 | laddr->v4.sin_port = htons(laddr->v4.sin_port); | ||
237 | 235 | ||
238 | if (!addr_asoc) | 236 | if (!addr_asoc) |
239 | return NULL; | 237 | return NULL; |
@@ -368,9 +366,7 @@ SCTP_STATIC int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len) | |||
368 | sctp_write_lock(&ep->base.addr_lock); | 366 | sctp_write_lock(&ep->base.addr_lock); |
369 | 367 | ||
370 | /* Use GFP_ATOMIC since BHs are disabled. */ | 368 | /* Use GFP_ATOMIC since BHs are disabled. */ |
371 | addr->v4.sin_port = ntohs(addr->v4.sin_port); | ||
372 | ret = sctp_add_bind_addr(bp, addr, 1, GFP_ATOMIC); | 369 | ret = sctp_add_bind_addr(bp, addr, 1, GFP_ATOMIC); |
373 | addr->v4.sin_port = htons(addr->v4.sin_port); | ||
374 | sctp_write_unlock(&ep->base.addr_lock); | 370 | sctp_write_unlock(&ep->base.addr_lock); |
375 | sctp_local_bh_enable(); | 371 | sctp_local_bh_enable(); |
376 | 372 | ||
@@ -572,7 +568,6 @@ static int sctp_send_asconf_add_ip(struct sock *sk, | |||
572 | addr = (union sctp_addr *)addr_buf; | 568 | addr = (union sctp_addr *)addr_buf; |
573 | af = sctp_get_af_specific(addr->v4.sin_family); | 569 | af = sctp_get_af_specific(addr->v4.sin_family); |
574 | memcpy(&saveaddr, addr, af->sockaddr_len); | 570 | memcpy(&saveaddr, addr, af->sockaddr_len); |
575 | saveaddr.v4.sin_port = ntohs(saveaddr.v4.sin_port); | ||
576 | retval = sctp_add_bind_addr(bp, &saveaddr, 0, | 571 | retval = sctp_add_bind_addr(bp, &saveaddr, 0, |
577 | GFP_ATOMIC); | 572 | GFP_ATOMIC); |
578 | addr_buf += af->sockaddr_len; | 573 | addr_buf += af->sockaddr_len; |
@@ -607,9 +602,8 @@ int sctp_bindx_rem(struct sock *sk, struct sockaddr *addrs, int addrcnt) | |||
607 | int cnt; | 602 | int cnt; |
608 | struct sctp_bind_addr *bp = &ep->base.bind_addr; | 603 | struct sctp_bind_addr *bp = &ep->base.bind_addr; |
609 | int retval = 0; | 604 | int retval = 0; |
610 | union sctp_addr saveaddr; | ||
611 | void *addr_buf; | 605 | void *addr_buf; |
612 | struct sockaddr *sa_addr; | 606 | union sctp_addr *sa_addr; |
613 | struct sctp_af *af; | 607 | struct sctp_af *af; |
614 | 608 | ||
615 | SCTP_DEBUG_PRINTK("sctp_bindx_rem (sk: %p, addrs: %p, addrcnt: %d)\n", | 609 | SCTP_DEBUG_PRINTK("sctp_bindx_rem (sk: %p, addrs: %p, addrcnt: %d)\n", |
@@ -627,19 +621,13 @@ int sctp_bindx_rem(struct sock *sk, struct sockaddr *addrs, int addrcnt) | |||
627 | goto err_bindx_rem; | 621 | goto err_bindx_rem; |
628 | } | 622 | } |
629 | 623 | ||
630 | /* The list may contain either IPv4 or IPv6 address; | 624 | sa_addr = (union sctp_addr *)addr_buf; |
631 | * determine the address length to copy the address to | 625 | af = sctp_get_af_specific(sa_addr->sa.sa_family); |
632 | * saveaddr. | ||
633 | */ | ||
634 | sa_addr = (struct sockaddr *)addr_buf; | ||
635 | af = sctp_get_af_specific(sa_addr->sa_family); | ||
636 | if (!af) { | 626 | if (!af) { |
637 | retval = -EINVAL; | 627 | retval = -EINVAL; |
638 | goto err_bindx_rem; | 628 | goto err_bindx_rem; |
639 | } | 629 | } |
640 | memcpy(&saveaddr, sa_addr, af->sockaddr_len); | 630 | if (sa_addr->v4.sin_port != htons(bp->port)) { |
641 | saveaddr.v4.sin_port = ntohs(saveaddr.v4.sin_port); | ||
642 | if (saveaddr.v4.sin_port != bp->port) { | ||
643 | retval = -EINVAL; | 631 | retval = -EINVAL; |
644 | goto err_bindx_rem; | 632 | goto err_bindx_rem; |
645 | } | 633 | } |
@@ -654,7 +642,7 @@ int sctp_bindx_rem(struct sock *sk, struct sockaddr *addrs, int addrcnt) | |||
654 | sctp_local_bh_disable(); | 642 | sctp_local_bh_disable(); |
655 | sctp_write_lock(&ep->base.addr_lock); | 643 | sctp_write_lock(&ep->base.addr_lock); |
656 | 644 | ||
657 | retval = sctp_del_bind_addr(bp, &saveaddr); | 645 | retval = sctp_del_bind_addr(bp, sa_addr); |
658 | 646 | ||
659 | sctp_write_unlock(&ep->base.addr_lock); | 647 | sctp_write_unlock(&ep->base.addr_lock); |
660 | sctp_local_bh_enable(); | 648 | sctp_local_bh_enable(); |
@@ -693,7 +681,6 @@ static int sctp_send_asconf_del_ip(struct sock *sk, | |||
693 | struct sctp_bind_addr *bp; | 681 | struct sctp_bind_addr *bp; |
694 | struct sctp_chunk *chunk; | 682 | struct sctp_chunk *chunk; |
695 | union sctp_addr *laddr; | 683 | union sctp_addr *laddr; |
696 | union sctp_addr saveaddr; | ||
697 | void *addr_buf; | 684 | void *addr_buf; |
698 | struct sctp_af *af; | 685 | struct sctp_af *af; |
699 | struct list_head *pos, *pos1; | 686 | struct list_head *pos, *pos1; |
@@ -773,13 +760,11 @@ static int sctp_send_asconf_del_ip(struct sock *sk, | |||
773 | for (i = 0; i < addrcnt; i++) { | 760 | for (i = 0; i < addrcnt; i++) { |
774 | laddr = (union sctp_addr *)addr_buf; | 761 | laddr = (union sctp_addr *)addr_buf; |
775 | af = sctp_get_af_specific(laddr->v4.sin_family); | 762 | af = sctp_get_af_specific(laddr->v4.sin_family); |
776 | memcpy(&saveaddr, laddr, af->sockaddr_len); | ||
777 | saveaddr.v4.sin_port = ntohs(saveaddr.v4.sin_port); | ||
778 | list_for_each(pos1, &bp->address_list) { | 763 | list_for_each(pos1, &bp->address_list) { |
779 | saddr = list_entry(pos1, | 764 | saddr = list_entry(pos1, |
780 | struct sctp_sockaddr_entry, | 765 | struct sctp_sockaddr_entry, |
781 | list); | 766 | list); |
782 | if (sctp_cmp_addr_exact(&saddr->a, &saveaddr)) | 767 | if (sctp_cmp_addr_exact(&saddr->a, laddr)) |
783 | saddr->use_as_src = 0; | 768 | saddr->use_as_src = 0; |
784 | } | 769 | } |
785 | addr_buf += af->sockaddr_len; | 770 | addr_buf += af->sockaddr_len; |
@@ -979,7 +964,7 @@ static int __sctp_connect(struct sock* sk, | |||
979 | int err = 0; | 964 | int err = 0; |
980 | int addrcnt = 0; | 965 | int addrcnt = 0; |
981 | int walk_size = 0; | 966 | int walk_size = 0; |
982 | struct sockaddr *sa_addr; | 967 | union sctp_addr *sa_addr; |
983 | void *addr_buf; | 968 | void *addr_buf; |
984 | 969 | ||
985 | sp = sctp_sk(sk); | 970 | sp = sctp_sk(sk); |
@@ -999,8 +984,8 @@ static int __sctp_connect(struct sock* sk, | |||
999 | /* Walk through the addrs buffer and count the number of addresses. */ | 984 | /* Walk through the addrs buffer and count the number of addresses. */ |
1000 | addr_buf = kaddrs; | 985 | addr_buf = kaddrs; |
1001 | while (walk_size < addrs_size) { | 986 | while (walk_size < addrs_size) { |
1002 | sa_addr = (struct sockaddr *)addr_buf; | 987 | sa_addr = (union sctp_addr *)addr_buf; |
1003 | af = sctp_get_af_specific(sa_addr->sa_family); | 988 | af = sctp_get_af_specific(sa_addr->sa.sa_family); |
1004 | 989 | ||
1005 | /* If the address family is not supported or if this address | 990 | /* If the address family is not supported or if this address |
1006 | * causes the address buffer to overflow return EINVAL. | 991 | * causes the address buffer to overflow return EINVAL. |
@@ -1010,18 +995,16 @@ static int __sctp_connect(struct sock* sk, | |||
1010 | goto out_free; | 995 | goto out_free; |
1011 | } | 996 | } |
1012 | 997 | ||
1013 | err = sctp_verify_addr(sk, (union sctp_addr *)sa_addr, | 998 | err = sctp_verify_addr(sk, sa_addr, af->sockaddr_len); |
1014 | af->sockaddr_len); | ||
1015 | if (err) | 999 | if (err) |
1016 | goto out_free; | 1000 | goto out_free; |
1017 | 1001 | ||
1018 | memcpy(&to, sa_addr, af->sockaddr_len); | 1002 | memcpy(&to, sa_addr, af->sockaddr_len); |
1019 | to.v4.sin_port = ntohs(to.v4.sin_port); | ||
1020 | 1003 | ||
1021 | /* Check if there already is a matching association on the | 1004 | /* Check if there already is a matching association on the |
1022 | * endpoint (other than the one created here). | 1005 | * endpoint (other than the one created here). |
1023 | */ | 1006 | */ |
1024 | asoc2 = sctp_endpoint_lookup_assoc(ep, &to, &transport); | 1007 | asoc2 = sctp_endpoint_lookup_assoc(ep, sa_addr, &transport); |
1025 | if (asoc2 && asoc2 != asoc) { | 1008 | if (asoc2 && asoc2 != asoc) { |
1026 | if (asoc2->state >= SCTP_STATE_ESTABLISHED) | 1009 | if (asoc2->state >= SCTP_STATE_ESTABLISHED) |
1027 | err = -EISCONN; | 1010 | err = -EISCONN; |
@@ -1034,7 +1017,7 @@ static int __sctp_connect(struct sock* sk, | |||
1034 | * make sure that there is no peeled-off association matching | 1017 | * make sure that there is no peeled-off association matching |
1035 | * the peer address even on another socket. | 1018 | * the peer address even on another socket. |
1036 | */ | 1019 | */ |
1037 | if (sctp_endpoint_is_peeled_off(ep, &to)) { | 1020 | if (sctp_endpoint_is_peeled_off(ep, sa_addr)) { |
1038 | err = -EADDRNOTAVAIL; | 1021 | err = -EADDRNOTAVAIL; |
1039 | goto out_free; | 1022 | goto out_free; |
1040 | } | 1023 | } |
@@ -1065,7 +1048,7 @@ static int __sctp_connect(struct sock* sk, | |||
1065 | } | 1048 | } |
1066 | } | 1049 | } |
1067 | 1050 | ||
1068 | scope = sctp_scope(&to); | 1051 | scope = sctp_scope(sa_addr); |
1069 | asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL); | 1052 | asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL); |
1070 | if (!asoc) { | 1053 | if (!asoc) { |
1071 | err = -ENOMEM; | 1054 | err = -ENOMEM; |
@@ -1074,7 +1057,7 @@ static int __sctp_connect(struct sock* sk, | |||
1074 | } | 1057 | } |
1075 | 1058 | ||
1076 | /* Prime the peer's transport structures. */ | 1059 | /* Prime the peer's transport structures. */ |
1077 | transport = sctp_assoc_add_peer(asoc, &to, GFP_KERNEL, | 1060 | transport = sctp_assoc_add_peer(asoc, sa_addr, GFP_KERNEL, |
1078 | SCTP_UNKNOWN); | 1061 | SCTP_UNKNOWN); |
1079 | if (!transport) { | 1062 | if (!transport) { |
1080 | err = -ENOMEM; | 1063 | err = -ENOMEM; |
@@ -1427,11 +1410,6 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk, | |||
1427 | if (msg_namelen > sizeof(to)) | 1410 | if (msg_namelen > sizeof(to)) |
1428 | msg_namelen = sizeof(to); | 1411 | msg_namelen = sizeof(to); |
1429 | memcpy(&to, msg->msg_name, msg_namelen); | 1412 | memcpy(&to, msg->msg_name, msg_namelen); |
1430 | SCTP_DEBUG_PRINTK("Just memcpy'd. msg_name is " | ||
1431 | "0x%x:%u.\n", | ||
1432 | to.v4.sin_addr.s_addr, to.v4.sin_port); | ||
1433 | |||
1434 | to.v4.sin_port = ntohs(to.v4.sin_port); | ||
1435 | msg_name = msg->msg_name; | 1413 | msg_name = msg->msg_name; |
1436 | } | 1414 | } |
1437 | 1415 | ||
@@ -3217,8 +3195,8 @@ static int sctp_getsockopt_sctp_status(struct sock *sk, int len, | |||
3217 | status.sstat_outstrms = asoc->c.sinit_num_ostreams; | 3195 | status.sstat_outstrms = asoc->c.sinit_num_ostreams; |
3218 | status.sstat_fragmentation_point = asoc->frag_point; | 3196 | status.sstat_fragmentation_point = asoc->frag_point; |
3219 | status.sstat_primary.spinfo_assoc_id = sctp_assoc2id(transport->asoc); | 3197 | status.sstat_primary.spinfo_assoc_id = sctp_assoc2id(transport->asoc); |
3220 | memcpy(&status.sstat_primary.spinfo_address, | 3198 | memcpy(&status.sstat_primary.spinfo_address, &transport->ipaddr, |
3221 | &(transport->ipaddr), sizeof(union sctp_addr)); | 3199 | transport->af_specific->sockaddr_len); |
3222 | /* Map ipv4 address into v4-mapped-on-v6 address. */ | 3200 | /* Map ipv4 address into v4-mapped-on-v6 address. */ |
3223 | sctp_get_pf_specific(sk->sk_family)->addr_v4map(sctp_sk(sk), | 3201 | sctp_get_pf_specific(sk->sk_family)->addr_v4map(sctp_sk(sk), |
3224 | (union sctp_addr *)&status.sstat_primary.spinfo_address); | 3202 | (union sctp_addr *)&status.sstat_primary.spinfo_address); |
@@ -3770,7 +3748,6 @@ static int sctp_getsockopt_peer_addrs_old(struct sock *sk, int len, | |||
3770 | memcpy(&temp, &from->ipaddr, sizeof(temp)); | 3748 | memcpy(&temp, &from->ipaddr, sizeof(temp)); |
3771 | sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp); | 3749 | sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp); |
3772 | addrlen = sctp_get_af_specific(sk->sk_family)->sockaddr_len; | 3750 | addrlen = sctp_get_af_specific(sk->sk_family)->sockaddr_len; |
3773 | temp.v4.sin_port = htons(temp.v4.sin_port); | ||
3774 | if (copy_to_user(to, &temp, addrlen)) | 3751 | if (copy_to_user(to, &temp, addrlen)) |
3775 | return -EFAULT; | 3752 | return -EFAULT; |
3776 | to += addrlen ; | 3753 | to += addrlen ; |
@@ -3821,7 +3798,6 @@ static int sctp_getsockopt_peer_addrs(struct sock *sk, int len, | |||
3821 | addrlen = sctp_get_af_specific(sk->sk_family)->sockaddr_len; | 3798 | addrlen = sctp_get_af_specific(sk->sk_family)->sockaddr_len; |
3822 | if(space_left < addrlen) | 3799 | if(space_left < addrlen) |
3823 | return -ENOMEM; | 3800 | return -ENOMEM; |
3824 | temp.v4.sin_port = htons(temp.v4.sin_port); | ||
3825 | if (copy_to_user(to, &temp, addrlen)) | 3801 | if (copy_to_user(to, &temp, addrlen)) |
3826 | return -EFAULT; | 3802 | return -EFAULT; |
3827 | to += addrlen; | 3803 | to += addrlen; |
@@ -3889,7 +3865,7 @@ static int sctp_getsockopt_local_addrs_num_old(struct sock *sk, int len, | |||
3889 | struct sctp_sockaddr_entry, | 3865 | struct sctp_sockaddr_entry, |
3890 | list); | 3866 | list); |
3891 | if ((PF_INET == sk->sk_family) && | 3867 | if ((PF_INET == sk->sk_family) && |
3892 | (AF_INET6 == addr->a.sa.sa_family)) | 3868 | (AF_INET6 == addr->a.sa.sa_family)) |
3893 | continue; | 3869 | continue; |
3894 | cnt++; | 3870 | cnt++; |
3895 | } | 3871 | } |
@@ -3933,7 +3909,6 @@ static int sctp_copy_laddrs_to_user_old(struct sock *sk, __u16 port, int max_add | |||
3933 | sctp_get_pf_specific(sk->sk_family)->addr_v4map(sctp_sk(sk), | 3909 | sctp_get_pf_specific(sk->sk_family)->addr_v4map(sctp_sk(sk), |
3934 | &temp); | 3910 | &temp); |
3935 | addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len; | 3911 | addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len; |
3936 | temp.v4.sin_port = htons(port); | ||
3937 | if (copy_to_user(to, &temp, addrlen)) { | 3912 | if (copy_to_user(to, &temp, addrlen)) { |
3938 | sctp_spin_unlock_irqrestore(&sctp_local_addr_lock, | 3913 | sctp_spin_unlock_irqrestore(&sctp_local_addr_lock, |
3939 | flags); | 3914 | flags); |
@@ -3970,7 +3945,6 @@ static int sctp_copy_laddrs_to_user(struct sock *sk, __u16 port, | |||
3970 | addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len; | 3945 | addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len; |
3971 | if(space_left<addrlen) | 3946 | if(space_left<addrlen) |
3972 | return -ENOMEM; | 3947 | return -ENOMEM; |
3973 | temp.v4.sin_port = htons(port); | ||
3974 | if (copy_to_user(*to, &temp, addrlen)) { | 3948 | if (copy_to_user(*to, &temp, addrlen)) { |
3975 | sctp_spin_unlock_irqrestore(&sctp_local_addr_lock, | 3949 | sctp_spin_unlock_irqrestore(&sctp_local_addr_lock, |
3976 | flags); | 3950 | flags); |
@@ -4055,7 +4029,6 @@ static int sctp_getsockopt_local_addrs_old(struct sock *sk, int len, | |||
4055 | memcpy(&temp, &addr->a, sizeof(temp)); | 4029 | memcpy(&temp, &addr->a, sizeof(temp)); |
4056 | sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp); | 4030 | sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp); |
4057 | addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len; | 4031 | addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len; |
4058 | temp.v4.sin_port = htons(temp.v4.sin_port); | ||
4059 | if (copy_to_user(to, &temp, addrlen)) { | 4032 | if (copy_to_user(to, &temp, addrlen)) { |
4060 | err = -EFAULT; | 4033 | err = -EFAULT; |
4061 | goto unlock; | 4034 | goto unlock; |
@@ -4146,7 +4119,6 @@ static int sctp_getsockopt_local_addrs(struct sock *sk, int len, | |||
4146 | addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len; | 4119 | addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len; |
4147 | if(space_left < addrlen) | 4120 | if(space_left < addrlen) |
4148 | return -ENOMEM; /*fixme: right error?*/ | 4121 | return -ENOMEM; /*fixme: right error?*/ |
4149 | temp.v4.sin_port = htons(temp.v4.sin_port); | ||
4150 | if (copy_to_user(to, &temp, addrlen)) { | 4122 | if (copy_to_user(to, &temp, addrlen)) { |
4151 | err = -EFAULT; | 4123 | err = -EFAULT; |
4152 | goto unlock; | 4124 | goto unlock; |
@@ -4194,12 +4166,8 @@ static int sctp_getsockopt_primary_addr(struct sock *sk, int len, | |||
4194 | if (!asoc->peer.primary_path) | 4166 | if (!asoc->peer.primary_path) |
4195 | return -ENOTCONN; | 4167 | return -ENOTCONN; |
4196 | 4168 | ||
4197 | asoc->peer.primary_path->ipaddr.v4.sin_port = | ||
4198 | htons(asoc->peer.primary_path->ipaddr.v4.sin_port); | ||
4199 | memcpy(&prim.ssp_addr, &asoc->peer.primary_path->ipaddr, | 4169 | memcpy(&prim.ssp_addr, &asoc->peer.primary_path->ipaddr, |
4200 | sizeof(union sctp_addr)); | 4170 | asoc->peer.primary_path->af_specific->sockaddr_len); |
4201 | asoc->peer.primary_path->ipaddr.v4.sin_port = | ||
4202 | ntohs(asoc->peer.primary_path->ipaddr.v4.sin_port); | ||
4203 | 4171 | ||
4204 | sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, | 4172 | sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, |
4205 | (union sctp_addr *)&prim.ssp_addr); | 4173 | (union sctp_addr *)&prim.ssp_addr); |
@@ -4645,9 +4613,7 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr) | |||
4645 | unsigned short snum; | 4613 | unsigned short snum; |
4646 | int ret; | 4614 | int ret; |
4647 | 4615 | ||
4648 | /* NOTE: Remember to put this back to net order. */ | 4616 | snum = ntohs(addr->v4.sin_port); |
4649 | addr->v4.sin_port = ntohs(addr->v4.sin_port); | ||
4650 | snum = addr->v4.sin_port; | ||
4651 | 4617 | ||
4652 | SCTP_DEBUG_PRINTK("sctp_get_port() begins, snum=%d\n", snum); | 4618 | SCTP_DEBUG_PRINTK("sctp_get_port() begins, snum=%d\n", snum); |
4653 | sctp_local_bh_disable(); | 4619 | sctp_local_bh_disable(); |
@@ -4784,7 +4750,6 @@ fail_unlock: | |||
4784 | 4750 | ||
4785 | fail: | 4751 | fail: |
4786 | sctp_local_bh_enable(); | 4752 | sctp_local_bh_enable(); |
4787 | addr->v4.sin_port = htons(addr->v4.sin_port); | ||
4788 | return ret; | 4753 | return ret; |
4789 | } | 4754 | } |
4790 | 4755 | ||
@@ -5083,7 +5048,7 @@ static int sctp_autobind(struct sock *sk) | |||
5083 | { | 5048 | { |
5084 | union sctp_addr autoaddr; | 5049 | union sctp_addr autoaddr; |
5085 | struct sctp_af *af; | 5050 | struct sctp_af *af; |
5086 | unsigned short port; | 5051 | __be16 port; |
5087 | 5052 | ||
5088 | /* Initialize a local sockaddr structure to INADDR_ANY. */ | 5053 | /* Initialize a local sockaddr structure to INADDR_ANY. */ |
5089 | af = sctp_sk(sk)->pf->af; | 5054 | af = sctp_sk(sk)->pf->af; |
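The socket.c hunks above all enforce one convention: union sctp_addr, and every sin_port inside it, stays in network byte order end to end. The ntohs()/htons() round-trips around lookups, binds and copy_to_user() disappear, comparisons convert the host-order side once (sa_addr->v4.sin_port != htons(bp->port)), locals that hold wire values become __be16, and peer-address copies use the family's sockaddr_len rather than sizeof(union sctp_addr). A small user-space illustration of the same discipline (names here are illustrative only, not kernel code):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t bound_port = 9;	/* host order, like bp->port */
	struct sockaddr_in sa = { 0 };

	sa.sin_family = AF_INET;
	sa.sin_port = htons(9);		/* field kept in network order */

	/* Compare by converting the host-order side exactly once. */
	printf("port match: %d\n", sa.sin_port == htons(bound_port));

	/* Take a host-order copy only where arithmetic needs it (snum). */
	uint16_t snum = ntohs(sa.sin_port);
	printf("snum = %u\n", snum);
	return 0;
}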
diff --git a/net/sctp/tsnmap.c b/net/sctp/tsnmap.c index ac4fae161bc7..42d9498c64fa 100644 --- a/net/sctp/tsnmap.c +++ b/net/sctp/tsnmap.c | |||
@@ -401,13 +401,14 @@ __u16 sctp_tsnmap_num_gabs(struct sctp_tsnmap *map) | |||
401 | 401 | ||
402 | /* Refresh the gap ack information. */ | 402 | /* Refresh the gap ack information. */ |
403 | if (sctp_tsnmap_has_gap(map)) { | 403 | if (sctp_tsnmap_has_gap(map)) { |
404 | __u16 start, end; | ||
404 | sctp_tsnmap_iter_init(map, &iter); | 405 | sctp_tsnmap_iter_init(map, &iter); |
405 | while (sctp_tsnmap_next_gap_ack(map, &iter, | 406 | while (sctp_tsnmap_next_gap_ack(map, &iter, |
406 | &map->gabs[gabs].start, | 407 | &start, |
407 | &map->gabs[gabs].end)) { | 408 | &end)) { |
408 | 409 | ||
409 | map->gabs[gabs].start = htons(map->gabs[gabs].start); | 410 | map->gabs[gabs].start = htons(start); |
410 | map->gabs[gabs].end = htons(map->gabs[gabs].end); | 411 | map->gabs[gabs].end = htons(end); |
411 | gabs++; | 412 | gabs++; |
412 | if (gabs >= SCTP_MAX_GABS) | 413 | if (gabs >= SCTP_MAX_GABS) |
413 | break; | 414 | break; |
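The tsnmap.c fix applies the same rule to the gap-ack array: the iterator yields host-order TSN offsets, so they are collected into local start/end temporaries and converted exactly once when stored, rather than byte-swapping map->gabs[] in place afterwards. A self-contained rendering of the pattern (the iterator and types are simplified stand-ins, not the SCTP ones):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_GABS 4

struct gab { uint16_t start, end; };	/* stored in network byte order */

/* Toy iterator standing in for sctp_tsnmap_next_gap_ack(). */
static int next_gap(int *i, uint16_t *start, uint16_t *end)
{
	static const uint16_t gaps[][2] = { {2, 3}, {7, 9} };

	if (*i >= 2)
		return 0;
	*start = gaps[*i][0];
	*end = gaps[*i][1];
	(*i)++;
	return 1;
}

int main(void)
{
	struct gab gabs[MAX_GABS];
	uint16_t start, end;		/* host-order temporaries */
	int it = 0, n = 0;

	while (next_gap(&it, &start, &end)) {
		gabs[n].start = htons(start);	/* convert exactly once */
		gabs[n].end = htons(end);
		if (++n >= MAX_GABS)
			break;
	}
	printf("%d gap-ack blocks, first = %u..%u\n",
	       n, ntohs(gabs[0].start), ntohs(gabs[0].end));
	return 0;
}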
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c index a015283a9087..e255a709f1b7 100644 --- a/net/sctp/ulpevent.c +++ b/net/sctp/ulpevent.c | |||
@@ -351,7 +351,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_remote_error( | |||
351 | struct sctp_remote_error *sre; | 351 | struct sctp_remote_error *sre; |
352 | struct sk_buff *skb; | 352 | struct sk_buff *skb; |
353 | sctp_errhdr_t *ch; | 353 | sctp_errhdr_t *ch; |
354 | __u16 cause; | 354 | __be16 cause; |
355 | int elen; | 355 | int elen; |
356 | 356 | ||
357 | ch = (sctp_errhdr_t *)(chunk->skb->data); | 357 | ch = (sctp_errhdr_t *)(chunk->skb->data); |
diff --git a/net/socket.c b/net/socket.c index 6c9b9b326d76..e8db54702a69 100644 --- a/net/socket.c +++ b/net/socket.c | |||
@@ -77,7 +77,6 @@ | |||
77 | #include <linux/cache.h> | 77 | #include <linux/cache.h> |
78 | #include <linux/module.h> | 78 | #include <linux/module.h> |
79 | #include <linux/highmem.h> | 79 | #include <linux/highmem.h> |
80 | #include <linux/divert.h> | ||
81 | #include <linux/mount.h> | 80 | #include <linux/mount.h> |
82 | #include <linux/security.h> | 81 | #include <linux/security.h> |
83 | #include <linux/syscalls.h> | 82 | #include <linux/syscalls.h> |
@@ -852,11 +851,6 @@ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg) | |||
852 | err = vlan_ioctl_hook(argp); | 851 | err = vlan_ioctl_hook(argp); |
853 | mutex_unlock(&vlan_ioctl_mutex); | 852 | mutex_unlock(&vlan_ioctl_mutex); |
854 | break; | 853 | break; |
855 | case SIOCGIFDIVERT: | ||
856 | case SIOCSIFDIVERT: | ||
857 | /* Convert this to call through a hook */ | ||
858 | err = divert_ioctl(cmd, argp); | ||
859 | break; | ||
860 | case SIOCADDDLCI: | 854 | case SIOCADDDLCI: |
861 | case SIOCDELDLCI: | 855 | case SIOCDELDLCI: |
862 | err = -ENOPKG; | 856 | err = -ENOPKG; |
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index b36b9463f5a4..e5a84a482e57 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c | |||
@@ -198,11 +198,10 @@ simple_get_netobj(const void *p, const void *end, struct xdr_netobj *dest) | |||
198 | q = (const void *)((const char *)p + len); | 198 | q = (const void *)((const char *)p + len); |
199 | if (unlikely(q > end || q < p)) | 199 | if (unlikely(q > end || q < p)) |
200 | return ERR_PTR(-EFAULT); | 200 | return ERR_PTR(-EFAULT); |
201 | dest->data = kmalloc(len, GFP_KERNEL); | 201 | dest->data = kmemdup(p, len, GFP_KERNEL); |
202 | if (unlikely(dest->data == NULL)) | 202 | if (unlikely(dest->data == NULL)) |
203 | return ERR_PTR(-ENOMEM); | 203 | return ERR_PTR(-ENOMEM); |
204 | dest->len = len; | 204 | dest->len = len; |
205 | memcpy(dest->data, p, len); | ||
206 | return q; | 205 | return q; |
207 | } | 206 | } |
208 | 207 | ||
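This is the first of several conversions in this merge from the kmalloc()+memcpy() pair to kmemdup(); gss_krb5_mech.c, gss_spkm3_mech.c, svcauth_gss.c, clnt.c and xfrm_user.c repeat it below, and svcauth_gss.c uses the string form kstrdup() for the pseudoflavor name. kmemdup() allocates and copies in one call and returns NULL on failure, so every call site keeps its existing error path. A minimal kernel-style sketch of equivalent usage (the struct and helper are illustrative, not quoted from the tree):

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>

struct blob {			/* illustrative stand-in for xdr_netobj */
	unsigned int len;
	void *data;
};

static int blob_dup(struct blob *dst, const void *src, unsigned int len)
{
	dst->data = kmemdup(src, len, GFP_KERNEL);	/* allocate and copy in one call */
	if (!dst->data)
		return -ENOMEM;
	dst->len = len;
	return 0;
}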
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c index 325e72e4fd31..754b8cd6439f 100644 --- a/net/sunrpc/auth_gss/gss_krb5_mech.c +++ b/net/sunrpc/auth_gss/gss_krb5_mech.c | |||
@@ -70,10 +70,9 @@ simple_get_netobj(const void *p, const void *end, struct xdr_netobj *res) | |||
70 | q = (const void *)((const char *)p + len); | 70 | q = (const void *)((const char *)p + len); |
71 | if (unlikely(q > end || q < p)) | 71 | if (unlikely(q > end || q < p)) |
72 | return ERR_PTR(-EFAULT); | 72 | return ERR_PTR(-EFAULT); |
73 | res->data = kmalloc(len, GFP_KERNEL); | 73 | res->data = kmemdup(p, len, GFP_KERNEL); |
74 | if (unlikely(res->data == NULL)) | 74 | if (unlikely(res->data == NULL)) |
75 | return ERR_PTR(-ENOMEM); | 75 | return ERR_PTR(-ENOMEM); |
76 | memcpy(res->data, p, len); | ||
77 | res->len = len; | 76 | res->len = len; |
78 | return q; | 77 | return q; |
79 | } | 78 | } |
diff --git a/net/sunrpc/auth_gss/gss_spkm3_mech.c b/net/sunrpc/auth_gss/gss_spkm3_mech.c index bdedf456bc17..d57f60838895 100644 --- a/net/sunrpc/auth_gss/gss_spkm3_mech.c +++ b/net/sunrpc/auth_gss/gss_spkm3_mech.c | |||
@@ -76,10 +76,9 @@ simple_get_netobj(const void *p, const void *end, struct xdr_netobj *res) | |||
76 | q = (const void *)((const char *)p + len); | 76 | q = (const void *)((const char *)p + len); |
77 | if (unlikely(q > end || q < p)) | 77 | if (unlikely(q > end || q < p)) |
78 | return ERR_PTR(-EFAULT); | 78 | return ERR_PTR(-EFAULT); |
79 | res->data = kmalloc(len, GFP_KERNEL); | 79 | res->data = kmemdup(p, len, GFP_KERNEL); |
80 | if (unlikely(res->data == NULL)) | 80 | if (unlikely(res->data == NULL)) |
81 | return ERR_PTR(-ENOMEM); | 81 | return ERR_PTR(-ENOMEM); |
82 | memcpy(res->data, p, len); | ||
83 | return q; | 82 | return q; |
84 | } | 83 | } |
85 | 84 | ||
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c index 1f0f079ffa65..700353b330fd 100644 --- a/net/sunrpc/auth_gss/svcauth_gss.c +++ b/net/sunrpc/auth_gss/svcauth_gss.c | |||
@@ -113,9 +113,7 @@ static int rsi_match(struct cache_head *a, struct cache_head *b) | |||
113 | static int dup_to_netobj(struct xdr_netobj *dst, char *src, int len) | 113 | static int dup_to_netobj(struct xdr_netobj *dst, char *src, int len) |
114 | { | 114 | { |
115 | dst->len = len; | 115 | dst->len = len; |
116 | dst->data = (len ? kmalloc(len, GFP_KERNEL) : NULL); | 116 | dst->data = (len ? kmemdup(src, len, GFP_KERNEL) : NULL); |
117 | if (dst->data) | ||
118 | memcpy(dst->data, src, len); | ||
119 | if (len && !dst->data) | 117 | if (len && !dst->data) |
120 | return -ENOMEM; | 118 | return -ENOMEM; |
121 | return 0; | 119 | return 0; |
@@ -756,10 +754,9 @@ svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name) | |||
756 | if (!new) | 754 | if (!new) |
757 | goto out; | 755 | goto out; |
758 | kref_init(&new->h.ref); | 756 | kref_init(&new->h.ref); |
759 | new->h.name = kmalloc(strlen(name) + 1, GFP_KERNEL); | 757 | new->h.name = kstrdup(name, GFP_KERNEL); |
760 | if (!new->h.name) | 758 | if (!new->h.name) |
761 | goto out_free_dom; | 759 | goto out_free_dom; |
762 | strcpy(new->h.name, name); | ||
763 | new->h.flavour = &svcauthops_gss; | 760 | new->h.flavour = &svcauthops_gss; |
764 | new->pseudoflavor = pseudoflavor; | 761 | new->pseudoflavor = pseudoflavor; |
765 | 762 | ||
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 78696f2dc7d6..dfeea4fea95a 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c | |||
@@ -253,10 +253,9 @@ rpc_clone_client(struct rpc_clnt *clnt) | |||
253 | { | 253 | { |
254 | struct rpc_clnt *new; | 254 | struct rpc_clnt *new; |
255 | 255 | ||
256 | new = kmalloc(sizeof(*new), GFP_KERNEL); | 256 | new = kmemdup(clnt, sizeof(*new), GFP_KERNEL); |
257 | if (!new) | 257 | if (!new) |
258 | goto out_no_clnt; | 258 | goto out_no_clnt; |
259 | memcpy(new, clnt, sizeof(*new)); | ||
260 | atomic_set(&new->cl_count, 1); | 259 | atomic_set(&new->cl_count, 1); |
261 | atomic_set(&new->cl_users, 0); | 260 | atomic_set(&new->cl_users, 0); |
262 | new->cl_parent = clnt; | 261 | new->cl_parent = clnt; |
diff --git a/net/sunrpc/socklib.c b/net/sunrpc/socklib.c index 6f17527b9e69..2635c543ba06 100644 --- a/net/sunrpc/socklib.c +++ b/net/sunrpc/socklib.c | |||
@@ -45,7 +45,8 @@ static size_t skb_read_bits(skb_reader_t *desc, void *to, size_t len) | |||
45 | */ | 45 | */ |
46 | static size_t skb_read_and_csum_bits(skb_reader_t *desc, void *to, size_t len) | 46 | static size_t skb_read_and_csum_bits(skb_reader_t *desc, void *to, size_t len) |
47 | { | 47 | { |
48 | unsigned int csum2, pos; | 48 | unsigned int pos; |
49 | __wsum csum2; | ||
49 | 50 | ||
50 | if (len > desc->count) | 51 | if (len > desc->count) |
51 | len = desc->count; | 52 | len = desc->count; |
@@ -160,13 +161,13 @@ int csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb) | |||
160 | if (xdr_partial_copy_from_skb(xdr, 0, &desc, skb_read_and_csum_bits) < 0) | 161 | if (xdr_partial_copy_from_skb(xdr, 0, &desc, skb_read_and_csum_bits) < 0) |
161 | return -1; | 162 | return -1; |
162 | if (desc.offset != skb->len) { | 163 | if (desc.offset != skb->len) { |
163 | unsigned int csum2; | 164 | __wsum csum2; |
164 | csum2 = skb_checksum(skb, desc.offset, skb->len - desc.offset, 0); | 165 | csum2 = skb_checksum(skb, desc.offset, skb->len - desc.offset, 0); |
165 | desc.csum = csum_block_add(desc.csum, csum2, desc.offset); | 166 | desc.csum = csum_block_add(desc.csum, csum2, desc.offset); |
166 | } | 167 | } |
167 | if (desc.count) | 168 | if (desc.count) |
168 | return -1; | 169 | return -1; |
169 | if ((unsigned short)csum_fold(desc.csum)) | 170 | if (csum_fold(desc.csum)) |
170 | return -1; | 171 | return -1; |
171 | if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE)) | 172 | if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE)) |
172 | netdev_rx_csum_fault(skb->dev); | 173 | netdev_rx_csum_fault(skb->dev); |
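The socklib.c hunk is part of the tree-wide checksum typing work: running checksums are carried as __wsum and only collapsed with csum_fold() at the end, which is also why the old (unsigned short) cast on the folded result is gone. A hedged sketch of the helpers involved, with signatures paraphrased from the checksum headers of this era and an illustrative wrapper function:

#include <linux/types.h>
#include <net/checksum.h>

/* Roughly:
 *   __wsum  csum_partial(const void *buff, int len, __wsum sum);
 *   __wsum  csum_block_add(__wsum csum, __wsum csum2, int offset);
 *   __sum16 csum_fold(__wsum sum);
 * The running sum stays a __wsum end to end; csum_fold() reduces it to
 * 16 bits, and a folded result of 0 means the checksum verifies.
 */
static int check_buf(const void *buf, int len, __wsum extra, int extra_off)
{
	__wsum csum = csum_partial(buf, len, 0);

	csum = csum_block_add(csum, extra, extra_off);
	return csum_fold(csum) ? -1 : 0;	/* nonzero folded sum == bad */
}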
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c index e1bd933629fe..a0a953a430c2 100644 --- a/net/sunrpc/svcauth_unix.c +++ b/net/sunrpc/svcauth_unix.c | |||
@@ -101,9 +101,9 @@ static void ip_map_put(struct kref *kref) | |||
101 | * IP addresses in reverse-endian (i.e. on a little-endian machine). | 101 | * IP addresses in reverse-endian (i.e. on a little-endian machine). |
102 | * So use a trivial but reliable hash instead | 102 | * So use a trivial but reliable hash instead |
103 | */ | 103 | */ |
104 | static inline int hash_ip(unsigned long ip) | 104 | static inline int hash_ip(__be32 ip) |
105 | { | 105 | { |
106 | int hash = ip ^ (ip>>16); | 106 | int hash = (__force u32)ip ^ ((__force u32)ip>>16); |
107 | return (hash ^ (hash>>8)) & 0xff; | 107 | return (hash ^ (hash>>8)) & 0xff; |
108 | } | 108 | } |
109 | #endif | 109 | #endif |
@@ -284,7 +284,7 @@ static struct ip_map *ip_map_lookup(char *class, struct in_addr addr) | |||
284 | ip.m_addr = addr; | 284 | ip.m_addr = addr; |
285 | ch = sunrpc_cache_lookup(&ip_map_cache, &ip.h, | 285 | ch = sunrpc_cache_lookup(&ip_map_cache, &ip.h, |
286 | hash_str(class, IP_HASHBITS) ^ | 286 | hash_str(class, IP_HASHBITS) ^ |
287 | hash_ip((unsigned long)addr.s_addr)); | 287 | hash_ip(addr.s_addr)); |
288 | 288 | ||
289 | if (ch) | 289 | if (ch) |
290 | return container_of(ch, struct ip_map, h); | 290 | return container_of(ch, struct ip_map, h); |
@@ -313,7 +313,7 @@ static int ip_map_update(struct ip_map *ipm, struct unix_domain *udom, time_t ex | |||
313 | ch = sunrpc_cache_update(&ip_map_cache, | 313 | ch = sunrpc_cache_update(&ip_map_cache, |
314 | &ip.h, &ipm->h, | 314 | &ip.h, &ipm->h, |
315 | hash_str(ipm->m_class, IP_HASHBITS) ^ | 315 | hash_str(ipm->m_class, IP_HASHBITS) ^ |
316 | hash_ip((unsigned long)ipm->m_addr.s_addr)); | 316 | hash_ip(ipm->m_addr.s_addr)); |
317 | if (!ch) | 317 | if (!ch) |
318 | return -ENOMEM; | 318 | return -ENOMEM; |
319 | cache_put(ch, &ip_map_cache); | 319 | cache_put(ch, &ip_map_cache); |
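hash_ip() now takes the address as __be32, and the (__force u32) cast marks the one place where a big-endian value is deliberately treated as plain bits for hashing; sparse then flags any other unconverted use. A user-space analogue of the same hash (illustrative only; inet_addr() already returns network byte order):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

/* Hash the raw bits of a big-endian IPv4 address into 8 bits. */
static int hash_ip(uint32_t ip_be)
{
	uint32_t hash = ip_be ^ (ip_be >> 16);

	return (hash ^ (hash >> 8)) & 0xff;
}

int main(void)
{
	uint32_t addr = inet_addr("192.168.1.10");	/* network order */

	printf("bucket = %d\n", hash_ip(addr));
	return 0;
}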
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c index 1bb75703f384..730c5c47ed8d 100644 --- a/net/tipc/bcast.c +++ b/net/tipc/bcast.c | |||
@@ -774,8 +774,8 @@ int tipc_bclink_set_queue_limits(u32 limit) | |||
774 | 774 | ||
775 | int tipc_bclink_init(void) | 775 | int tipc_bclink_init(void) |
776 | { | 776 | { |
777 | bcbearer = kmalloc(sizeof(*bcbearer), GFP_ATOMIC); | 777 | bcbearer = kzalloc(sizeof(*bcbearer), GFP_ATOMIC); |
778 | bclink = kmalloc(sizeof(*bclink), GFP_ATOMIC); | 778 | bclink = kzalloc(sizeof(*bclink), GFP_ATOMIC); |
779 | if (!bcbearer || !bclink) { | 779 | if (!bcbearer || !bclink) { |
780 | nomem: | 780 | nomem: |
781 | warn("Multicast link creation failed, no memory\n"); | 781 | warn("Multicast link creation failed, no memory\n"); |
@@ -786,14 +786,12 @@ int tipc_bclink_init(void) | |||
786 | return -ENOMEM; | 786 | return -ENOMEM; |
787 | } | 787 | } |
788 | 788 | ||
789 | memset(bcbearer, 0, sizeof(struct bcbearer)); | ||
790 | INIT_LIST_HEAD(&bcbearer->bearer.cong_links); | 789 | INIT_LIST_HEAD(&bcbearer->bearer.cong_links); |
791 | bcbearer->bearer.media = &bcbearer->media; | 790 | bcbearer->bearer.media = &bcbearer->media; |
792 | bcbearer->media.send_msg = tipc_bcbearer_send; | 791 | bcbearer->media.send_msg = tipc_bcbearer_send; |
793 | sprintf(bcbearer->media.name, "tipc-multicast"); | 792 | sprintf(bcbearer->media.name, "tipc-multicast"); |
794 | 793 | ||
795 | bcl = &bclink->link; | 794 | bcl = &bclink->link; |
796 | memset(bclink, 0, sizeof(struct bclink)); | ||
797 | INIT_LIST_HEAD(&bcl->waiting_ports); | 795 | INIT_LIST_HEAD(&bcl->waiting_ports); |
798 | bcl->next_out_no = 1; | 796 | bcl->next_out_no = 1; |
799 | spin_lock_init(&bclink->node.lock); | 797 | spin_lock_init(&bclink->node.lock); |
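Here, and again in tipc_node_create() and subscr_subscribe() below, kmalloc() followed by memset(..., 0, ...) becomes kzalloc(), which returns already-zeroed memory under the same allocation flags. Equivalent kernel-style usage, as a sketch with an illustrative type:

#include <linux/slab.h>

struct bclink_stub { int dummy; };	/* illustrative type only */

static struct bclink_stub *alloc_bclink(void)
{
	/* One call allocates and zeroes; no trailing memset() needed. */
	return kzalloc(sizeof(struct bclink_stub), GFP_ATOMIC);
}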
diff --git a/net/tipc/config.c b/net/tipc/config.c index ed1351ed05e1..458a2c46cef3 100644 --- a/net/tipc/config.c +++ b/net/tipc/config.c | |||
@@ -107,7 +107,7 @@ int tipc_cfg_append_tlv(struct sk_buff *buf, int tlv_type, | |||
107 | struct sk_buff *tipc_cfg_reply_unsigned_type(u16 tlv_type, u32 value) | 107 | struct sk_buff *tipc_cfg_reply_unsigned_type(u16 tlv_type, u32 value) |
108 | { | 108 | { |
109 | struct sk_buff *buf; | 109 | struct sk_buff *buf; |
110 | u32 value_net; | 110 | __be32 value_net; |
111 | 111 | ||
112 | buf = tipc_cfg_reply_alloc(TLV_SPACE(sizeof(value))); | 112 | buf = tipc_cfg_reply_alloc(TLV_SPACE(sizeof(value))); |
113 | if (buf) { | 113 | if (buf) { |
@@ -284,8 +284,7 @@ static struct sk_buff *cfg_set_own_addr(void) | |||
284 | if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR)) | 284 | if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR)) |
285 | return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); | 285 | return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); |
286 | 286 | ||
287 | addr = *(u32 *)TLV_DATA(req_tlv_area); | 287 | addr = ntohl(*(__be32 *)TLV_DATA(req_tlv_area)); |
288 | addr = ntohl(addr); | ||
289 | if (addr == tipc_own_addr) | 288 | if (addr == tipc_own_addr) |
290 | return tipc_cfg_reply_none(); | 289 | return tipc_cfg_reply_none(); |
291 | if (!tipc_addr_node_valid(addr)) | 290 | if (!tipc_addr_node_valid(addr)) |
@@ -319,8 +318,7 @@ static struct sk_buff *cfg_set_remote_mng(void) | |||
319 | if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED)) | 318 | if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED)) |
320 | return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); | 319 | return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); |
321 | 320 | ||
322 | value = *(u32 *)TLV_DATA(req_tlv_area); | 321 | value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area)); |
323 | value = ntohl(value); | ||
324 | tipc_remote_management = (value != 0); | 322 | tipc_remote_management = (value != 0); |
325 | return tipc_cfg_reply_none(); | 323 | return tipc_cfg_reply_none(); |
326 | } | 324 | } |
@@ -332,8 +330,7 @@ static struct sk_buff *cfg_set_max_publications(void) | |||
332 | if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED)) | 330 | if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED)) |
333 | return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); | 331 | return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); |
334 | 332 | ||
335 | value = *(u32 *)TLV_DATA(req_tlv_area); | 333 | value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area)); |
336 | value = ntohl(value); | ||
337 | if (value != delimit(value, 1, 65535)) | 334 | if (value != delimit(value, 1, 65535)) |
338 | return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE | 335 | return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE |
339 | " (max publications must be 1-65535)"); | 336 | " (max publications must be 1-65535)"); |
@@ -348,8 +345,7 @@ static struct sk_buff *cfg_set_max_subscriptions(void) | |||
348 | if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED)) | 345 | if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED)) |
349 | return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); | 346 | return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); |
350 | 347 | ||
351 | value = *(u32 *)TLV_DATA(req_tlv_area); | 348 | value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area)); |
352 | value = ntohl(value); | ||
353 | if (value != delimit(value, 1, 65535)) | 349 | if (value != delimit(value, 1, 65535)) |
354 | return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE | 350 | return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE |
355 | " (max subscriptions must be 1-65535"); | 351 | " (max subscriptions must be 1-65535"); |
@@ -363,8 +359,7 @@ static struct sk_buff *cfg_set_max_ports(void) | |||
363 | 359 | ||
364 | if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED)) | 360 | if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED)) |
365 | return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); | 361 | return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); |
366 | value = *(u32 *)TLV_DATA(req_tlv_area); | 362 | value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area)); |
367 | value = ntohl(value); | ||
368 | if (value == tipc_max_ports) | 363 | if (value == tipc_max_ports) |
369 | return tipc_cfg_reply_none(); | 364 | return tipc_cfg_reply_none(); |
370 | if (value != delimit(value, 127, 65535)) | 365 | if (value != delimit(value, 127, 65535)) |
@@ -383,8 +378,7 @@ static struct sk_buff *cfg_set_max_zones(void) | |||
383 | 378 | ||
384 | if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED)) | 379 | if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED)) |
385 | return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); | 380 | return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); |
386 | value = *(u32 *)TLV_DATA(req_tlv_area); | 381 | value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area)); |
387 | value = ntohl(value); | ||
388 | if (value == tipc_max_zones) | 382 | if (value == tipc_max_zones) |
389 | return tipc_cfg_reply_none(); | 383 | return tipc_cfg_reply_none(); |
390 | if (value != delimit(value, 1, 255)) | 384 | if (value != delimit(value, 1, 255)) |
@@ -403,8 +397,7 @@ static struct sk_buff *cfg_set_max_clusters(void) | |||
403 | 397 | ||
404 | if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED)) | 398 | if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED)) |
405 | return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); | 399 | return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); |
406 | value = *(u32 *)TLV_DATA(req_tlv_area); | 400 | value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area)); |
407 | value = ntohl(value); | ||
408 | if (value != delimit(value, 1, 1)) | 401 | if (value != delimit(value, 1, 1)) |
409 | return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE | 402 | return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE |
410 | " (max clusters fixed at 1)"); | 403 | " (max clusters fixed at 1)"); |
@@ -417,8 +410,7 @@ static struct sk_buff *cfg_set_max_nodes(void) | |||
417 | 410 | ||
418 | if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED)) | 411 | if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED)) |
419 | return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); | 412 | return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); |
420 | value = *(u32 *)TLV_DATA(req_tlv_area); | 413 | value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area)); |
421 | value = ntohl(value); | ||
422 | if (value == tipc_max_nodes) | 414 | if (value == tipc_max_nodes) |
423 | return tipc_cfg_reply_none(); | 415 | return tipc_cfg_reply_none(); |
424 | if (value != delimit(value, 8, 2047)) | 416 | if (value != delimit(value, 8, 2047)) |
@@ -437,8 +429,7 @@ static struct sk_buff *cfg_set_max_slaves(void) | |||
437 | 429 | ||
438 | if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED)) | 430 | if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED)) |
439 | return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); | 431 | return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); |
440 | value = *(u32 *)TLV_DATA(req_tlv_area); | 432 | value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area)); |
441 | value = ntohl(value); | ||
442 | if (value != 0) | 433 | if (value != 0) |
443 | return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED | 434 | return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED |
444 | " (max secondary nodes fixed at 0)"); | 435 | " (max secondary nodes fixed at 0)"); |
@@ -451,8 +442,7 @@ static struct sk_buff *cfg_set_netid(void) | |||
451 | 442 | ||
452 | if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED)) | 443 | if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED)) |
453 | return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); | 444 | return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); |
454 | value = *(u32 *)TLV_DATA(req_tlv_area); | 445 | value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area)); |
455 | value = ntohl(value); | ||
456 | if (value == tipc_net_id) | 446 | if (value == tipc_net_id) |
457 | return tipc_cfg_reply_none(); | 447 | return tipc_cfg_reply_none(); |
458 | if (value != delimit(value, 1, 9999)) | 448 | if (value != delimit(value, 1, 9999)) |
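Every TIPC_TLV_UNSIGNED handler above now reads its argument in one expression, value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area)), instead of copying the raw word into a u32 and swapping it afterwards; the __be32 * cast records that the TLV payload is big-endian on the wire. The same idiom in a self-contained user-space form (the buffer stands in for TLV_DATA()):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t wire = htonl(4711);	/* TLV payload as it arrives, network order */
	void *tlv_data = &wire;		/* stand-in for TLV_DATA(req_tlv_area) */

	/* Read and convert in one expression, as the handlers now do. */
	uint32_t value = ntohl(*(uint32_t *)tlv_data);

	printf("value = %u\n", value);	/* prints 4711 */
	return 0;
}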
diff --git a/net/tipc/dbg.c b/net/tipc/dbg.c index d8af4c28695d..627f99b7afdf 100644 --- a/net/tipc/dbg.c +++ b/net/tipc/dbg.c | |||
@@ -393,8 +393,7 @@ struct sk_buff *tipc_log_resize(const void *req_tlv_area, int req_tlv_space) | |||
393 | if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED)) | 393 | if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED)) |
394 | return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); | 394 | return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); |
395 | 395 | ||
396 | value = *(u32 *)TLV_DATA(req_tlv_area); | 396 | value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area)); |
397 | value = ntohl(value); | ||
398 | if (value != delimit(value, 0, 32768)) | 397 | if (value != delimit(value, 0, 32768)) |
399 | return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE | 398 | return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE |
400 | " (log size must be 0-32768)"); | 399 | " (log size must be 0-32768)"); |
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c index 03bd659c43ca..7bf87cb26ef3 100644 --- a/net/tipc/name_distr.c +++ b/net/tipc/name_distr.c | |||
@@ -66,11 +66,11 @@ | |||
66 | */ | 66 | */ |
67 | 67 | ||
68 | struct distr_item { | 68 | struct distr_item { |
69 | u32 type; | 69 | __be32 type; |
70 | u32 lower; | 70 | __be32 lower; |
71 | u32 upper; | 71 | __be32 upper; |
72 | u32 ref; | 72 | __be32 ref; |
73 | u32 key; | 73 | __be32 key; |
74 | }; | 74 | }; |
75 | 75 | ||
76 | /** | 76 | /** |
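struct distr_item describes name-table publications as they travel inside a TIPC message, so its fields are now declared __be32: sparse will flag any access that forgets an ntohl()/htonl(). A hedged sketch of how such a wire structure is filled (the mirror struct and helper are illustrative; includes are approximate):

#include <linux/types.h>
#include <asm/byteorder.h>

struct distr_item_sketch {	/* mirrors distr_item: every field big-endian */
	__be32 type;
	__be32 lower;
	__be32 upper;
	__be32 ref;
	__be32 key;
};

static void fill_item(struct distr_item_sketch *i, u32 type, u32 lower,
		      u32 upper, u32 ref, u32 key)
{
	i->type = htonl(type);		/* every store converts explicitly */
	i->lower = htonl(lower);
	i->upper = htonl(upper);
	i->ref = htonl(ref);
	i->key = htonl(key);
}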
diff --git a/net/tipc/node.c b/net/tipc/node.c index 886bda5e88db..4111a31def79 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c | |||
@@ -60,7 +60,7 @@ struct node *tipc_node_create(u32 addr) | |||
60 | struct node *n_ptr; | 60 | struct node *n_ptr; |
61 | struct node **curr_node; | 61 | struct node **curr_node; |
62 | 62 | ||
63 | n_ptr = kmalloc(sizeof(*n_ptr),GFP_ATOMIC); | 63 | n_ptr = kzalloc(sizeof(*n_ptr),GFP_ATOMIC); |
64 | if (!n_ptr) { | 64 | if (!n_ptr) { |
65 | warn("Node creation failed, no memory\n"); | 65 | warn("Node creation failed, no memory\n"); |
66 | return NULL; | 66 | return NULL; |
@@ -75,7 +75,6 @@ struct node *tipc_node_create(u32 addr) | |||
75 | return NULL; | 75 | return NULL; |
76 | } | 76 | } |
77 | 77 | ||
78 | memset(n_ptr, 0, sizeof(*n_ptr)); | ||
79 | n_ptr->addr = addr; | 78 | n_ptr->addr = addr; |
80 | spin_lock_init(&n_ptr->lock); | 79 | spin_lock_init(&n_ptr->lock); |
81 | INIT_LIST_HEAD(&n_ptr->nsub); | 80 | INIT_LIST_HEAD(&n_ptr->nsub); |
@@ -597,8 +596,7 @@ struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space) | |||
597 | if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR)) | 596 | if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR)) |
598 | return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); | 597 | return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); |
599 | 598 | ||
600 | domain = *(u32 *)TLV_DATA(req_tlv_area); | 599 | domain = ntohl(*(__be32 *)TLV_DATA(req_tlv_area)); |
601 | domain = ntohl(domain); | ||
602 | if (!tipc_addr_domain_valid(domain)) | 600 | if (!tipc_addr_domain_valid(domain)) |
603 | return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE | 601 | return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE |
604 | " (network address)"); | 602 | " (network address)"); |
@@ -642,8 +640,7 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space) | |||
642 | if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR)) | 640 | if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR)) |
643 | return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); | 641 | return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); |
644 | 642 | ||
645 | domain = *(u32 *)TLV_DATA(req_tlv_area); | 643 | domain = ntohl(*(__be32 *)TLV_DATA(req_tlv_area)); |
646 | domain = ntohl(domain); | ||
647 | if (!tipc_addr_domain_valid(domain)) | 644 | if (!tipc_addr_domain_valid(domain)) |
648 | return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE | 645 | return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE |
649 | " (network address)"); | 646 | " (network address)"); |
@@ -664,8 +661,7 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space) | |||
664 | 661 | ||
665 | /* Add TLV for broadcast link */ | 662 | /* Add TLV for broadcast link */ |
666 | 663 | ||
667 | link_info.dest = tipc_own_addr & 0xfffff00; | 664 | link_info.dest = htonl(tipc_own_addr & 0xfffff00); |
668 | link_info.dest = htonl(link_info.dest); | ||
669 | link_info.up = htonl(1); | 665 | link_info.up = htonl(1); |
670 | sprintf(link_info.str, tipc_bclink_name); | 666 | sprintf(link_info.str, tipc_bclink_name); |
671 | tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO, &link_info, sizeof(link_info)); | 667 | tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO, &link_info, sizeof(link_info)); |
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c index 7a918f12a5df..ddade7388aa0 100644 --- a/net/tipc/subscr.c +++ b/net/tipc/subscr.c | |||
@@ -350,7 +350,7 @@ static void subscr_subscribe(struct tipc_subscr *s, | |||
350 | 350 | ||
351 | /* Allocate subscription object */ | 351 | /* Allocate subscription object */ |
352 | 352 | ||
353 | sub = kmalloc(sizeof(*sub), GFP_ATOMIC); | 353 | sub = kzalloc(sizeof(*sub), GFP_ATOMIC); |
354 | if (!sub) { | 354 | if (!sub) { |
355 | warn("Subscription rejected, no memory\n"); | 355 | warn("Subscription rejected, no memory\n"); |
356 | subscr_terminate(subscriber); | 356 | subscr_terminate(subscriber); |
@@ -359,7 +359,6 @@ static void subscr_subscribe(struct tipc_subscr *s, | |||
359 | 359 | ||
360 | /* Initialize subscription object */ | 360 | /* Initialize subscription object */ |
361 | 361 | ||
362 | memset(sub, 0, sizeof(*sub)); | ||
363 | sub->seq.type = htohl(s->seq.type, subscriber->swap); | 362 | sub->seq.type = htohl(s->seq.type, subscriber->swap); |
364 | sub->seq.lower = htohl(s->seq.lower, subscriber->swap); | 363 | sub->seq.lower = htohl(s->seq.lower, subscriber->swap); |
365 | sub->seq.upper = htohl(s->seq.upper, subscriber->swap); | 364 | sub->seq.upper = htohl(s->seq.upper, subscriber->swap); |
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index b43a27828df5..2f208c7f4d43 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c | |||
@@ -151,8 +151,9 @@ static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb) | |||
151 | * each socket state is protected by separate rwlock. | 151 | * each socket state is protected by separate rwlock. |
152 | */ | 152 | */ |
153 | 153 | ||
154 | static inline unsigned unix_hash_fold(unsigned hash) | 154 | static inline unsigned unix_hash_fold(__wsum n) |
155 | { | 155 | { |
156 | unsigned hash = (__force unsigned)n; | ||
156 | hash ^= hash>>16; | 157 | hash ^= hash>>16; |
157 | hash ^= hash>>8; | 158 | hash ^= hash>>8; |
158 | return hash&(UNIX_HASH_SIZE-1); | 159 | return hash&(UNIX_HASH_SIZE-1); |
diff --git a/net/wanrouter/af_wanpipe.c b/net/wanrouter/af_wanpipe.c index 6f39faa15832..c2059733e15a 100644 --- a/net/wanrouter/af_wanpipe.c +++ b/net/wanrouter/af_wanpipe.c | |||
@@ -13,7 +13,7 @@ | |||
13 | * Due Credit: | 13 | * Due Credit: |
14 | * Wanpipe socket layer is based on Packet and | 14 | * Wanpipe socket layer is based on Packet and |
15 | * the X25 socket layers. The above sockets were | 15 | * the X25 socket layers. The above sockets were |
16 | * used for the specific use of Sangoma Technoloiges | 16 | * used for the specific use of Sangoma Technologies |
17 | * API programs. | 17 | * API programs. |
18 | * Packet socket Authors: Ross Biro, Fred N. van Kempen and | 18 | * Packet socket Authors: Ross Biro, Fred N. van Kempen and |
19 | * Alan Cox. | 19 | * Alan Cox. |
@@ -23,7 +23,7 @@ | |||
23 | * Apr 25, 2000 Nenad Corbic o Added the ability to send zero length packets. | 23 | * Apr 25, 2000 Nenad Corbic o Added the ability to send zero length packets. |
24 | * Mar 13, 2000 Nenad Corbic o Added a tx buffer check via ioctl call. | 24 | * Mar 13, 2000 Nenad Corbic o Added a tx buffer check via ioctl call. |
25 | * Mar 06, 2000 Nenad Corbic o Fixed the corrupt sock lcn problem. | 25 | * Mar 06, 2000 Nenad Corbic o Fixed the corrupt sock lcn problem. |
26 | * Server and client applicaton can run | 26 | * Server and client application can run |
27 | * simultaneously without conflicts. | 27 | * simultaneously without conflicts. |
28 | * Feb 29, 2000 Nenad Corbic o Added support for PVC protocols, such as | 28 | * Feb 29, 2000 Nenad Corbic o Added support for PVC protocols, such as |
29 | * CHDLC, Frame Relay and HDLC API. | 29 | * CHDLC, Frame Relay and HDLC API. |
diff --git a/net/wanrouter/wanmain.c b/net/wanrouter/wanmain.c index 9479659277ae..316211d9f17d 100644 --- a/net/wanrouter/wanmain.c +++ b/net/wanrouter/wanmain.c | |||
@@ -3,7 +3,7 @@ | |||
3 | * | 3 | * |
4 | * This module is completely hardware-independent and provides | 4 | * This module is completely hardware-independent and provides |
5 | * the following common services for the WAN Link Drivers: | 5 | * the following common services for the WAN Link Drivers: |
6 | * o WAN device managenment (registering, unregistering) | 6 | * o WAN device management (registering, unregistering) |
7 | * o Network interface management | 7 | * o Network interface management |
8 | * o Physical connection management (dial-up, incoming calls) | 8 | * o Physical connection management (dial-up, incoming calls) |
9 | * o Logical connection management (switched virtual circuits) | 9 | * o Logical connection management (switched virtual circuits) |
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index ba924d40df7d..f6c77bd36fdd 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c | |||
@@ -50,6 +50,40 @@ static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo); | |||
50 | static struct xfrm_policy_afinfo *xfrm_policy_lock_afinfo(unsigned int family); | 50 | static struct xfrm_policy_afinfo *xfrm_policy_lock_afinfo(unsigned int family); |
51 | static void xfrm_policy_unlock_afinfo(struct xfrm_policy_afinfo *afinfo); | 51 | static void xfrm_policy_unlock_afinfo(struct xfrm_policy_afinfo *afinfo); |
52 | 52 | ||
53 | static inline int | ||
54 | __xfrm4_selector_match(struct xfrm_selector *sel, struct flowi *fl) | ||
55 | { | ||
56 | return addr_match(&fl->fl4_dst, &sel->daddr, sel->prefixlen_d) && | ||
57 | addr_match(&fl->fl4_src, &sel->saddr, sel->prefixlen_s) && | ||
58 | !((xfrm_flowi_dport(fl) ^ sel->dport) & sel->dport_mask) && | ||
59 | !((xfrm_flowi_sport(fl) ^ sel->sport) & sel->sport_mask) && | ||
60 | (fl->proto == sel->proto || !sel->proto) && | ||
61 | (fl->oif == sel->ifindex || !sel->ifindex); | ||
62 | } | ||
63 | |||
64 | static inline int | ||
65 | __xfrm6_selector_match(struct xfrm_selector *sel, struct flowi *fl) | ||
66 | { | ||
67 | return addr_match(&fl->fl6_dst, &sel->daddr, sel->prefixlen_d) && | ||
68 | addr_match(&fl->fl6_src, &sel->saddr, sel->prefixlen_s) && | ||
69 | !((xfrm_flowi_dport(fl) ^ sel->dport) & sel->dport_mask) && | ||
70 | !((xfrm_flowi_sport(fl) ^ sel->sport) & sel->sport_mask) && | ||
71 | (fl->proto == sel->proto || !sel->proto) && | ||
72 | (fl->oif == sel->ifindex || !sel->ifindex); | ||
73 | } | ||
74 | |||
75 | int xfrm_selector_match(struct xfrm_selector *sel, struct flowi *fl, | ||
76 | unsigned short family) | ||
77 | { | ||
78 | switch (family) { | ||
79 | case AF_INET: | ||
80 | return __xfrm4_selector_match(sel, fl); | ||
81 | case AF_INET6: | ||
82 | return __xfrm6_selector_match(sel, fl); | ||
83 | } | ||
84 | return 0; | ||
85 | } | ||
86 | |||
53 | int xfrm_register_type(struct xfrm_type *type, unsigned short family) | 87 | int xfrm_register_type(struct xfrm_type *type, unsigned short family) |
54 | { | 88 | { |
55 | struct xfrm_policy_afinfo *afinfo = xfrm_policy_lock_afinfo(family); | 89 | struct xfrm_policy_afinfo *afinfo = xfrm_policy_lock_afinfo(family); |
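Note on the hunk above: it consolidates the per-family selector checks into a single exported xfrm_selector_match() that dispatches on the address family. A hedged sketch of how a lookup path would call it follows; the wrapper name is illustrative and only xfrm_selector_match() itself comes from this patch.

	#include <net/xfrm.h>

	/* Hypothetical caller: nonzero when the flow's addresses, ports,
	 * protocol and oif all fall within the policy's selector for the
	 * given address family. */
	static int flow_hits_policy(struct xfrm_policy *pol, struct flowi *fl,
				    unsigned short family)
	{
		return xfrm_selector_match(&pol->selector, fl, family);
	}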
@@ -1177,6 +1211,7 @@ xfrm_tmpl_resolve_one(struct xfrm_policy *policy, struct flowi *fl, | |||
1177 | if (tmpl->mode == XFRM_MODE_TUNNEL) { | 1211 | if (tmpl->mode == XFRM_MODE_TUNNEL) { |
1178 | remote = &tmpl->id.daddr; | 1212 | remote = &tmpl->id.daddr; |
1179 | local = &tmpl->saddr; | 1213 | local = &tmpl->saddr; |
1214 | family = tmpl->encap_family; | ||
1180 | if (xfrm_addr_any(local, family)) { | 1215 | if (xfrm_addr_any(local, family)) { |
1181 | error = xfrm_get_saddr(&tmp, remote, family); | 1216 | error = xfrm_get_saddr(&tmp, remote, family); |
1182 | if (error) | 1217 | if (error) |
@@ -1894,7 +1929,8 @@ int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *first, | |||
1894 | 1929 | ||
1895 | if (fl && !xfrm_selector_match(&dst->xfrm->sel, fl, family)) | 1930 | if (fl && !xfrm_selector_match(&dst->xfrm->sel, fl, family)) |
1896 | return 0; | 1931 | return 0; |
1897 | if (fl && !security_xfrm_flow_state_match(fl, dst->xfrm, pol)) | 1932 | if (fl && pol && |
1933 | !security_xfrm_state_pol_flow_match(dst->xfrm, pol, fl)) | ||
1898 | return 0; | 1934 | return 0; |
1899 | if (dst->xfrm->km.state != XFRM_STATE_VALID) | 1935 | if (dst->xfrm->km.state != XFRM_STATE_VALID) |
1900 | return 0; | 1936 | return 0; |
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 40c527179843..da54a64ccfa3 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c | |||
@@ -1304,7 +1304,7 @@ int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol) | |||
1304 | } | 1304 | } |
1305 | EXPORT_SYMBOL(km_query); | 1305 | EXPORT_SYMBOL(km_query); |
1306 | 1306 | ||
1307 | int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, u16 sport) | 1307 | int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport) |
1308 | { | 1308 | { |
1309 | int err = -EINVAL; | 1309 | int err = -EINVAL; |
1310 | struct xfrm_mgr *km; | 1310 | struct xfrm_mgr *km; |
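The km_new_mapping() change above only swaps u16 for __be16, marking the NAT-T source port as network byte order so sparse can type-check it. A hedged illustration of the calling convention follows; the wrapper below is hypothetical, only km_new_mapping() and its new prototype come from the patch.

	#include <net/xfrm.h>

	static int notify_nat_mapping(struct xfrm_state *x, xfrm_address_t *addr,
				      unsigned short host_port)
	{
		__be16 sport = htons(host_port);	/* host order -> network order */

		return km_new_mapping(x, addr, sport);
	}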
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index c4cde57d9216..6f97665983d2 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c | |||
@@ -244,11 +244,10 @@ static int attach_one_algo(struct xfrm_algo **algpp, u8 *props, | |||
244 | *props = algo->desc.sadb_alg_id; | 244 | *props = algo->desc.sadb_alg_id; |
245 | 245 | ||
246 | len = sizeof(*ualg) + (ualg->alg_key_len + 7U) / 8; | 246 | len = sizeof(*ualg) + (ualg->alg_key_len + 7U) / 8; |
247 | p = kmalloc(len, GFP_KERNEL); | 247 | p = kmemdup(ualg, len, GFP_KERNEL); |
248 | if (!p) | 248 | if (!p) |
249 | return -ENOMEM; | 249 | return -ENOMEM; |
250 | 250 | ||
251 | memcpy(p, ualg, len); | ||
252 | strcpy(p->alg_name, algo->name); | 251 | strcpy(p->alg_name, algo->name); |
253 | *algpp = p; | 252 | *algpp = p; |
254 | return 0; | 253 | return 0; |
@@ -263,11 +262,10 @@ static int attach_encap_tmpl(struct xfrm_encap_tmpl **encapp, struct rtattr *u_a | |||
263 | return 0; | 262 | return 0; |
264 | 263 | ||
265 | uencap = RTA_DATA(rta); | 264 | uencap = RTA_DATA(rta); |
266 | p = kmalloc(sizeof(*p), GFP_KERNEL); | 265 | p = kmemdup(uencap, sizeof(*p), GFP_KERNEL); |
267 | if (!p) | 266 | if (!p) |
268 | return -ENOMEM; | 267 | return -ENOMEM; |
269 | 268 | ||
270 | memcpy(p, uencap, sizeof(*p)); | ||
271 | *encapp = p; | 269 | *encapp = p; |
272 | return 0; | 270 | return 0; |
273 | } | 271 | } |
@@ -305,11 +303,10 @@ static int attach_one_addr(xfrm_address_t **addrpp, struct rtattr *u_arg) | |||
305 | return 0; | 303 | return 0; |
306 | 304 | ||
307 | uaddrp = RTA_DATA(rta); | 305 | uaddrp = RTA_DATA(rta); |
308 | p = kmalloc(sizeof(*p), GFP_KERNEL); | 306 | p = kmemdup(uaddrp, sizeof(*p), GFP_KERNEL); |
309 | if (!p) | 307 | if (!p) |
310 | return -ENOMEM; | 308 | return -ENOMEM; |
311 | 309 | ||
312 | memcpy(p, uaddrp, sizeof(*p)); | ||
313 | *addrpp = p; | 310 | *addrpp = p; |
314 | return 0; | 311 | return 0; |
315 | } | 312 | } |
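The three hunks above (attach_one_algo, attach_encap_tmpl, attach_one_addr) replace an open-coded allocate-then-copy sequence with kmemdup(). A hedged sketch of the equivalence follows; the helper name is illustrative.

	#include <linux/slab.h>
	#include <linux/string.h>

	/* What the old code spelled out by hand ... */
	static void *dup_open_coded(const void *src, size_t len)
	{
		void *p = kmalloc(len, GFP_KERNEL);

		if (!p)
			return NULL;
		memcpy(p, src, len);	/* ... kmemdup(src, len, GFP_KERNEL) does both steps */
		return p;
	}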
@@ -495,6 +492,7 @@ static struct xfrm_state *xfrm_user_state_lookup(struct xfrm_usersa_id *p, | |||
495 | goto out; | 492 | goto out; |
496 | } | 493 | } |
497 | 494 | ||
495 | err = -ESRCH; | ||
498 | x = xfrm_state_lookup_byaddr(&p->daddr, saddr, p->proto, | 496 | x = xfrm_state_lookup_byaddr(&p->daddr, saddr, p->proto, |
499 | p->family); | 497 | p->family); |
500 | } | 498 | } |
@@ -652,7 +650,6 @@ static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb, | |||
652 | if (!skb) | 650 | if (!skb) |
653 | return ERR_PTR(-ENOMEM); | 651 | return ERR_PTR(-ENOMEM); |
654 | 652 | ||
655 | NETLINK_CB(skb).dst_pid = NETLINK_CB(in_skb).pid; | ||
656 | info.in_skb = in_skb; | 653 | info.in_skb = in_skb; |
657 | info.out_skb = skb; | 654 | info.out_skb = skb; |
658 | info.nlmsg_seq = seq; | 655 | info.nlmsg_seq = seq; |
@@ -772,7 +769,7 @@ out_noput: | |||
772 | return err; | 769 | return err; |
773 | } | 770 | } |
774 | 771 | ||
775 | static int verify_policy_dir(__u8 dir) | 772 | static int verify_policy_dir(u8 dir) |
776 | { | 773 | { |
777 | switch (dir) { | 774 | switch (dir) { |
778 | case XFRM_POLICY_IN: | 775 | case XFRM_POLICY_IN: |
@@ -787,7 +784,7 @@ static int verify_policy_dir(__u8 dir) | |||
787 | return 0; | 784 | return 0; |
788 | } | 785 | } |
789 | 786 | ||
790 | static int verify_policy_type(__u8 type) | 787 | static int verify_policy_type(u8 type) |
791 | { | 788 | { |
792 | switch (type) { | 789 | switch (type) { |
793 | case XFRM_POLICY_TYPE_MAIN: | 790 | case XFRM_POLICY_TYPE_MAIN: |
@@ -861,6 +858,7 @@ static void copy_templates(struct xfrm_policy *xp, struct xfrm_user_tmpl *ut, | |||
861 | int i; | 858 | int i; |
862 | 859 | ||
863 | xp->xfrm_nr = nr; | 860 | xp->xfrm_nr = nr; |
861 | xp->family = ut->family; | ||
864 | for (i = 0; i < nr; i++, ut++) { | 862 | for (i = 0; i < nr; i++, ut++) { |
865 | struct xfrm_tmpl *t = &xp->xfrm_vec[i]; | 863 | struct xfrm_tmpl *t = &xp->xfrm_vec[i]; |
866 | 864 | ||
@@ -874,6 +872,7 @@ static void copy_templates(struct xfrm_policy *xp, struct xfrm_user_tmpl *ut, | |||
874 | t->aalgos = ut->aalgos; | 872 | t->aalgos = ut->aalgos; |
875 | t->ealgos = ut->ealgos; | 873 | t->ealgos = ut->ealgos; |
876 | t->calgos = ut->calgos; | 874 | t->calgos = ut->calgos; |
875 | t->encap_family = ut->family; | ||
877 | } | 876 | } |
878 | } | 877 | } |
879 | 878 | ||
@@ -900,7 +899,7 @@ static int copy_from_user_policy_type(u8 *tp, struct rtattr **xfrma) | |||
900 | { | 899 | { |
901 | struct rtattr *rt = xfrma[XFRMA_POLICY_TYPE-1]; | 900 | struct rtattr *rt = xfrma[XFRMA_POLICY_TYPE-1]; |
902 | struct xfrm_userpolicy_type *upt; | 901 | struct xfrm_userpolicy_type *upt; |
903 | __u8 type = XFRM_POLICY_TYPE_MAIN; | 902 | u8 type = XFRM_POLICY_TYPE_MAIN; |
904 | int err; | 903 | int err; |
905 | 904 | ||
906 | if (rt) { | 905 | if (rt) { |
@@ -1027,7 +1026,7 @@ static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb) | |||
1027 | struct xfrm_tmpl *kp = &xp->xfrm_vec[i]; | 1026 | struct xfrm_tmpl *kp = &xp->xfrm_vec[i]; |
1028 | 1027 | ||
1029 | memcpy(&up->id, &kp->id, sizeof(up->id)); | 1028 | memcpy(&up->id, &kp->id, sizeof(up->id)); |
1030 | up->family = xp->family; | 1029 | up->family = kp->encap_family; |
1031 | memcpy(&up->saddr, &kp->saddr, sizeof(up->saddr)); | 1030 | memcpy(&up->saddr, &kp->saddr, sizeof(up->saddr)); |
1032 | up->reqid = kp->reqid; | 1031 | up->reqid = kp->reqid; |
1033 | up->mode = kp->mode; | 1032 | up->mode = kp->mode; |
@@ -1082,12 +1081,12 @@ static inline int copy_to_user_sec_ctx(struct xfrm_policy *xp, struct sk_buff *s | |||
1082 | } | 1081 | } |
1083 | 1082 | ||
1084 | #ifdef CONFIG_XFRM_SUB_POLICY | 1083 | #ifdef CONFIG_XFRM_SUB_POLICY |
1085 | static int copy_to_user_policy_type(struct xfrm_policy *xp, struct sk_buff *skb) | 1084 | static int copy_to_user_policy_type(u8 type, struct sk_buff *skb) |
1086 | { | 1085 | { |
1087 | struct xfrm_userpolicy_type upt; | 1086 | struct xfrm_userpolicy_type upt; |
1088 | 1087 | ||
1089 | memset(&upt, 0, sizeof(upt)); | 1088 | memset(&upt, 0, sizeof(upt)); |
1090 | upt.type = xp->type; | 1089 | upt.type = type; |
1091 | 1090 | ||
1092 | RTA_PUT(skb, XFRMA_POLICY_TYPE, sizeof(upt), &upt); | 1091 | RTA_PUT(skb, XFRMA_POLICY_TYPE, sizeof(upt), &upt); |
1093 | 1092 | ||
@@ -1098,7 +1097,7 @@ rtattr_failure: | |||
1098 | } | 1097 | } |
1099 | 1098 | ||
1100 | #else | 1099 | #else |
1101 | static inline int copy_to_user_policy_type(struct xfrm_policy *xp, struct sk_buff *skb) | 1100 | static inline int copy_to_user_policy_type(u8 type, struct sk_buff *skb) |
1102 | { | 1101 | { |
1103 | return 0; | 1102 | return 0; |
1104 | } | 1103 | } |
@@ -1127,7 +1126,7 @@ static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr | |||
1127 | goto nlmsg_failure; | 1126 | goto nlmsg_failure; |
1128 | if (copy_to_user_sec_ctx(xp, skb)) | 1127 | if (copy_to_user_sec_ctx(xp, skb)) |
1129 | goto nlmsg_failure; | 1128 | goto nlmsg_failure; |
1130 | if (copy_to_user_policy_type(xp, skb) < 0) | 1129 | if (copy_to_user_policy_type(xp->type, skb) < 0) |
1131 | goto nlmsg_failure; | 1130 | goto nlmsg_failure; |
1132 | 1131 | ||
1133 | nlh->nlmsg_len = skb->tail - b; | 1132 | nlh->nlmsg_len = skb->tail - b; |
@@ -1170,7 +1169,6 @@ static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb, | |||
1170 | if (!skb) | 1169 | if (!skb) |
1171 | return ERR_PTR(-ENOMEM); | 1170 | return ERR_PTR(-ENOMEM); |
1172 | 1171 | ||
1173 | NETLINK_CB(skb).dst_pid = NETLINK_CB(in_skb).pid; | ||
1174 | info.in_skb = in_skb; | 1172 | info.in_skb = in_skb; |
1175 | info.out_skb = skb; | 1173 | info.out_skb = skb; |
1176 | info.nlmsg_seq = seq; | 1174 | info.nlmsg_seq = seq; |
@@ -1189,7 +1187,7 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfr | |||
1189 | { | 1187 | { |
1190 | struct xfrm_policy *xp; | 1188 | struct xfrm_policy *xp; |
1191 | struct xfrm_userpolicy_id *p; | 1189 | struct xfrm_userpolicy_id *p; |
1192 | __u8 type = XFRM_POLICY_TYPE_MAIN; | 1190 | u8 type = XFRM_POLICY_TYPE_MAIN; |
1193 | int err; | 1191 | int err; |
1194 | struct km_event c; | 1192 | struct km_event c; |
1195 | int delete; | 1193 | int delete; |
@@ -1283,10 +1281,12 @@ static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, struct km_eve | |||
1283 | id = NLMSG_DATA(nlh); | 1281 | id = NLMSG_DATA(nlh); |
1284 | nlh->nlmsg_flags = 0; | 1282 | nlh->nlmsg_flags = 0; |
1285 | 1283 | ||
1286 | id->sa_id.daddr = x->id.daddr; | 1284 | memcpy(&id->sa_id.daddr, &x->id.daddr,sizeof(x->id.daddr)); |
1287 | id->sa_id.spi = x->id.spi; | 1285 | id->sa_id.spi = x->id.spi; |
1288 | id->sa_id.family = x->props.family; | 1286 | id->sa_id.family = x->props.family; |
1289 | id->sa_id.proto = x->id.proto; | 1287 | id->sa_id.proto = x->id.proto; |
1288 | memcpy(&id->saddr, &x->props.saddr,sizeof(x->props.saddr)); | ||
1289 | id->reqid = x->props.reqid; | ||
1290 | id->flags = c->data.aevent; | 1290 | id->flags = c->data.aevent; |
1291 | 1291 | ||
1292 | RTA_PUT(skb, XFRMA_REPLAY_VAL, sizeof(x->replay), &x->replay); | 1292 | RTA_PUT(skb, XFRMA_REPLAY_VAL, sizeof(x->replay), &x->replay); |
@@ -1407,7 +1407,7 @@ out: | |||
1407 | static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma) | 1407 | static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma) |
1408 | { | 1408 | { |
1409 | struct km_event c; | 1409 | struct km_event c; |
1410 | __u8 type = XFRM_POLICY_TYPE_MAIN; | 1410 | u8 type = XFRM_POLICY_TYPE_MAIN; |
1411 | int err; | 1411 | int err; |
1412 | 1412 | ||
1413 | err = copy_from_user_policy_type(&type, (struct rtattr **)xfrma); | 1413 | err = copy_from_user_policy_type(&type, (struct rtattr **)xfrma); |
@@ -1428,7 +1428,7 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh, void * | |||
1428 | struct xfrm_policy *xp; | 1428 | struct xfrm_policy *xp; |
1429 | struct xfrm_user_polexpire *up = NLMSG_DATA(nlh); | 1429 | struct xfrm_user_polexpire *up = NLMSG_DATA(nlh); |
1430 | struct xfrm_userpolicy_info *p = &up->pol; | 1430 | struct xfrm_userpolicy_info *p = &up->pol; |
1431 | __u8 type = XFRM_POLICY_TYPE_MAIN; | 1431 | u8 type = XFRM_POLICY_TYPE_MAIN; |
1432 | int err = -ENOENT; | 1432 | int err = -ENOENT; |
1433 | 1433 | ||
1434 | err = copy_from_user_policy_type(&type, (struct rtattr **)xfrma); | 1434 | err = copy_from_user_policy_type(&type, (struct rtattr **)xfrma); |
@@ -1907,7 +1907,7 @@ static int build_acquire(struct sk_buff *skb, struct xfrm_state *x, | |||
1907 | goto nlmsg_failure; | 1907 | goto nlmsg_failure; |
1908 | if (copy_to_user_state_sec_ctx(x, skb)) | 1908 | if (copy_to_user_state_sec_ctx(x, skb)) |
1909 | goto nlmsg_failure; | 1909 | goto nlmsg_failure; |
1910 | if (copy_to_user_policy_type(xp, skb) < 0) | 1910 | if (copy_to_user_policy_type(xp->type, skb) < 0) |
1911 | goto nlmsg_failure; | 1911 | goto nlmsg_failure; |
1912 | 1912 | ||
1913 | nlh->nlmsg_len = skb->tail - b; | 1913 | nlh->nlmsg_len = skb->tail - b; |
@@ -2017,7 +2017,7 @@ static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp, | |||
2017 | goto nlmsg_failure; | 2017 | goto nlmsg_failure; |
2018 | if (copy_to_user_sec_ctx(xp, skb)) | 2018 | if (copy_to_user_sec_ctx(xp, skb)) |
2019 | goto nlmsg_failure; | 2019 | goto nlmsg_failure; |
2020 | if (copy_to_user_policy_type(xp, skb) < 0) | 2020 | if (copy_to_user_policy_type(xp->type, skb) < 0) |
2021 | goto nlmsg_failure; | 2021 | goto nlmsg_failure; |
2022 | upe->hard = !!hard; | 2022 | upe->hard = !!hard; |
2023 | 2023 | ||
@@ -2096,7 +2096,7 @@ static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, struct km_event * | |||
2096 | copy_to_user_policy(xp, p, dir); | 2096 | copy_to_user_policy(xp, p, dir); |
2097 | if (copy_to_user_tmpl(xp, skb) < 0) | 2097 | if (copy_to_user_tmpl(xp, skb) < 0) |
2098 | goto nlmsg_failure; | 2098 | goto nlmsg_failure; |
2099 | if (copy_to_user_policy_type(xp, skb) < 0) | 2099 | if (copy_to_user_policy_type(xp->type, skb) < 0) |
2100 | goto nlmsg_failure; | 2100 | goto nlmsg_failure; |
2101 | 2101 | ||
2102 | nlh->nlmsg_len = skb->tail - b; | 2102 | nlh->nlmsg_len = skb->tail - b; |
@@ -2117,7 +2117,6 @@ static int xfrm_notify_policy_flush(struct km_event *c) | |||
2117 | unsigned char *b; | 2117 | unsigned char *b; |
2118 | int len = 0; | 2118 | int len = 0; |
2119 | #ifdef CONFIG_XFRM_SUB_POLICY | 2119 | #ifdef CONFIG_XFRM_SUB_POLICY |
2120 | struct xfrm_userpolicy_type upt; | ||
2121 | len += RTA_SPACE(sizeof(struct xfrm_userpolicy_type)); | 2120 | len += RTA_SPACE(sizeof(struct xfrm_userpolicy_type)); |
2122 | #endif | 2121 | #endif |
2123 | len += NLMSG_LENGTH(0); | 2122 | len += NLMSG_LENGTH(0); |
@@ -2130,12 +2129,8 @@ static int xfrm_notify_policy_flush(struct km_event *c) | |||
2130 | 2129 | ||
2131 | nlh = NLMSG_PUT(skb, c->pid, c->seq, XFRM_MSG_FLUSHPOLICY, 0); | 2130 | nlh = NLMSG_PUT(skb, c->pid, c->seq, XFRM_MSG_FLUSHPOLICY, 0); |
2132 | nlh->nlmsg_flags = 0; | 2131 | nlh->nlmsg_flags = 0; |
2133 | 2132 | if (copy_to_user_policy_type(c->data.type, skb) < 0) | |
2134 | #ifdef CONFIG_XFRM_SUB_POLICY | 2133 | goto nlmsg_failure; |
2135 | memset(&upt, 0, sizeof(upt)); | ||
2136 | upt.type = c->data.type; | ||
2137 | RTA_PUT(skb, XFRMA_POLICY_TYPE, sizeof(upt), &upt); | ||
2138 | #endif | ||
2139 | 2134 | ||
2140 | nlh->nlmsg_len = skb->tail - b; | 2135 | nlh->nlmsg_len = skb->tail - b; |
2141 | 2136 | ||
@@ -2143,9 +2138,6 @@ static int xfrm_notify_policy_flush(struct km_event *c) | |||
2143 | return netlink_broadcast(xfrm_nl, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC); | 2138 | return netlink_broadcast(xfrm_nl, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC); |
2144 | 2139 | ||
2145 | nlmsg_failure: | 2140 | nlmsg_failure: |
2146 | #ifdef CONFIG_XFRM_SUB_POLICY | ||
2147 | rtattr_failure: | ||
2148 | #endif | ||
2149 | kfree_skb(skb); | 2141 | kfree_skb(skb); |
2150 | return -1; | 2142 | return -1; |
2151 | } | 2143 | } |