Diffstat (limited to 'net')
111 files changed, 907 insertions, 651 deletions
diff --git a/net/802/p8023.c b/net/802/p8023.c
index d23e906456eb..53cf05709283 100644
--- a/net/802/p8023.c
+++ b/net/802/p8023.c
@@ -59,3 +59,5 @@ void destroy_8023_client(struct datalink_proto *dl) | |||
59 | 59 | ||
60 | EXPORT_SYMBOL(destroy_8023_client); | 60 | EXPORT_SYMBOL(destroy_8023_client); |
61 | EXPORT_SYMBOL(make_8023_client); | 61 | EXPORT_SYMBOL(make_8023_client); |
62 | |||
63 | MODULE_LICENSE("GPL"); | ||
diff --git a/net/802/psnap.c b/net/802/psnap.c
index 4d638944d933..34e42968b477 100644
--- a/net/802/psnap.c
+++ b/net/802/psnap.c
@@ -59,8 +59,10 @@ static int snap_rcv(struct sk_buff *skb, struct net_device *dev, | |||
59 | proto = find_snap_client(skb->h.raw); | 59 | proto = find_snap_client(skb->h.raw); |
60 | if (proto) { | 60 | if (proto) { |
61 | /* Pass the frame on. */ | 61 | /* Pass the frame on. */ |
62 | u8 *hdr = skb->data; | ||
62 | skb->h.raw += 5; | 63 | skb->h.raw += 5; |
63 | skb_pull(skb, 5); | 64 | skb_pull(skb, 5); |
65 | skb_postpull_rcsum(skb, hdr, 5); | ||
64 | rc = proto->rcvfunc(skb, dev, &snap_packet_type, orig_dev); | 66 | rc = proto->rcvfunc(skb, dev, &snap_packet_type, orig_dev); |
65 | } else { | 67 | } else { |
66 | skb->sk = NULL; | 68 | skb->sk = NULL; |
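The psnap.c hunk above keeps a hardware-computed receive checksum valid after the 5-byte SNAP header is pulled: the start of the header is remembered before skb_pull() and then fed to skb_postpull_rcsum(). A minimal sketch of the same pattern (hypothetical helper name, assuming the 2.6-era skb API where ip_summed uses CHECKSUM_HW):

#include <linux/skbuff.h>

/* Sketch only: pull a fixed-size link-layer header and keep skb->csum
 * consistent. skb_postpull_rcsum() subtracts the pulled bytes from
 * skb->csum when ip_summed is CHECKSUM_HW and is a no-op otherwise. */
static void pull_header_keep_csum(struct sk_buff *skb, unsigned int hlen)
{
	u8 *hdr = skb->data;		/* header bytes about to be removed */

	skb_pull(skb, hlen);		/* advance skb->data past the header */
	skb_postpull_rcsum(skb, hdr, hlen);
}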
diff --git a/net/Kconfig b/net/Kconfig
index bc603d9aea56..5126f58d9c44 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -27,6 +27,13 @@ if NET | |||
27 | 27 | ||
28 | menu "Networking options" | 28 | menu "Networking options" |
29 | 29 | ||
30 | config NETDEBUG | ||
31 | bool "Network packet debugging" | ||
32 | help | ||
33 | You can say Y here if you want to get additional messages useful in | ||
34 | debugging bad packets, but can overwhelm logs under denial of service | ||
35 | attacks. | ||
36 | |||
30 | source "net/packet/Kconfig" | 37 | source "net/packet/Kconfig" |
31 | source "net/unix/Kconfig" | 38 | source "net/unix/Kconfig" |
32 | source "net/xfrm/Kconfig" | 39 | source "net/xfrm/Kconfig" |
diff --git a/net/atm/signaling.c b/net/atm/signaling.c
index e7211a7f382c..31d98b57e1de 100644
--- a/net/atm/signaling.c
+++ b/net/atm/signaling.c
@@ -39,24 +39,19 @@ static DECLARE_WAIT_QUEUE_HEAD(sigd_sleep); | |||
39 | static void sigd_put_skb(struct sk_buff *skb) | 39 | static void sigd_put_skb(struct sk_buff *skb) |
40 | { | 40 | { |
41 | #ifdef WAIT_FOR_DEMON | 41 | #ifdef WAIT_FOR_DEMON |
42 | static unsigned long silence; | ||
43 | DECLARE_WAITQUEUE(wait,current); | 42 | DECLARE_WAITQUEUE(wait,current); |
44 | 43 | ||
45 | add_wait_queue(&sigd_sleep,&wait); | 44 | add_wait_queue(&sigd_sleep,&wait); |
46 | while (!sigd) { | 45 | while (!sigd) { |
47 | set_current_state(TASK_UNINTERRUPTIBLE); | 46 | set_current_state(TASK_UNINTERRUPTIBLE); |
48 | if (time_after(jiffies, silence) || silence == 0) { | 47 | DPRINTK("atmsvc: waiting for signaling demon...\n"); |
49 | printk(KERN_INFO "atmsvc: waiting for signaling demon " | ||
50 | "...\n"); | ||
51 | silence = (jiffies+30*HZ)|1; | ||
52 | } | ||
53 | schedule(); | 48 | schedule(); |
54 | } | 49 | } |
55 | current->state = TASK_RUNNING; | 50 | current->state = TASK_RUNNING; |
56 | remove_wait_queue(&sigd_sleep,&wait); | 51 | remove_wait_queue(&sigd_sleep,&wait); |
57 | #else | 52 | #else |
58 | if (!sigd) { | 53 | if (!sigd) { |
59 | printk(KERN_WARNING "atmsvc: no signaling demon\n"); | 54 | DPRINTK("atmsvc: no signaling demon\n"); |
60 | kfree_skb(skb); | 55 | kfree_skb(skb); |
61 | return; | 56 | return; |
62 | } | 57 | } |
diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c
index f67711f2ee96..894a22558d9d 100644
--- a/net/ax25/sysctl_net_ax25.c
+++ b/net/ax25/sysctl_net_ax25.c
@@ -24,7 +24,7 @@ static int min_t3[1], max_t3[] = {3600 * HZ}; | |||
24 | static int min_idle[1], max_idle[] = {65535 * HZ}; | 24 | static int min_idle[1], max_idle[] = {65535 * HZ}; |
25 | static int min_n2[] = {1}, max_n2[] = {31}; | 25 | static int min_n2[] = {1}, max_n2[] = {31}; |
26 | static int min_paclen[] = {1}, max_paclen[] = {512}; | 26 | static int min_paclen[] = {1}, max_paclen[] = {512}; |
27 | static int min_proto[1], max_proto[] = {3}; | 27 | static int min_proto[1], max_proto[] = { AX25_PROTO_MAX }; |
28 | static int min_ds_timeout[1], max_ds_timeout[] = {65535 * HZ}; | 28 | static int min_ds_timeout[1], max_ds_timeout[] = {65535 * HZ}; |
29 | 29 | ||
30 | static struct ctl_table_header *ax25_table_header; | 30 | static struct ctl_table_header *ax25_table_header; |
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index bdb6458c6bd5..97bdec73d17e 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -143,13 +143,15 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb) | |||
143 | static int hci_sock_release(struct socket *sock) | 143 | static int hci_sock_release(struct socket *sock) |
144 | { | 144 | { |
145 | struct sock *sk = sock->sk; | 145 | struct sock *sk = sock->sk; |
146 | struct hci_dev *hdev = hci_pi(sk)->hdev; | 146 | struct hci_dev *hdev; |
147 | 147 | ||
148 | BT_DBG("sock %p sk %p", sock, sk); | 148 | BT_DBG("sock %p sk %p", sock, sk); |
149 | 149 | ||
150 | if (!sk) | 150 | if (!sk) |
151 | return 0; | 151 | return 0; |
152 | 152 | ||
153 | hdev = hci_pi(sk)->hdev; | ||
154 | |||
153 | bt_sock_unlink(&hci_sk_list, sk); | 155 | bt_sock_unlink(&hci_sk_list, sk); |
154 | 156 | ||
155 | if (hdev) { | 157 | if (hdev) { |
@@ -311,14 +313,18 @@ static int hci_sock_getname(struct socket *sock, struct sockaddr *addr, int *add | |||
311 | { | 313 | { |
312 | struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr; | 314 | struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr; |
313 | struct sock *sk = sock->sk; | 315 | struct sock *sk = sock->sk; |
316 | struct hci_dev *hdev = hci_pi(sk)->hdev; | ||
314 | 317 | ||
315 | BT_DBG("sock %p sk %p", sock, sk); | 318 | BT_DBG("sock %p sk %p", sock, sk); |
316 | 319 | ||
320 | if (!hdev) | ||
321 | return -EBADFD; | ||
322 | |||
317 | lock_sock(sk); | 323 | lock_sock(sk); |
318 | 324 | ||
319 | *addr_len = sizeof(*haddr); | 325 | *addr_len = sizeof(*haddr); |
320 | haddr->hci_family = AF_BLUETOOTH; | 326 | haddr->hci_family = AF_BLUETOOTH; |
321 | haddr->hci_dev = hci_pi(sk)->hdev->id; | 327 | haddr->hci_dev = hdev->id; |
322 | 328 | ||
323 | release_sock(sk); | 329 | release_sock(sk); |
324 | return 0; | 330 | return 0; |
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 0d89d6434136..5b4253c61f62 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -46,13 +46,15 @@ | |||
46 | #include <net/bluetooth/l2cap.h> | 46 | #include <net/bluetooth/l2cap.h> |
47 | #include <net/bluetooth/rfcomm.h> | 47 | #include <net/bluetooth/rfcomm.h> |
48 | 48 | ||
49 | #define VERSION "1.6" | ||
50 | |||
51 | #ifndef CONFIG_BT_RFCOMM_DEBUG | 49 | #ifndef CONFIG_BT_RFCOMM_DEBUG |
52 | #undef BT_DBG | 50 | #undef BT_DBG |
53 | #define BT_DBG(D...) | 51 | #define BT_DBG(D...) |
54 | #endif | 52 | #endif |
55 | 53 | ||
54 | #define VERSION "1.7" | ||
55 | |||
56 | static unsigned int l2cap_mtu = RFCOMM_MAX_L2CAP_MTU; | ||
57 | |||
56 | static struct task_struct *rfcomm_thread; | 58 | static struct task_struct *rfcomm_thread; |
57 | 59 | ||
58 | static DECLARE_MUTEX(rfcomm_sem); | 60 | static DECLARE_MUTEX(rfcomm_sem); |
@@ -623,7 +625,7 @@ static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src, bdaddr_t *dst | |||
623 | /* Set L2CAP options */ | 625 | /* Set L2CAP options */ |
624 | sk = sock->sk; | 626 | sk = sock->sk; |
625 | lock_sock(sk); | 627 | lock_sock(sk); |
626 | l2cap_pi(sk)->imtu = RFCOMM_MAX_L2CAP_MTU; | 628 | l2cap_pi(sk)->imtu = l2cap_mtu; |
627 | release_sock(sk); | 629 | release_sock(sk); |
628 | 630 | ||
629 | s = rfcomm_session_add(sock, BT_BOUND); | 631 | s = rfcomm_session_add(sock, BT_BOUND); |
@@ -1868,7 +1870,7 @@ static int rfcomm_add_listener(bdaddr_t *ba) | |||
1868 | /* Set L2CAP options */ | 1870 | /* Set L2CAP options */ |
1869 | sk = sock->sk; | 1871 | sk = sock->sk; |
1870 | lock_sock(sk); | 1872 | lock_sock(sk); |
1871 | l2cap_pi(sk)->imtu = RFCOMM_MAX_L2CAP_MTU; | 1873 | l2cap_pi(sk)->imtu = l2cap_mtu; |
1872 | release_sock(sk); | 1874 | release_sock(sk); |
1873 | 1875 | ||
1874 | /* Start listening on the socket */ | 1876 | /* Start listening on the socket */ |
@@ -2070,6 +2072,9 @@ static void __exit rfcomm_exit(void) | |||
2070 | module_init(rfcomm_init); | 2072 | module_init(rfcomm_init); |
2071 | module_exit(rfcomm_exit); | 2073 | module_exit(rfcomm_exit); |
2072 | 2074 | ||
2075 | module_param(l2cap_mtu, uint, 0644); | ||
2076 | MODULE_PARM_DESC(l2cap_mtu, "Default MTU for the L2CAP connection"); | ||
2077 | |||
2073 | MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>, Marcel Holtmann <marcel@holtmann.org>"); | 2078 | MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>, Marcel Holtmann <marcel@holtmann.org>"); |
2074 | MODULE_DESCRIPTION("Bluetooth RFCOMM ver " VERSION); | 2079 | MODULE_DESCRIPTION("Bluetooth RFCOMM ver " VERSION); |
2075 | MODULE_VERSION(VERSION); | 2080 | MODULE_VERSION(VERSION); |
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index ba442883e877..f36b35edd60c 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -79,27 +79,48 @@ static int port_cost(struct net_device *dev) | |||
79 | */ | 79 | */ |
80 | static void port_carrier_check(void *arg) | 80 | static void port_carrier_check(void *arg) |
81 | { | 81 | { |
82 | struct net_bridge_port *p = arg; | 82 | struct net_device *dev = arg; |
83 | struct net_bridge_port *p; | ||
84 | struct net_bridge *br; | ||
83 | 85 | ||
84 | rtnl_lock(); | 86 | rtnl_lock(); |
85 | if (netif_carrier_ok(p->dev)) { | 87 | p = dev->br_port; |
86 | u32 cost = port_cost(p->dev); | 88 | if (!p) |
89 | goto done; | ||
90 | br = p->br; | ||
91 | |||
92 | if (netif_carrier_ok(dev)) | ||
93 | p->path_cost = port_cost(dev); | ||
87 | 94 | ||
88 | spin_lock_bh(&p->br->lock); | 95 | if (br->dev->flags & IFF_UP) { |
89 | if (p->state == BR_STATE_DISABLED) { | 96 | spin_lock_bh(&br->lock); |
90 | p->path_cost = cost; | 97 | if (netif_carrier_ok(dev)) { |
91 | br_stp_enable_port(p); | 98 | if (p->state == BR_STATE_DISABLED) |
99 | br_stp_enable_port(p); | ||
100 | } else { | ||
101 | if (p->state != BR_STATE_DISABLED) | ||
102 | br_stp_disable_port(p); | ||
92 | } | 103 | } |
93 | spin_unlock_bh(&p->br->lock); | 104 | spin_unlock_bh(&br->lock); |
94 | } else { | ||
95 | spin_lock_bh(&p->br->lock); | ||
96 | if (p->state != BR_STATE_DISABLED) | ||
97 | br_stp_disable_port(p); | ||
98 | spin_unlock_bh(&p->br->lock); | ||
99 | } | 105 | } |
106 | done: | ||
100 | rtnl_unlock(); | 107 | rtnl_unlock(); |
101 | } | 108 | } |
102 | 109 | ||
110 | static void release_nbp(struct kobject *kobj) | ||
111 | { | ||
112 | struct net_bridge_port *p | ||
113 | = container_of(kobj, struct net_bridge_port, kobj); | ||
114 | kfree(p); | ||
115 | } | ||
116 | |||
117 | static struct kobj_type brport_ktype = { | ||
118 | #ifdef CONFIG_SYSFS | ||
119 | .sysfs_ops = &brport_sysfs_ops, | ||
120 | #endif | ||
121 | .release = release_nbp, | ||
122 | }; | ||
123 | |||
103 | static void destroy_nbp(struct net_bridge_port *p) | 124 | static void destroy_nbp(struct net_bridge_port *p) |
104 | { | 125 | { |
105 | struct net_device *dev = p->dev; | 126 | struct net_device *dev = p->dev; |
@@ -108,7 +129,7 @@ static void destroy_nbp(struct net_bridge_port *p) | |||
108 | p->dev = NULL; | 129 | p->dev = NULL; |
109 | dev_put(dev); | 130 | dev_put(dev); |
110 | 131 | ||
111 | br_sysfs_freeif(p); | 132 | kobject_put(&p->kobj); |
112 | } | 133 | } |
113 | 134 | ||
114 | static void destroy_nbp_rcu(struct rcu_head *head) | 135 | static void destroy_nbp_rcu(struct rcu_head *head) |
@@ -118,17 +139,25 @@ static void destroy_nbp_rcu(struct rcu_head *head) | |||
118 | destroy_nbp(p); | 139 | destroy_nbp(p); |
119 | } | 140 | } |
120 | 141 | ||
121 | /* called with RTNL */ | 142 | /* Delete port(interface) from bridge is done in two steps. |
143 | * via RCU. First step, marks device as down. That deletes | ||
144 | * all the timers and stops new packets from flowing through. | ||
145 | * | ||
146 | * Final cleanup doesn't occur until after all CPU's finished | ||
147 | * processing packets. | ||
148 | * | ||
149 | * Protected from multiple admin operations by RTNL mutex | ||
150 | */ | ||
122 | static void del_nbp(struct net_bridge_port *p) | 151 | static void del_nbp(struct net_bridge_port *p) |
123 | { | 152 | { |
124 | struct net_bridge *br = p->br; | 153 | struct net_bridge *br = p->br; |
125 | struct net_device *dev = p->dev; | 154 | struct net_device *dev = p->dev; |
126 | 155 | ||
127 | dev->br_port = NULL; | 156 | sysfs_remove_link(&br->ifobj, dev->name); |
157 | |||
128 | dev_set_promiscuity(dev, -1); | 158 | dev_set_promiscuity(dev, -1); |
129 | 159 | ||
130 | cancel_delayed_work(&p->carrier_check); | 160 | cancel_delayed_work(&p->carrier_check); |
131 | flush_scheduled_work(); | ||
132 | 161 | ||
133 | spin_lock_bh(&br->lock); | 162 | spin_lock_bh(&br->lock); |
134 | br_stp_disable_port(p); | 163 | br_stp_disable_port(p); |
@@ -138,10 +167,11 @@ static void del_nbp(struct net_bridge_port *p) | |||
138 | 167 | ||
139 | list_del_rcu(&p->list); | 168 | list_del_rcu(&p->list); |
140 | 169 | ||
141 | del_timer_sync(&p->message_age_timer); | 170 | rcu_assign_pointer(dev->br_port, NULL); |
142 | del_timer_sync(&p->forward_delay_timer); | 171 | |
143 | del_timer_sync(&p->hold_timer); | 172 | kobject_uevent(&p->kobj, KOBJ_REMOVE); |
144 | 173 | kobject_del(&p->kobj); | |
174 | |||
145 | call_rcu(&p->rcu, destroy_nbp_rcu); | 175 | call_rcu(&p->rcu, destroy_nbp_rcu); |
146 | } | 176 | } |
147 | 177 | ||
@@ -151,7 +181,6 @@ static void del_br(struct net_bridge *br) | |||
151 | struct net_bridge_port *p, *n; | 181 | struct net_bridge_port *p, *n; |
152 | 182 | ||
153 | list_for_each_entry_safe(p, n, &br->port_list, list) { | 183 | list_for_each_entry_safe(p, n, &br->port_list, list) { |
154 | br_sysfs_removeif(p); | ||
155 | del_nbp(p); | 184 | del_nbp(p); |
156 | } | 185 | } |
157 | 186 | ||
@@ -245,12 +274,17 @@ static struct net_bridge_port *new_nbp(struct net_bridge *br, | |||
245 | p->dev = dev; | 274 | p->dev = dev; |
246 | p->path_cost = port_cost(dev); | 275 | p->path_cost = port_cost(dev); |
247 | p->priority = 0x8000 >> BR_PORT_BITS; | 276 | p->priority = 0x8000 >> BR_PORT_BITS; |
248 | dev->br_port = p; | ||
249 | p->port_no = index; | 277 | p->port_no = index; |
250 | br_init_port(p); | 278 | br_init_port(p); |
251 | p->state = BR_STATE_DISABLED; | 279 | p->state = BR_STATE_DISABLED; |
252 | INIT_WORK(&p->carrier_check, port_carrier_check, p); | 280 | INIT_WORK(&p->carrier_check, port_carrier_check, dev); |
281 | br_stp_port_timer_init(p); | ||
282 | |||
253 | kobject_init(&p->kobj); | 283 | kobject_init(&p->kobj); |
284 | kobject_set_name(&p->kobj, SYSFS_BRIDGE_PORT_ATTR); | ||
285 | p->kobj.ktype = &brport_ktype; | ||
286 | p->kobj.parent = &(dev->class_dev.kobj); | ||
287 | p->kobj.kset = NULL; | ||
254 | 288 | ||
255 | return p; | 289 | return p; |
256 | } | 290 | } |
@@ -379,30 +413,43 @@ int br_add_if(struct net_bridge *br, struct net_device *dev) | |||
379 | if (dev->br_port != NULL) | 413 | if (dev->br_port != NULL) |
380 | return -EBUSY; | 414 | return -EBUSY; |
381 | 415 | ||
382 | if (IS_ERR(p = new_nbp(br, dev))) | 416 | p = new_nbp(br, dev); |
417 | if (IS_ERR(p)) | ||
383 | return PTR_ERR(p); | 418 | return PTR_ERR(p); |
384 | 419 | ||
385 | if ((err = br_fdb_insert(br, p, dev->dev_addr))) | 420 | err = kobject_add(&p->kobj); |
386 | destroy_nbp(p); | 421 | if (err) |
387 | 422 | goto err0; | |
388 | else if ((err = br_sysfs_addif(p))) | ||
389 | del_nbp(p); | ||
390 | else { | ||
391 | dev_set_promiscuity(dev, 1); | ||
392 | 423 | ||
393 | list_add_rcu(&p->list, &br->port_list); | 424 | err = br_fdb_insert(br, p, dev->dev_addr); |
425 | if (err) | ||
426 | goto err1; | ||
394 | 427 | ||
395 | spin_lock_bh(&br->lock); | 428 | err = br_sysfs_addif(p); |
396 | br_stp_recalculate_bridge_id(br); | 429 | if (err) |
397 | br_features_recompute(br); | 430 | goto err2; |
398 | if ((br->dev->flags & IFF_UP) | ||
399 | && (dev->flags & IFF_UP) && netif_carrier_ok(dev)) | ||
400 | br_stp_enable_port(p); | ||
401 | spin_unlock_bh(&br->lock); | ||
402 | 431 | ||
403 | dev_set_mtu(br->dev, br_min_mtu(br)); | 432 | rcu_assign_pointer(dev->br_port, p); |
404 | } | 433 | dev_set_promiscuity(dev, 1); |
434 | |||
435 | list_add_rcu(&p->list, &br->port_list); | ||
405 | 436 | ||
437 | spin_lock_bh(&br->lock); | ||
438 | br_stp_recalculate_bridge_id(br); | ||
439 | br_features_recompute(br); | ||
440 | schedule_delayed_work(&p->carrier_check, BR_PORT_DEBOUNCE); | ||
441 | spin_unlock_bh(&br->lock); | ||
442 | |||
443 | dev_set_mtu(br->dev, br_min_mtu(br)); | ||
444 | kobject_uevent(&p->kobj, KOBJ_ADD); | ||
445 | |||
446 | return 0; | ||
447 | err2: | ||
448 | br_fdb_delete_by_port(br, p); | ||
449 | err1: | ||
450 | kobject_del(&p->kobj); | ||
451 | err0: | ||
452 | kobject_put(&p->kobj); | ||
406 | return err; | 453 | return err; |
407 | } | 454 | } |
408 | 455 | ||
@@ -414,7 +461,6 @@ int br_del_if(struct net_bridge *br, struct net_device *dev) | |||
414 | if (!p || p->br != br) | 461 | if (!p || p->br != br) |
415 | return -EINVAL; | 462 | return -EINVAL; |
416 | 463 | ||
417 | br_sysfs_removeif(p); | ||
418 | del_nbp(p); | 464 | del_nbp(p); |
419 | 465 | ||
420 | spin_lock_bh(&br->lock); | 466 | spin_lock_bh(&br->lock); |
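The br_if.c changes above move port teardown to the RCU scheme described in the new del_nbp() comment: the device's br_port pointer is cleared first, packet-path readers check it with rcu_dereference(), and the memory is freed only after a grace period. A simplified sketch of that pattern (hypothetical types; the real code also ties the port's lifetime to a kobject):

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_port {
	struct rcu_head rcu;
	/* ... per-port state ... */
};

struct my_dev {
	struct my_port *port;	/* cleared before the port is freed */
};

/* Reader: per-packet receive path, runs under rcu_read_lock(). */
static void handle_frame(struct my_dev *dev)
{
	struct my_port *p = rcu_dereference(dev->port);

	if (!p)
		return;		/* port already unplugged: drop */
	/* ... deliver through p ... */
}

static void free_port_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct my_port, rcu));
}

/* Writer: admin path, serialized by the RTNL mutex in the bridge code. */
static void unplug_port(struct my_dev *dev, struct my_port *p)
{
	rcu_assign_pointer(dev->port, NULL);	/* hide from new readers */
	call_rcu(&p->rcu, free_port_rcu);	/* free after a grace period */
}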
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index e3a73cead6b6..4eef83755315 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -45,18 +45,20 @@ static void br_pass_frame_up(struct net_bridge *br, struct sk_buff *skb) | |||
45 | int br_handle_frame_finish(struct sk_buff *skb) | 45 | int br_handle_frame_finish(struct sk_buff *skb) |
46 | { | 46 | { |
47 | const unsigned char *dest = eth_hdr(skb)->h_dest; | 47 | const unsigned char *dest = eth_hdr(skb)->h_dest; |
48 | struct net_bridge_port *p = skb->dev->br_port; | 48 | struct net_bridge_port *p = rcu_dereference(skb->dev->br_port); |
49 | struct net_bridge *br = p->br; | 49 | struct net_bridge *br; |
50 | struct net_bridge_fdb_entry *dst; | 50 | struct net_bridge_fdb_entry *dst; |
51 | int passedup = 0; | 51 | int passedup = 0; |
52 | 52 | ||
53 | if (!p || p->state == BR_STATE_DISABLED) | ||
54 | goto drop; | ||
55 | |||
53 | /* insert into forwarding database after filtering to avoid spoofing */ | 56 | /* insert into forwarding database after filtering to avoid spoofing */ |
54 | br_fdb_update(p->br, p, eth_hdr(skb)->h_source); | 57 | br = p->br; |
58 | br_fdb_update(br, p, eth_hdr(skb)->h_source); | ||
55 | 59 | ||
56 | if (p->state == BR_STATE_LEARNING) { | 60 | if (p->state == BR_STATE_LEARNING) |
57 | kfree_skb(skb); | 61 | goto drop; |
58 | goto out; | ||
59 | } | ||
60 | 62 | ||
61 | if (br->dev->flags & IFF_PROMISC) { | 63 | if (br->dev->flags & IFF_PROMISC) { |
62 | struct sk_buff *skb2; | 64 | struct sk_buff *skb2; |
@@ -93,6 +95,9 @@ int br_handle_frame_finish(struct sk_buff *skb) | |||
93 | 95 | ||
94 | out: | 96 | out: |
95 | return 0; | 97 | return 0; |
98 | drop: | ||
99 | kfree_skb(skb); | ||
100 | goto out; | ||
96 | } | 101 | } |
97 | 102 | ||
98 | /* | 103 | /* |
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 7cac3fb9f809..e060aad8624d 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -51,9 +51,6 @@ | |||
51 | #define store_orig_dstaddr(skb) (skb_origaddr(skb) = (skb)->nh.iph->daddr) | 51 | #define store_orig_dstaddr(skb) (skb_origaddr(skb) = (skb)->nh.iph->daddr) |
52 | #define dnat_took_place(skb) (skb_origaddr(skb) != (skb)->nh.iph->daddr) | 52 | #define dnat_took_place(skb) (skb_origaddr(skb) != (skb)->nh.iph->daddr) |
53 | 53 | ||
54 | #define has_bridge_parent(device) ((device)->br_port != NULL) | ||
55 | #define bridge_parent(device) ((device)->br_port->br->dev) | ||
56 | |||
57 | #ifdef CONFIG_SYSCTL | 54 | #ifdef CONFIG_SYSCTL |
58 | static struct ctl_table_header *brnf_sysctl_header; | 55 | static struct ctl_table_header *brnf_sysctl_header; |
59 | static int brnf_call_iptables = 1; | 56 | static int brnf_call_iptables = 1; |
@@ -93,11 +90,18 @@ static struct rtable __fake_rtable = { | |||
93 | .dev = &__fake_net_device, | 90 | .dev = &__fake_net_device, |
94 | .path = &__fake_rtable.u.dst, | 91 | .path = &__fake_rtable.u.dst, |
95 | .metrics = {[RTAX_MTU - 1] = 1500}, | 92 | .metrics = {[RTAX_MTU - 1] = 1500}, |
93 | .flags = DST_NOXFRM, | ||
96 | } | 94 | } |
97 | }, | 95 | }, |
98 | .rt_flags = 0, | 96 | .rt_flags = 0, |
99 | }; | 97 | }; |
100 | 98 | ||
99 | static inline struct net_device *bridge_parent(const struct net_device *dev) | ||
100 | { | ||
101 | struct net_bridge_port *port = rcu_dereference(dev->br_port); | ||
102 | |||
103 | return port ? port->br->dev : NULL; | ||
104 | } | ||
101 | 105 | ||
102 | /* PF_BRIDGE/PRE_ROUTING *********************************************/ | 106 | /* PF_BRIDGE/PRE_ROUTING *********************************************/ |
103 | /* Undo the changes made for ip6tables PREROUTING and continue the | 107 | /* Undo the changes made for ip6tables PREROUTING and continue the |
@@ -189,11 +193,15 @@ static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb) | |||
189 | skb->nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING; | 193 | skb->nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING; |
190 | 194 | ||
191 | skb->dev = bridge_parent(skb->dev); | 195 | skb->dev = bridge_parent(skb->dev); |
192 | if (skb->protocol == __constant_htons(ETH_P_8021Q)) { | 196 | if (!skb->dev) |
193 | skb_pull(skb, VLAN_HLEN); | 197 | kfree_skb(skb); |
194 | skb->nh.raw += VLAN_HLEN; | 198 | else { |
199 | if (skb->protocol == __constant_htons(ETH_P_8021Q)) { | ||
200 | skb_pull(skb, VLAN_HLEN); | ||
201 | skb->nh.raw += VLAN_HLEN; | ||
202 | } | ||
203 | skb->dst->output(skb); | ||
195 | } | 204 | } |
196 | skb->dst->output(skb); | ||
197 | return 0; | 205 | return 0; |
198 | } | 206 | } |
199 | 207 | ||
@@ -270,7 +278,7 @@ bridged_dnat: | |||
270 | } | 278 | } |
271 | 279 | ||
272 | /* Some common code for IPv4/IPv6 */ | 280 | /* Some common code for IPv4/IPv6 */ |
273 | static void setup_pre_routing(struct sk_buff *skb) | 281 | static struct net_device *setup_pre_routing(struct sk_buff *skb) |
274 | { | 282 | { |
275 | struct nf_bridge_info *nf_bridge = skb->nf_bridge; | 283 | struct nf_bridge_info *nf_bridge = skb->nf_bridge; |
276 | 284 | ||
@@ -282,6 +290,8 @@ static void setup_pre_routing(struct sk_buff *skb) | |||
282 | nf_bridge->mask |= BRNF_NF_BRIDGE_PREROUTING; | 290 | nf_bridge->mask |= BRNF_NF_BRIDGE_PREROUTING; |
283 | nf_bridge->physindev = skb->dev; | 291 | nf_bridge->physindev = skb->dev; |
284 | skb->dev = bridge_parent(skb->dev); | 292 | skb->dev = bridge_parent(skb->dev); |
293 | |||
294 | return skb->dev; | ||
285 | } | 295 | } |
286 | 296 | ||
287 | /* We only check the length. A bridge shouldn't do any hop-by-hop stuff anyway */ | 297 | /* We only check the length. A bridge shouldn't do any hop-by-hop stuff anyway */ |
@@ -376,7 +386,8 @@ static unsigned int br_nf_pre_routing_ipv6(unsigned int hook, | |||
376 | nf_bridge_put(skb->nf_bridge); | 386 | nf_bridge_put(skb->nf_bridge); |
377 | if ((nf_bridge = nf_bridge_alloc(skb)) == NULL) | 387 | if ((nf_bridge = nf_bridge_alloc(skb)) == NULL) |
378 | return NF_DROP; | 388 | return NF_DROP; |
379 | setup_pre_routing(skb); | 389 | if (!setup_pre_routing(skb)) |
390 | return NF_DROP; | ||
380 | 391 | ||
381 | NF_HOOK(PF_INET6, NF_IP6_PRE_ROUTING, skb, skb->dev, NULL, | 392 | NF_HOOK(PF_INET6, NF_IP6_PRE_ROUTING, skb, skb->dev, NULL, |
382 | br_nf_pre_routing_finish_ipv6); | 393 | br_nf_pre_routing_finish_ipv6); |
@@ -465,7 +476,8 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff **pskb, | |||
465 | nf_bridge_put(skb->nf_bridge); | 476 | nf_bridge_put(skb->nf_bridge); |
466 | if ((nf_bridge = nf_bridge_alloc(skb)) == NULL) | 477 | if ((nf_bridge = nf_bridge_alloc(skb)) == NULL) |
467 | return NF_DROP; | 478 | return NF_DROP; |
468 | setup_pre_routing(skb); | 479 | if (!setup_pre_routing(skb)) |
480 | return NF_DROP; | ||
469 | store_orig_dstaddr(skb); | 481 | store_orig_dstaddr(skb); |
470 | 482 | ||
471 | NF_HOOK(PF_INET, NF_IP_PRE_ROUTING, skb, skb->dev, NULL, | 483 | NF_HOOK(PF_INET, NF_IP_PRE_ROUTING, skb, skb->dev, NULL, |
@@ -539,11 +551,16 @@ static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff **pskb, | |||
539 | struct sk_buff *skb = *pskb; | 551 | struct sk_buff *skb = *pskb; |
540 | struct nf_bridge_info *nf_bridge; | 552 | struct nf_bridge_info *nf_bridge; |
541 | struct vlan_ethhdr *hdr = vlan_eth_hdr(skb); | 553 | struct vlan_ethhdr *hdr = vlan_eth_hdr(skb); |
554 | struct net_device *parent; | ||
542 | int pf; | 555 | int pf; |
543 | 556 | ||
544 | if (!skb->nf_bridge) | 557 | if (!skb->nf_bridge) |
545 | return NF_ACCEPT; | 558 | return NF_ACCEPT; |
546 | 559 | ||
560 | parent = bridge_parent(out); | ||
561 | if (!parent) | ||
562 | return NF_DROP; | ||
563 | |||
547 | if (skb->protocol == __constant_htons(ETH_P_IP) || IS_VLAN_IP) | 564 | if (skb->protocol == __constant_htons(ETH_P_IP) || IS_VLAN_IP) |
548 | pf = PF_INET; | 565 | pf = PF_INET; |
549 | else | 566 | else |
@@ -564,8 +581,8 @@ static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff **pskb, | |||
564 | nf_bridge->mask |= BRNF_BRIDGED; | 581 | nf_bridge->mask |= BRNF_BRIDGED; |
565 | nf_bridge->physoutdev = skb->dev; | 582 | nf_bridge->physoutdev = skb->dev; |
566 | 583 | ||
567 | NF_HOOK(pf, NF_IP_FORWARD, skb, bridge_parent(in), | 584 | NF_HOOK(pf, NF_IP_FORWARD, skb, bridge_parent(in), parent, |
568 | bridge_parent(out), br_nf_forward_finish); | 585 | br_nf_forward_finish); |
569 | 586 | ||
570 | return NF_STOLEN; | 587 | return NF_STOLEN; |
571 | } | 588 | } |
@@ -688,6 +705,8 @@ static unsigned int br_nf_local_out(unsigned int hook, struct sk_buff **pskb, | |||
688 | goto out; | 705 | goto out; |
689 | } | 706 | } |
690 | realoutdev = bridge_parent(skb->dev); | 707 | realoutdev = bridge_parent(skb->dev); |
708 | if (!realoutdev) | ||
709 | return NF_DROP; | ||
691 | 710 | ||
692 | #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) | 711 | #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) |
693 | /* iptables should match -o br0.x */ | 712 | /* iptables should match -o br0.x */ |
@@ -701,9 +720,11 @@ static unsigned int br_nf_local_out(unsigned int hook, struct sk_buff **pskb, | |||
701 | /* IP forwarded traffic has a physindev, locally | 720 | /* IP forwarded traffic has a physindev, locally |
702 | * generated traffic hasn't. */ | 721 | * generated traffic hasn't. */ |
703 | if (realindev != NULL) { | 722 | if (realindev != NULL) { |
704 | if (!(nf_bridge->mask & BRNF_DONT_TAKE_PARENT) && | 723 | if (!(nf_bridge->mask & BRNF_DONT_TAKE_PARENT) ) { |
705 | has_bridge_parent(realindev)) | 724 | struct net_device *parent = bridge_parent(realindev); |
706 | realindev = bridge_parent(realindev); | 725 | if (parent) |
726 | realindev = parent; | ||
727 | } | ||
707 | 728 | ||
708 | NF_HOOK_THRESH(pf, NF_IP_FORWARD, skb, realindev, | 729 | NF_HOOK_THRESH(pf, NF_IP_FORWARD, skb, realindev, |
709 | realoutdev, br_nf_local_out_finish, | 730 | realoutdev, br_nf_local_out_finish, |
@@ -743,6 +764,9 @@ static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff **pskb, | |||
743 | if (!nf_bridge) | 764 | if (!nf_bridge) |
744 | return NF_ACCEPT; | 765 | return NF_ACCEPT; |
745 | 766 | ||
767 | if (!realoutdev) | ||
768 | return NF_DROP; | ||
769 | |||
746 | if (skb->protocol == __constant_htons(ETH_P_IP) || IS_VLAN_IP) | 770 | if (skb->protocol == __constant_htons(ETH_P_IP) || IS_VLAN_IP) |
747 | pf = PF_INET; | 771 | pf = PF_INET; |
748 | else | 772 | else |
@@ -782,8 +806,8 @@ static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff **pskb, | |||
782 | print_error: | 806 | print_error: |
783 | if (skb->dev != NULL) { | 807 | if (skb->dev != NULL) { |
784 | printk("[%s]", skb->dev->name); | 808 | printk("[%s]", skb->dev->name); |
785 | if (has_bridge_parent(skb->dev)) | 809 | if (realoutdev) |
786 | printk("[%s]", bridge_parent(skb->dev)->name); | 810 | printk("[%s]", realoutdev->name); |
787 | } | 811 | } |
788 | printk(" head:%p, raw:%p, data:%p\n", skb->head, skb->mac.raw, | 812 | printk(" head:%p, raw:%p, data:%p\n", skb->head, skb->mac.raw, |
789 | skb->data); | 813 | skb->data); |
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index c5bd631ffcd5..8f10e09f251b 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -232,9 +232,8 @@ extern void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent); | |||
232 | 232 | ||
233 | #ifdef CONFIG_SYSFS | 233 | #ifdef CONFIG_SYSFS |
234 | /* br_sysfs_if.c */ | 234 | /* br_sysfs_if.c */ |
235 | extern struct sysfs_ops brport_sysfs_ops; | ||
235 | extern int br_sysfs_addif(struct net_bridge_port *p); | 236 | extern int br_sysfs_addif(struct net_bridge_port *p); |
236 | extern void br_sysfs_removeif(struct net_bridge_port *p); | ||
237 | extern void br_sysfs_freeif(struct net_bridge_port *p); | ||
238 | 237 | ||
239 | /* br_sysfs_br.c */ | 238 | /* br_sysfs_br.c */ |
240 | extern int br_sysfs_addbr(struct net_device *dev); | 239 | extern int br_sysfs_addbr(struct net_device *dev); |
@@ -243,8 +242,6 @@ extern void br_sysfs_delbr(struct net_device *dev); | |||
243 | #else | 242 | #else |
244 | 243 | ||
245 | #define br_sysfs_addif(p) (0) | 244 | #define br_sysfs_addif(p) (0) |
246 | #define br_sysfs_removeif(p) do { } while(0) | ||
247 | #define br_sysfs_freeif(p) kfree(p) | ||
248 | #define br_sysfs_addbr(dev) (0) | 245 | #define br_sysfs_addbr(dev) (0) |
249 | #define br_sysfs_delbr(dev) do { } while(0) | 246 | #define br_sysfs_delbr(dev) do { } while(0) |
250 | #endif /* CONFIG_SYSFS */ | 247 | #endif /* CONFIG_SYSFS */ |
diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c
index d071f1c9ad0b..296f6a487c52 100644
--- a/net/bridge/br_stp_bpdu.c
+++ b/net/bridge/br_stp_bpdu.c
@@ -133,29 +133,35 @@ void br_send_tcn_bpdu(struct net_bridge_port *p) | |||
133 | 133 | ||
134 | static const unsigned char header[6] = {0x42, 0x42, 0x03, 0x00, 0x00, 0x00}; | 134 | static const unsigned char header[6] = {0x42, 0x42, 0x03, 0x00, 0x00, 0x00}; |
135 | 135 | ||
136 | /* NO locks */ | 136 | /* NO locks, but rcu_read_lock (preempt_disabled) */ |
137 | int br_stp_handle_bpdu(struct sk_buff *skb) | 137 | int br_stp_handle_bpdu(struct sk_buff *skb) |
138 | { | 138 | { |
139 | struct net_bridge_port *p = skb->dev->br_port; | 139 | struct net_bridge_port *p = rcu_dereference(skb->dev->br_port); |
140 | struct net_bridge *br = p->br; | 140 | struct net_bridge *br; |
141 | unsigned char *buf; | 141 | unsigned char *buf; |
142 | 142 | ||
143 | if (!p) | ||
144 | goto err; | ||
145 | |||
146 | br = p->br; | ||
147 | spin_lock(&br->lock); | ||
148 | |||
149 | if (p->state == BR_STATE_DISABLED || !(br->dev->flags & IFF_UP)) | ||
150 | goto out; | ||
151 | |||
143 | /* insert into forwarding database after filtering to avoid spoofing */ | 152 | /* insert into forwarding database after filtering to avoid spoofing */ |
144 | br_fdb_update(p->br, p, eth_hdr(skb)->h_source); | 153 | br_fdb_update(br, p, eth_hdr(skb)->h_source); |
154 | |||
155 | if (!br->stp_enabled) | ||
156 | goto out; | ||
145 | 157 | ||
146 | /* need at least the 802 and STP headers */ | 158 | /* need at least the 802 and STP headers */ |
147 | if (!pskb_may_pull(skb, sizeof(header)+1) || | 159 | if (!pskb_may_pull(skb, sizeof(header)+1) || |
148 | memcmp(skb->data, header, sizeof(header))) | 160 | memcmp(skb->data, header, sizeof(header))) |
149 | goto err; | 161 | goto out; |
150 | 162 | ||
151 | buf = skb_pull(skb, sizeof(header)); | 163 | buf = skb_pull(skb, sizeof(header)); |
152 | 164 | ||
153 | spin_lock_bh(&br->lock); | ||
154 | if (p->state == BR_STATE_DISABLED | ||
155 | || !(br->dev->flags & IFF_UP) | ||
156 | || !br->stp_enabled) | ||
157 | goto out; | ||
158 | |||
159 | if (buf[0] == BPDU_TYPE_CONFIG) { | 165 | if (buf[0] == BPDU_TYPE_CONFIG) { |
160 | struct br_config_bpdu bpdu; | 166 | struct br_config_bpdu bpdu; |
161 | 167 | ||
@@ -201,7 +207,7 @@ int br_stp_handle_bpdu(struct sk_buff *skb) | |||
201 | br_received_tcn_bpdu(p); | 207 | br_received_tcn_bpdu(p); |
202 | } | 208 | } |
203 | out: | 209 | out: |
204 | spin_unlock_bh(&br->lock); | 210 | spin_unlock(&br->lock); |
205 | err: | 211 | err: |
206 | kfree_skb(skb); | 212 | kfree_skb(skb); |
207 | return 0; | 213 | return 0; |
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index cc047f7fb6ef..23dea1422c9a 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -39,8 +39,6 @@ void br_init_port(struct net_bridge_port *p) | |||
39 | p->state = BR_STATE_BLOCKING; | 39 | p->state = BR_STATE_BLOCKING; |
40 | p->topology_change_ack = 0; | 40 | p->topology_change_ack = 0; |
41 | p->config_pending = 0; | 41 | p->config_pending = 0; |
42 | |||
43 | br_stp_port_timer_init(p); | ||
44 | } | 42 | } |
45 | 43 | ||
46 | /* called under bridge lock */ | 44 | /* called under bridge lock */ |
@@ -67,7 +65,7 @@ void br_stp_disable_bridge(struct net_bridge *br) | |||
67 | { | 65 | { |
68 | struct net_bridge_port *p; | 66 | struct net_bridge_port *p; |
69 | 67 | ||
70 | spin_lock(&br->lock); | 68 | spin_lock_bh(&br->lock); |
71 | list_for_each_entry(p, &br->port_list, list) { | 69 | list_for_each_entry(p, &br->port_list, list) { |
72 | if (p->state != BR_STATE_DISABLED) | 70 | if (p->state != BR_STATE_DISABLED) |
73 | br_stp_disable_port(p); | 71 | br_stp_disable_port(p); |
@@ -76,7 +74,7 @@ void br_stp_disable_bridge(struct net_bridge *br) | |||
76 | 74 | ||
77 | br->topology_change = 0; | 75 | br->topology_change = 0; |
78 | br->topology_change_detected = 0; | 76 | br->topology_change_detected = 0; |
79 | spin_unlock(&br->lock); | 77 | spin_unlock_bh(&br->lock); |
80 | 78 | ||
81 | del_timer_sync(&br->hello_timer); | 79 | del_timer_sync(&br->hello_timer); |
82 | del_timer_sync(&br->topology_change_timer); | 80 | del_timer_sync(&br->topology_change_timer); |
diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
index 0ac0355d16dd..c51c9e42aeb3 100644
--- a/net/bridge/br_sysfs_if.c
+++ b/net/bridge/br_sysfs_if.c
@@ -195,23 +195,11 @@ static ssize_t brport_store(struct kobject * kobj, | |||
195 | return ret; | 195 | return ret; |
196 | } | 196 | } |
197 | 197 | ||
198 | /* called from kobject_put when port ref count goes to zero. */ | 198 | struct sysfs_ops brport_sysfs_ops = { |
199 | static void brport_release(struct kobject *kobj) | ||
200 | { | ||
201 | kfree(container_of(kobj, struct net_bridge_port, kobj)); | ||
202 | } | ||
203 | |||
204 | static struct sysfs_ops brport_sysfs_ops = { | ||
205 | .show = brport_show, | 199 | .show = brport_show, |
206 | .store = brport_store, | 200 | .store = brport_store, |
207 | }; | 201 | }; |
208 | 202 | ||
209 | static struct kobj_type brport_ktype = { | ||
210 | .sysfs_ops = &brport_sysfs_ops, | ||
211 | .release = brport_release, | ||
212 | }; | ||
213 | |||
214 | |||
215 | /* | 203 | /* |
216 | * Add sysfs entries to ethernet device added to a bridge. | 204 | * Add sysfs entries to ethernet device added to a bridge. |
217 | * Creates a brport subdirectory with bridge attributes. | 205 | * Creates a brport subdirectory with bridge attributes. |
@@ -223,17 +211,6 @@ int br_sysfs_addif(struct net_bridge_port *p) | |||
223 | struct brport_attribute **a; | 211 | struct brport_attribute **a; |
224 | int err; | 212 | int err; |
225 | 213 | ||
226 | ASSERT_RTNL(); | ||
227 | |||
228 | kobject_set_name(&p->kobj, SYSFS_BRIDGE_PORT_ATTR); | ||
229 | p->kobj.ktype = &brport_ktype; | ||
230 | p->kobj.parent = &(p->dev->class_dev.kobj); | ||
231 | p->kobj.kset = NULL; | ||
232 | |||
233 | err = kobject_add(&p->kobj); | ||
234 | if(err) | ||
235 | goto out1; | ||
236 | |||
237 | err = sysfs_create_link(&p->kobj, &br->dev->class_dev.kobj, | 214 | err = sysfs_create_link(&p->kobj, &br->dev->class_dev.kobj, |
238 | SYSFS_BRIDGE_PORT_LINK); | 215 | SYSFS_BRIDGE_PORT_LINK); |
239 | if (err) | 216 | if (err) |
@@ -245,28 +222,7 @@ int br_sysfs_addif(struct net_bridge_port *p) | |||
245 | goto out2; | 222 | goto out2; |
246 | } | 223 | } |
247 | 224 | ||
248 | err = sysfs_create_link(&br->ifobj, &p->kobj, p->dev->name); | 225 | err= sysfs_create_link(&br->ifobj, &p->kobj, p->dev->name); |
249 | if (err) | 226 | out2: |
250 | goto out2; | ||
251 | |||
252 | kobject_uevent(&p->kobj, KOBJ_ADD); | ||
253 | return 0; | ||
254 | out2: | ||
255 | kobject_del(&p->kobj); | ||
256 | out1: | ||
257 | return err; | 227 | return err; |
258 | } | 228 | } |
259 | |||
260 | void br_sysfs_removeif(struct net_bridge_port *p) | ||
261 | { | ||
262 | pr_debug("br_sysfs_removeif\n"); | ||
263 | sysfs_remove_link(&p->br->ifobj, p->dev->name); | ||
264 | kobject_uevent(&p->kobj, KOBJ_REMOVE); | ||
265 | kobject_del(&p->kobj); | ||
266 | } | ||
267 | |||
268 | void br_sysfs_freeif(struct net_bridge_port *p) | ||
269 | { | ||
270 | pr_debug("br_sysfs_freeif\n"); | ||
271 | kobject_put(&p->kobj); | ||
272 | } | ||
diff --git a/net/bridge/netfilter/Makefile b/net/bridge/netfilter/Makefile
index 8bf6d9f6e9d3..905087e0d485 100644
--- a/net/bridge/netfilter/Makefile
+++ b/net/bridge/netfilter/Makefile
@@ -29,4 +29,4 @@ obj-$(CONFIG_BRIDGE_EBT_SNAT) += ebt_snat.o | |||
29 | 29 | ||
30 | # watchers | 30 | # watchers |
31 | obj-$(CONFIG_BRIDGE_EBT_LOG) += ebt_log.o | 31 | obj-$(CONFIG_BRIDGE_EBT_LOG) += ebt_log.o |
32 | obj-$(CONFIG_BRIDGE_EBT_LOG) += ebt_ulog.o | 32 | obj-$(CONFIG_BRIDGE_EBT_ULOG) += ebt_ulog.o |
diff --git a/net/bridge/netfilter/ebt_log.c b/net/bridge/netfilter/ebt_log.c
index 0128fbbe2328..288ff1d4ccc4 100644
--- a/net/bridge/netfilter/ebt_log.c
+++ b/net/bridge/netfilter/ebt_log.c
@@ -166,7 +166,12 @@ static void ebt_log(const struct sk_buff *skb, unsigned int hooknr, | |||
166 | li.u.log.level = info->loglevel; | 166 | li.u.log.level = info->loglevel; |
167 | li.u.log.logflags = info->bitmask; | 167 | li.u.log.logflags = info->bitmask; |
168 | 168 | ||
169 | nf_log_packet(PF_BRIDGE, hooknr, skb, in, out, &li, info->prefix); | 169 | if (info->bitmask & EBT_LOG_NFLOG) |
170 | nf_log_packet(PF_BRIDGE, hooknr, skb, in, out, &li, | ||
171 | info->prefix); | ||
172 | else | ||
173 | ebt_log_packet(PF_BRIDGE, hooknr, skb, in, out, &li, | ||
174 | info->prefix); | ||
170 | } | 175 | } |
171 | 176 | ||
172 | static struct ebt_watcher log = | 177 | static struct ebt_watcher log = |
diff --git a/net/bridge/netfilter/ebt_ulog.c b/net/bridge/netfilter/ebt_ulog.c
index ce617b3dbbb8..802baf755ef4 100644
--- a/net/bridge/netfilter/ebt_ulog.c
+++ b/net/bridge/netfilter/ebt_ulog.c
@@ -46,7 +46,7 @@ | |||
46 | #define PRINTR(format, args...) do { if (net_ratelimit()) \ | 46 | #define PRINTR(format, args...) do { if (net_ratelimit()) \ |
47 | printk(format , ## args); } while (0) | 47 | printk(format , ## args); } while (0) |
48 | 48 | ||
49 | static unsigned int nlbufsiz = 4096; | 49 | static unsigned int nlbufsiz = NLMSG_GOODSIZE; |
50 | module_param(nlbufsiz, uint, 0600); | 50 | module_param(nlbufsiz, uint, 0600); |
51 | MODULE_PARM_DESC(nlbufsiz, "netlink buffer size (number of bytes) " | 51 | MODULE_PARM_DESC(nlbufsiz, "netlink buffer size (number of bytes) " |
52 | "(defaults to 4096)"); | 52 | "(defaults to 4096)"); |
@@ -98,12 +98,14 @@ static void ulog_timer(unsigned long data) | |||
98 | static struct sk_buff *ulog_alloc_skb(unsigned int size) | 98 | static struct sk_buff *ulog_alloc_skb(unsigned int size) |
99 | { | 99 | { |
100 | struct sk_buff *skb; | 100 | struct sk_buff *skb; |
101 | unsigned int n; | ||
101 | 102 | ||
102 | skb = alloc_skb(nlbufsiz, GFP_ATOMIC); | 103 | n = max(size, nlbufsiz); |
104 | skb = alloc_skb(n, GFP_ATOMIC); | ||
103 | if (!skb) { | 105 | if (!skb) { |
104 | PRINTR(KERN_ERR "ebt_ulog: can't alloc whole buffer " | 106 | PRINTR(KERN_ERR "ebt_ulog: can't alloc whole buffer " |
105 | "of size %ub!\n", nlbufsiz); | 107 | "of size %ub!\n", n); |
106 | if (size < nlbufsiz) { | 108 | if (n > size) { |
107 | /* try to allocate only as much as we need for | 109 | /* try to allocate only as much as we need for |
108 | * current packet */ | 110 | * current packet */ |
109 | skb = alloc_skb(size, GFP_ATOMIC); | 111 | skb = alloc_skb(size, GFP_ATOMIC); |
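The ebt_ulog.c hunk sizes the netlink buffer as the larger of the configured nlbufsiz and the space the current packet needs, and falls back to a packet-sized allocation if the larger one fails. The same idea as a small sketch (hypothetical wrapper name):

#include <linux/kernel.h>
#include <linux/skbuff.h>

/* Sketch: try the configured aggregation buffer first, fall back to a
 * buffer that holds only the current packet under memory pressure. */
static struct sk_buff *log_alloc_skb(unsigned int size, unsigned int bufsiz)
{
	unsigned int n = max(size, bufsiz);
	struct sk_buff *skb;

	skb = alloc_skb(n, GFP_ATOMIC);
	if (!skb && n > size)
		skb = alloc_skb(size, GFP_ATOMIC);
	return skb;
}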
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 00729b3604f8..cbd4020cc84d 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -934,6 +934,13 @@ static int do_replace(void __user *user, unsigned int len) | |||
934 | BUGPRINT("Entries_size never zero\n"); | 934 | BUGPRINT("Entries_size never zero\n"); |
935 | return -EINVAL; | 935 | return -EINVAL; |
936 | } | 936 | } |
937 | /* overflow check */ | ||
938 | if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) / NR_CPUS - | ||
939 | SMP_CACHE_BYTES) / sizeof(struct ebt_counter)) | ||
940 | return -ENOMEM; | ||
941 | if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter)) | ||
942 | return -ENOMEM; | ||
943 | |||
937 | countersize = COUNTER_OFFSET(tmp.nentries) * | 944 | countersize = COUNTER_OFFSET(tmp.nentries) * |
938 | (highest_possible_processor_id()+1); | 945 | (highest_possible_processor_id()+1); |
939 | newinfo = (struct ebt_table_info *) | 946 | newinfo = (struct ebt_table_info *) |
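The ebtables do_replace() hunk rejects user-supplied entry and counter counts whose later multiplications would overflow before any allocation size is computed. A compact sketch of that guard (hypothetical helper; the real check also accounts for the per-CPU table header and cache-line padding):

#include <linux/kernel.h>
#include <linux/errno.h>

/* Sketch: refuse a count that would wrap when multiplied by the
 * element size used for the allocation. */
static int check_alloc_count(unsigned int nentries, size_t elem_size)
{
	if (nentries >= INT_MAX / elem_size)
		return -ENOMEM;
	return 0;
}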
diff --git a/net/core/datagram.c b/net/core/datagram.c
index f8d322e1ea92..b8ce6bf81188 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -247,49 +247,74 @@ EXPORT_SYMBOL(skb_kill_datagram); | |||
247 | int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset, | 247 | int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset, |
248 | struct iovec *to, int len) | 248 | struct iovec *to, int len) |
249 | { | 249 | { |
250 | int i, err, fraglen, end = 0; | 250 | int start = skb_headlen(skb); |
251 | struct sk_buff *next = skb_shinfo(skb)->frag_list; | 251 | int i, copy = start - offset; |
252 | 252 | ||
253 | if (!len) | 253 | /* Copy header. */ |
254 | return 0; | 254 | if (copy > 0) { |
255 | if (copy > len) | ||
256 | copy = len; | ||
257 | if (memcpy_toiovec(to, skb->data + offset, copy)) | ||
258 | goto fault; | ||
259 | if ((len -= copy) == 0) | ||
260 | return 0; | ||
261 | offset += copy; | ||
262 | } | ||
255 | 263 | ||
256 | next_skb: | 264 | /* Copy paged appendix. Hmm... why does this look so complicated? */ |
257 | fraglen = skb_headlen(skb); | 265 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
258 | i = -1; | 266 | int end; |
259 | 267 | ||
260 | while (1) { | 268 | BUG_TRAP(start <= offset + len); |
261 | int start = end; | ||
262 | 269 | ||
263 | if ((end += fraglen) > offset) { | 270 | end = start + skb_shinfo(skb)->frags[i].size; |
264 | int copy = end - offset, o = offset - start; | 271 | if ((copy = end - offset) > 0) { |
272 | int err; | ||
273 | u8 *vaddr; | ||
274 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | ||
275 | struct page *page = frag->page; | ||
265 | 276 | ||
266 | if (copy > len) | 277 | if (copy > len) |
267 | copy = len; | 278 | copy = len; |
268 | if (i == -1) | 279 | vaddr = kmap(page); |
269 | err = memcpy_toiovec(to, skb->data + o, copy); | 280 | err = memcpy_toiovec(to, vaddr + frag->page_offset + |
270 | else { | 281 | offset - start, copy); |
271 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | 282 | kunmap(page); |
272 | struct page *page = frag->page; | ||
273 | void *p = kmap(page) + frag->page_offset + o; | ||
274 | err = memcpy_toiovec(to, p, copy); | ||
275 | kunmap(page); | ||
276 | } | ||
277 | if (err) | 283 | if (err) |
278 | goto fault; | 284 | goto fault; |
279 | if (!(len -= copy)) | 285 | if (!(len -= copy)) |
280 | return 0; | 286 | return 0; |
281 | offset += copy; | 287 | offset += copy; |
282 | } | 288 | } |
283 | if (++i >= skb_shinfo(skb)->nr_frags) | 289 | start = end; |
284 | break; | ||
285 | fraglen = skb_shinfo(skb)->frags[i].size; | ||
286 | } | 290 | } |
287 | if (next) { | 291 | |
288 | skb = next; | 292 | if (skb_shinfo(skb)->frag_list) { |
289 | BUG_ON(skb_shinfo(skb)->frag_list); | 293 | struct sk_buff *list = skb_shinfo(skb)->frag_list; |
290 | next = skb->next; | 294 | |
291 | goto next_skb; | 295 | for (; list; list = list->next) { |
296 | int end; | ||
297 | |||
298 | BUG_TRAP(start <= offset + len); | ||
299 | |||
300 | end = start + list->len; | ||
301 | if ((copy = end - offset) > 0) { | ||
302 | if (copy > len) | ||
303 | copy = len; | ||
304 | if (skb_copy_datagram_iovec(list, | ||
305 | offset - start, | ||
306 | to, copy)) | ||
307 | goto fault; | ||
308 | if ((len -= copy) == 0) | ||
309 | return 0; | ||
310 | offset += copy; | ||
311 | } | ||
312 | start = end; | ||
313 | } | ||
292 | } | 314 | } |
315 | if (!len) | ||
316 | return 0; | ||
317 | |||
293 | fault: | 318 | fault: |
294 | return -EFAULT; | 319 | return -EFAULT; |
295 | } | 320 | } |
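The rewritten skb_copy_datagram_iovec() above walks the skb in three stages, linear head, then page fragments, then the frag_list, keeping a running [start, end) window to find which piece covers the requested offset. The offset arithmetic on its own, as a plain C sketch over a hypothetical segmented buffer:

#include <stddef.h>
#include <string.h>

struct seg { const void *data; size_t size; };

/* Sketch: copy "len" bytes starting at logical "offset" out of a list of
 * segments; each segment covers [start, start + size) of the stream. */
static int copy_from_segments(const struct seg *segs, int nsegs,
			      size_t offset, void *to, size_t len)
{
	size_t start = 0;
	int i;

	for (i = 0; i < nsegs && len > 0; i++) {
		size_t end = start + segs[i].size;

		if (end > offset) {	/* this segment overlaps the request */
			size_t copy = end - offset;

			if (copy > len)
				copy = len;
			memcpy(to, (const char *)segs[i].data + (offset - start),
			       copy);
			to = (char *)to + copy;
			offset += copy;
			len -= copy;
		}
		start = end;
	}
	return len ? -1 : 0;	/* -1: request ran past the available data */
}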
diff --git a/net/core/dev.c b/net/core/dev.c
index fd070a098f20..2afb0de95329 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2543,13 +2543,14 @@ int dev_ioctl(unsigned int cmd, void __user *arg) | |||
2543 | case SIOCBONDENSLAVE: | 2543 | case SIOCBONDENSLAVE: |
2544 | case SIOCBONDRELEASE: | 2544 | case SIOCBONDRELEASE: |
2545 | case SIOCBONDSETHWADDR: | 2545 | case SIOCBONDSETHWADDR: |
2546 | case SIOCBONDSLAVEINFOQUERY: | ||
2547 | case SIOCBONDINFOQUERY: | ||
2548 | case SIOCBONDCHANGEACTIVE: | 2546 | case SIOCBONDCHANGEACTIVE: |
2549 | case SIOCBRADDIF: | 2547 | case SIOCBRADDIF: |
2550 | case SIOCBRDELIF: | 2548 | case SIOCBRDELIF: |
2551 | if (!capable(CAP_NET_ADMIN)) | 2549 | if (!capable(CAP_NET_ADMIN)) |
2552 | return -EPERM; | 2550 | return -EPERM; |
2551 | /* fall through */ | ||
2552 | case SIOCBONDSLAVEINFOQUERY: | ||
2553 | case SIOCBONDINFOQUERY: | ||
2553 | dev_load(ifr.ifr_name); | 2554 | dev_load(ifr.ifr_name); |
2554 | rtnl_lock(); | 2555 | rtnl_lock(); |
2555 | ret = dev_ifsioc(&ifr, cmd); | 2556 | ret = dev_ifsioc(&ifr, cmd); |
@@ -3236,7 +3237,7 @@ static int __init net_dev_init(void) | |||
3236 | * Initialise the packet receive queues. | 3237 | * Initialise the packet receive queues. |
3237 | */ | 3238 | */ |
3238 | 3239 | ||
3239 | for (i = 0; i < NR_CPUS; i++) { | 3240 | for_each_cpu(i) { |
3240 | struct softnet_data *queue; | 3241 | struct softnet_data *queue; |
3241 | 3242 | ||
3242 | queue = &per_cpu(softnet_data, i); | 3243 | queue = &per_cpu(softnet_data, i); |
diff --git a/net/core/filter.c b/net/core/filter.c
index 9540946a48f3..93fbd01d2259 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -64,7 +64,7 @@ static inline void *load_pointer(struct sk_buff *skb, int k, | |||
64 | } | 64 | } |
65 | 65 | ||
66 | /** | 66 | /** |
67 | * sk_run_filter - run a filter on a socket | 67 | * sk_run_filter - run a filter on a socket |
68 | * @skb: buffer to run the filter on | 68 | * @skb: buffer to run the filter on |
69 | * @filter: filter to apply | 69 | * @filter: filter to apply |
70 | * @flen: length of filter | 70 | * @flen: length of filter |
@@ -78,8 +78,8 @@ unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int | |||
78 | { | 78 | { |
79 | struct sock_filter *fentry; /* We walk down these */ | 79 | struct sock_filter *fentry; /* We walk down these */ |
80 | void *ptr; | 80 | void *ptr; |
81 | u32 A = 0; /* Accumulator */ | 81 | u32 A = 0; /* Accumulator */ |
82 | u32 X = 0; /* Index Register */ | 82 | u32 X = 0; /* Index Register */ |
83 | u32 mem[BPF_MEMWORDS]; /* Scratch Memory Store */ | 83 | u32 mem[BPF_MEMWORDS]; /* Scratch Memory Store */ |
84 | u32 tmp; | 84 | u32 tmp; |
85 | int k; | 85 | int k; |
diff --git a/net/core/request_sock.c b/net/core/request_sock.c
index b8203de5ff07..98f0fc923f91 100644
--- a/net/core/request_sock.c
+++ b/net/core/request_sock.c
@@ -52,7 +52,6 @@ int reqsk_queue_alloc(struct request_sock_queue *queue, | |||
52 | get_random_bytes(&lopt->hash_rnd, sizeof(lopt->hash_rnd)); | 52 | get_random_bytes(&lopt->hash_rnd, sizeof(lopt->hash_rnd)); |
53 | rwlock_init(&queue->syn_wait_lock); | 53 | rwlock_init(&queue->syn_wait_lock); |
54 | queue->rskq_accept_head = queue->rskq_accept_head = NULL; | 54 | queue->rskq_accept_head = queue->rskq_accept_head = NULL; |
55 | queue->rskq_defer_accept = 0; | ||
56 | lopt->nr_table_entries = nr_table_entries; | 55 | lopt->nr_table_entries = nr_table_entries; |
57 | 56 | ||
58 | write_lock_bh(&queue->syn_wait_lock); | 57 | write_lock_bh(&queue->syn_wait_lock); |
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 8700379685e0..eca2976abb25 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -455,7 +455,7 @@ void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change) | |||
455 | if (!skb) | 455 | if (!skb) |
456 | return; | 456 | return; |
457 | 457 | ||
458 | if (rtnetlink_fill_ifinfo(skb, dev, type, current->pid, 0, change, 0) < 0) { | 458 | if (rtnetlink_fill_ifinfo(skb, dev, type, 0, 0, change, 0) < 0) { |
459 | kfree_skb(skb); | 459 | kfree_skb(skb); |
460 | return; | 460 | return; |
461 | } | 461 | } |
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index d0732e9c8560..2144952d1c6c 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -135,13 +135,15 @@ void skb_under_panic(struct sk_buff *skb, int sz, void *here) | |||
135 | struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask, | 135 | struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask, |
136 | int fclone) | 136 | int fclone) |
137 | { | 137 | { |
138 | kmem_cache_t *cache; | ||
138 | struct skb_shared_info *shinfo; | 139 | struct skb_shared_info *shinfo; |
139 | struct sk_buff *skb; | 140 | struct sk_buff *skb; |
140 | u8 *data; | 141 | u8 *data; |
141 | 142 | ||
143 | cache = fclone ? skbuff_fclone_cache : skbuff_head_cache; | ||
144 | |||
142 | /* Get the HEAD */ | 145 | /* Get the HEAD */ |
143 | skb = kmem_cache_alloc(fclone ? skbuff_fclone_cache : skbuff_head_cache, | 146 | skb = kmem_cache_alloc(cache, gfp_mask & ~__GFP_DMA); |
144 | gfp_mask & ~__GFP_DMA); | ||
145 | if (!skb) | 147 | if (!skb) |
146 | goto out; | 148 | goto out; |
147 | 149 | ||
@@ -180,7 +182,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask, | |||
180 | out: | 182 | out: |
181 | return skb; | 183 | return skb; |
182 | nodata: | 184 | nodata: |
183 | kmem_cache_free(skbuff_head_cache, skb); | 185 | kmem_cache_free(cache, skb); |
184 | skb = NULL; | 186 | skb = NULL; |
185 | goto out; | 187 | goto out; |
186 | } | 188 | } |
@@ -409,6 +411,9 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask) | |||
409 | C(pkt_type); | 411 | C(pkt_type); |
410 | C(ip_summed); | 412 | C(ip_summed); |
411 | C(priority); | 413 | C(priority); |
414 | #if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE) | ||
415 | C(ipvs_property); | ||
416 | #endif | ||
412 | C(protocol); | 417 | C(protocol); |
413 | n->destructor = NULL; | 418 | n->destructor = NULL; |
414 | #ifdef CONFIG_NETFILTER | 419 | #ifdef CONFIG_NETFILTER |
@@ -420,13 +425,6 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask) | |||
420 | C(nfct_reasm); | 425 | C(nfct_reasm); |
421 | nf_conntrack_get_reasm(skb->nfct_reasm); | 426 | nf_conntrack_get_reasm(skb->nfct_reasm); |
422 | #endif | 427 | #endif |
423 | #if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE) | ||
424 | C(ipvs_property); | ||
425 | #endif | ||
426 | #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) | ||
427 | C(nfct_reasm); | ||
428 | nf_conntrack_get_reasm(skb->nfct_reasm); | ||
429 | #endif | ||
430 | #ifdef CONFIG_BRIDGE_NETFILTER | 428 | #ifdef CONFIG_BRIDGE_NETFILTER |
431 | C(nf_bridge); | 429 | C(nf_bridge); |
432 | nf_bridge_get(skb->nf_bridge); | 430 | nf_bridge_get(skb->nf_bridge); |
diff --git a/net/core/utils.c b/net/core/utils.c
index ac1d1fcf8673..fdc4f38bc46c 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -121,7 +121,7 @@ void __init net_random_init(void) | |||
121 | { | 121 | { |
122 | int i; | 122 | int i; |
123 | 123 | ||
124 | for (i = 0; i < NR_CPUS; i++) { | 124 | for_each_cpu(i) { |
125 | struct nrnd_state *state = &per_cpu(net_rand_state,i); | 125 | struct nrnd_state *state = &per_cpu(net_rand_state,i); |
126 | __net_srandom(state, i+jiffies); | 126 | __net_srandom(state, i+jiffies); |
127 | } | 127 | } |
@@ -133,7 +133,7 @@ static int net_random_reseed(void) | |||
133 | unsigned long seed[NR_CPUS]; | 133 | unsigned long seed[NR_CPUS]; |
134 | 134 | ||
135 | get_random_bytes(seed, sizeof(seed)); | 135 | get_random_bytes(seed, sizeof(seed)); |
136 | for (i = 0; i < NR_CPUS; i++) { | 136 | for_each_cpu(i) { |
137 | struct nrnd_state *state = &per_cpu(net_rand_state,i); | 137 | struct nrnd_state *state = &per_cpu(net_rand_state,i); |
138 | __net_srandom(state, seed[i]); | 138 | __net_srandom(state, seed[i]); |
139 | } | 139 | } |
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index aa68e0ab274d..35d1d347541c 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -2,7 +2,7 @@ | |||
2 | * net/dccp/ccids/ccid3.c | 2 | * net/dccp/ccids/ccid3.c |
3 | * | 3 | * |
4 | * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand. | 4 | * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand. |
5 | * Copyright (c) 2005 Ian McDonald <iam4@cs.waikato.ac.nz> | 5 | * Copyright (c) 2005-6 Ian McDonald <imcdnzl@gmail.com> |
6 | * | 6 | * |
7 | * An implementation of the DCCP protocol | 7 | * An implementation of the DCCP protocol |
8 | * | 8 | * |
@@ -1033,9 +1033,13 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) | |||
1033 | p_prev = hcrx->ccid3hcrx_p; | 1033 | p_prev = hcrx->ccid3hcrx_p; |
1034 | 1034 | ||
1035 | /* Calculate loss event rate */ | 1035 | /* Calculate loss event rate */ |
1036 | if (!list_empty(&hcrx->ccid3hcrx_li_hist)) | 1036 | if (!list_empty(&hcrx->ccid3hcrx_li_hist)) { |
1037 | u32 i_mean = dccp_li_hist_calc_i_mean(&hcrx->ccid3hcrx_li_hist); | ||
1038 | |||
1037 | /* Scaling up by 1000000 as fixed decimal */ | 1039 | /* Scaling up by 1000000 as fixed decimal */ |
1038 | hcrx->ccid3hcrx_p = 1000000 / dccp_li_hist_calc_i_mean(&hcrx->ccid3hcrx_li_hist); | 1040 | if (i_mean != 0) |
1041 | hcrx->ccid3hcrx_p = 1000000 / i_mean; | ||
1042 | } | ||
1039 | 1043 | ||
1040 | if (hcrx->ccid3hcrx_p > p_prev) { | 1044 | if (hcrx->ccid3hcrx_p > p_prev) { |
1041 | ccid3_hc_rx_send_feedback(sk); | 1045 | ccid3_hc_rx_send_feedback(sk); |
diff --git a/net/dccp/ccids/lib/tfrc_equation.c b/net/dccp/ccids/lib/tfrc_equation.c
index d2b5933b4510..add3cae65e2d 100644
--- a/net/dccp/ccids/lib/tfrc_equation.c
+++ b/net/dccp/ccids/lib/tfrc_equation.c
@@ -15,7 +15,6 @@ | |||
15 | #include <linux/config.h> | 15 | #include <linux/config.h> |
16 | #include <linux/module.h> | 16 | #include <linux/module.h> |
17 | 17 | ||
18 | #include <asm/bug.h> | ||
19 | #include <asm/div64.h> | 18 | #include <asm/div64.h> |
20 | 19 | ||
21 | #include "tfrc.h" | 20 | #include "tfrc.h" |
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 00f983226672..dc0487b5bace 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -119,7 +119,8 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) | |||
119 | if (err != 0) | 119 | if (err != 0) |
120 | goto failure; | 120 | goto failure; |
121 | 121 | ||
122 | err = ip_route_newports(&rt, inet->sport, inet->dport, sk); | 122 | err = ip_route_newports(&rt, IPPROTO_DCCP, inet->sport, inet->dport, |
123 | sk); | ||
123 | if (err != 0) | 124 | if (err != 0) |
124 | goto failure; | 125 | goto failure; |
125 | 126 | ||
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index df074259f9c3..80c4d048869e 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -468,6 +468,7 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req, | |||
468 | done: | 468 | done: |
469 | if (opt && opt != np->opt) | 469 | if (opt && opt != np->opt) |
470 | sock_kfree_s(sk, opt, opt->tot_len); | 470 | sock_kfree_s(sk, opt, opt->tot_len); |
471 | dst_release(dst); | ||
471 | return err; | 472 | return err; |
472 | } | 473 | } |
473 | 474 | ||
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 9890fd97e538..c971f14712ec 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -95,6 +95,12 @@ int eth_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, | |||
95 | saddr = dev->dev_addr; | 95 | saddr = dev->dev_addr; |
96 | memcpy(eth->h_source,saddr,dev->addr_len); | 96 | memcpy(eth->h_source,saddr,dev->addr_len); |
97 | 97 | ||
98 | if(daddr) | ||
99 | { | ||
100 | memcpy(eth->h_dest,daddr,dev->addr_len); | ||
101 | return ETH_HLEN; | ||
102 | } | ||
103 | |||
98 | /* | 104 | /* |
99 | * Anyway, the loopback-device should never use this function... | 105 | * Anyway, the loopback-device should never use this function... |
100 | */ | 106 | */ |
@@ -105,12 +111,6 @@ int eth_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, | |||
105 | return ETH_HLEN; | 111 | return ETH_HLEN; |
106 | } | 112 | } |
107 | 113 | ||
108 | if(daddr) | ||
109 | { | ||
110 | memcpy(eth->h_dest,daddr,dev->addr_len); | ||
111 | return ETH_HLEN; | ||
112 | } | ||
113 | |||
114 | return -ETH_HLEN; | 114 | return -ETH_HLEN; |
115 | } | 115 | } |
116 | 116 | ||
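[Editor's note] The eth.c hunk moves the "destination address supplied" case ahead of the loopback special-casing, so the common path returns early. A compile-and-run sketch of just that control flow; fill_dest and the sample MAC are made up for the example, only the return-value convention mirrors the hunk:

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6
#define ETH_HLEN 14

static int fill_dest(unsigned char *h_dest, const unsigned char *daddr)
{
	if (daddr) {
		memcpy(h_dest, daddr, ETH_ALEN);
		return ETH_HLEN;	/* header fully built */
	}
	return -ETH_HLEN;		/* destination still unresolved */
}

int main(void)
{
	unsigned char dest[ETH_ALEN] = { 0 };
	const unsigned char mac[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	printf("%d\n", fill_dest(dest, mac));	/* 14 */
	printf("%d\n", fill_dest(dest, NULL));	/* -14 */
	return 0;
}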
diff --git a/net/ieee80211/ieee80211_crypt_ccmp.c b/net/ieee80211/ieee80211_crypt_ccmp.c index 470221728503..3840d1911f2b 100644 --- a/net/ieee80211/ieee80211_crypt_ccmp.c +++ b/net/ieee80211/ieee80211_crypt_ccmp.c | |||
@@ -131,7 +131,7 @@ static void ccmp_init_blocks(struct crypto_tfm *tfm, | |||
131 | a4_included = ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) == | 131 | a4_included = ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) == |
132 | (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)); | 132 | (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)); |
133 | qc_included = ((WLAN_FC_GET_TYPE(fc) == IEEE80211_FTYPE_DATA) && | 133 | qc_included = ((WLAN_FC_GET_TYPE(fc) == IEEE80211_FTYPE_DATA) && |
134 | (WLAN_FC_GET_STYPE(fc) & 0x08)); | 134 | (WLAN_FC_GET_STYPE(fc) & IEEE80211_STYPE_QOS_DATA)); |
135 | aad_len = 22; | 135 | aad_len = 22; |
136 | if (a4_included) | 136 | if (a4_included) |
137 | aad_len += 6; | 137 | aad_len += 6; |
diff --git a/net/ieee80211/ieee80211_rx.c b/net/ieee80211/ieee80211_rx.c index 7a121802faa9..7ac6a7165d9c 100644 --- a/net/ieee80211/ieee80211_rx.c +++ b/net/ieee80211/ieee80211_rx.c | |||
@@ -350,6 +350,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, | |||
350 | u8 src[ETH_ALEN]; | 350 | u8 src[ETH_ALEN]; |
351 | struct ieee80211_crypt_data *crypt = NULL; | 351 | struct ieee80211_crypt_data *crypt = NULL; |
352 | int keyidx = 0; | 352 | int keyidx = 0; |
353 | int can_be_decrypted = 0; | ||
353 | 354 | ||
354 | hdr = (struct ieee80211_hdr_4addr *)skb->data; | 355 | hdr = (struct ieee80211_hdr_4addr *)skb->data; |
355 | stats = &ieee->stats; | 356 | stats = &ieee->stats; |
@@ -410,12 +411,23 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, | |||
410 | return 1; | 411 | return 1; |
411 | } | 412 | } |
412 | 413 | ||
413 | if (is_multicast_ether_addr(hdr->addr1) | 414 | can_be_decrypted = (is_multicast_ether_addr(hdr->addr1) || |
414 | ? ieee->host_mc_decrypt : ieee->host_decrypt) { | 415 | is_broadcast_ether_addr(hdr->addr2)) ? |
416 | ieee->host_mc_decrypt : ieee->host_decrypt; | ||
417 | |||
418 | if (can_be_decrypted) { | ||
415 | int idx = 0; | 419 | int idx = 0; |
416 | if (skb->len >= hdrlen + 3) | 420 | if (skb->len >= hdrlen + 3) { |
421 | /* Top two-bits of byte 3 are the key index */ | ||
417 | idx = skb->data[hdrlen + 3] >> 6; | 422 | idx = skb->data[hdrlen + 3] >> 6; |
423 | } | ||
424 | |||
425 | /* ieee->crypt[] is WEP_KEY (4) in length. Given that idx | ||
426 | * is only allowed 2-bits of storage, no value of idx can | ||
427 | * be provided via above code that would result in idx | ||
428 | * being out of range */ | ||
418 | crypt = ieee->crypt[idx]; | 429 | crypt = ieee->crypt[idx]; |
430 | |||
419 | #ifdef NOT_YET | 431 | #ifdef NOT_YET |
420 | sta = NULL; | 432 | sta = NULL; |
421 | 433 | ||
@@ -553,7 +565,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, | |||
553 | 565 | ||
554 | /* skb: hdr + (possibly fragmented, possibly encrypted) payload */ | 566 | /* skb: hdr + (possibly fragmented, possibly encrypted) payload */ |
555 | 567 | ||
556 | if (ieee->host_decrypt && (fc & IEEE80211_FCTL_PROTECTED) && | 568 | if ((fc & IEEE80211_FCTL_PROTECTED) && can_be_decrypted && |
557 | (keyidx = ieee80211_rx_frame_decrypt(ieee, skb, crypt)) < 0) | 569 | (keyidx = ieee80211_rx_frame_decrypt(ieee, skb, crypt)) < 0) |
558 | goto rx_dropped; | 570 | goto rx_dropped; |
559 | 571 | ||
@@ -617,7 +629,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, | |||
617 | 629 | ||
618 | /* skb: hdr + (possible reassembled) full MSDU payload; possibly still | 630 | /* skb: hdr + (possible reassembled) full MSDU payload; possibly still |
619 | * encrypted/authenticated */ | 631 | * encrypted/authenticated */ |
620 | if (ieee->host_decrypt && (fc & IEEE80211_FCTL_PROTECTED) && | 632 | if ((fc & IEEE80211_FCTL_PROTECTED) && can_be_decrypted && |
621 | ieee80211_rx_frame_decrypt_msdu(ieee, skb, keyidx, crypt)) | 633 | ieee80211_rx_frame_decrypt_msdu(ieee, skb, keyidx, crypt)) |
622 | goto rx_dropped; | 634 | goto rx_dropped; |
623 | 635 | ||
@@ -1289,7 +1301,7 @@ static void update_network(struct ieee80211_network *dst, | |||
1289 | /* dst->last_associate is not overwritten */ | 1301 | /* dst->last_associate is not overwritten */ |
1290 | } | 1302 | } |
1291 | 1303 | ||
1292 | static inline int is_beacon(int fc) | 1304 | static inline int is_beacon(__le16 fc) |
1293 | { | 1305 | { |
1294 | return (WLAN_FC_GET_STYPE(le16_to_cpu(fc)) == IEEE80211_STYPE_BEACON); | 1306 | return (WLAN_FC_GET_STYPE(le16_to_cpu(fc)) == IEEE80211_STYPE_BEACON); |
1295 | } | 1307 | } |
@@ -1336,9 +1348,7 @@ static void ieee80211_process_probe_response(struct ieee80211_device | |||
1336 | escape_essid(info_element->data, | 1348 | escape_essid(info_element->data, |
1337 | info_element->len), | 1349 | info_element->len), |
1338 | MAC_ARG(beacon->header.addr3), | 1350 | MAC_ARG(beacon->header.addr3), |
1339 | is_beacon(le16_to_cpu | 1351 | is_beacon(beacon->header.frame_ctl) ? |
1340 | (beacon->header. | ||
1341 | frame_ctl)) ? | ||
1342 | "BEACON" : "PROBE RESPONSE"); | 1352 | "BEACON" : "PROBE RESPONSE"); |
1343 | return; | 1353 | return; |
1344 | } | 1354 | } |
@@ -1388,9 +1398,7 @@ static void ieee80211_process_probe_response(struct ieee80211_device | |||
1388 | escape_essid(network.ssid, | 1398 | escape_essid(network.ssid, |
1389 | network.ssid_len), | 1399 | network.ssid_len), |
1390 | MAC_ARG(network.bssid), | 1400 | MAC_ARG(network.bssid), |
1391 | is_beacon(le16_to_cpu | 1401 | is_beacon(beacon->header.frame_ctl) ? |
1392 | (beacon->header. | ||
1393 | frame_ctl)) ? | ||
1394 | "BEACON" : "PROBE RESPONSE"); | 1402 | "BEACON" : "PROBE RESPONSE"); |
1395 | #endif | 1403 | #endif |
1396 | memcpy(target, &network, sizeof(*target)); | 1404 | memcpy(target, &network, sizeof(*target)); |
@@ -1400,21 +1408,19 @@ static void ieee80211_process_probe_response(struct ieee80211_device | |||
1400 | escape_essid(target->ssid, | 1408 | escape_essid(target->ssid, |
1401 | target->ssid_len), | 1409 | target->ssid_len), |
1402 | MAC_ARG(target->bssid), | 1410 | MAC_ARG(target->bssid), |
1403 | is_beacon(le16_to_cpu | 1411 | is_beacon(beacon->header.frame_ctl) ? |
1404 | (beacon->header. | ||
1405 | frame_ctl)) ? | ||
1406 | "BEACON" : "PROBE RESPONSE"); | 1412 | "BEACON" : "PROBE RESPONSE"); |
1407 | update_network(target, &network); | 1413 | update_network(target, &network); |
1408 | } | 1414 | } |
1409 | 1415 | ||
1410 | spin_unlock_irqrestore(&ieee->lock, flags); | 1416 | spin_unlock_irqrestore(&ieee->lock, flags); |
1411 | 1417 | ||
1412 | if (is_beacon(le16_to_cpu(beacon->header.frame_ctl))) { | 1418 | if (is_beacon(beacon->header.frame_ctl)) { |
1413 | if (ieee->handle_beacon != NULL) | 1419 | if (ieee->handle_beacon != NULL) |
1414 | ieee->handle_beacon(dev, beacon, &network); | 1420 | ieee->handle_beacon(dev, beacon, target); |
1415 | } else { | 1421 | } else { |
1416 | if (ieee->handle_probe_response != NULL) | 1422 | if (ieee->handle_probe_response != NULL) |
1417 | ieee->handle_probe_response(dev, beacon, &network); | 1423 | ieee->handle_probe_response(dev, beacon, target); |
1418 | } | 1424 | } |
1419 | } | 1425 | } |
1420 | 1426 | ||
@@ -1439,7 +1445,7 @@ void ieee80211_rx_mgt(struct ieee80211_device *ieee, | |||
1439 | break; | 1445 | break; |
1440 | 1446 | ||
1441 | case IEEE80211_STYPE_PROBE_REQ: | 1447 | case IEEE80211_STYPE_PROBE_REQ: |
1442 | IEEE80211_DEBUG_MGMT("recieved auth (%d)\n", | 1448 | IEEE80211_DEBUG_MGMT("received auth (%d)\n", |
1443 | WLAN_FC_GET_STYPE(le16_to_cpu | 1449 | WLAN_FC_GET_STYPE(le16_to_cpu |
1444 | (header->frame_ctl))); | 1450 | (header->frame_ctl))); |
1445 | 1451 | ||
@@ -1473,7 +1479,7 @@ void ieee80211_rx_mgt(struct ieee80211_device *ieee, | |||
1473 | break; | 1479 | break; |
1474 | case IEEE80211_STYPE_AUTH: | 1480 | case IEEE80211_STYPE_AUTH: |
1475 | 1481 | ||
1476 | IEEE80211_DEBUG_MGMT("recieved auth (%d)\n", | 1482 | IEEE80211_DEBUG_MGMT("received auth (%d)\n", |
1477 | WLAN_FC_GET_STYPE(le16_to_cpu | 1483 | WLAN_FC_GET_STYPE(le16_to_cpu |
1478 | (header->frame_ctl))); | 1484 | (header->frame_ctl))); |
1479 | 1485 | ||
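[Editor's note] The added comment in the ieee80211_rx.c hunk argues that a 2-bit key index can never overrun the 4-entry crypt[] array. A tiny standalone check of that arithmetic (the sample IV bytes are arbitrary):

#include <stdio.h>

int main(void)
{
	/* Top two bits of the 4th IV byte select the key, so the shift can
	 * only yield 0..3 -- always a valid index into a 4-entry array. */
	unsigned char samples[] = { 0x00, 0x40, 0x80, 0xc0, 0xff };
	for (unsigned int i = 0; i < sizeof(samples); i++)
		printf("IV byte 0x%02x -> key index %d\n",
		       samples[i], samples[i] >> 6);
	return 0;
}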
diff --git a/net/ieee80211/ieee80211_wx.c b/net/ieee80211/ieee80211_wx.c index 23e1630f50b7..f87c6b89f845 100644 --- a/net/ieee80211/ieee80211_wx.c +++ b/net/ieee80211/ieee80211_wx.c | |||
@@ -232,15 +232,18 @@ static char *ipw2100_translate_scan(struct ieee80211_device *ieee, | |||
232 | return start; | 232 | return start; |
233 | } | 233 | } |
234 | 234 | ||
235 | #define SCAN_ITEM_SIZE 128 | ||
236 | |||
235 | int ieee80211_wx_get_scan(struct ieee80211_device *ieee, | 237 | int ieee80211_wx_get_scan(struct ieee80211_device *ieee, |
236 | struct iw_request_info *info, | 238 | struct iw_request_info *info, |
237 | union iwreq_data *wrqu, char *extra) | 239 | union iwreq_data *wrqu, char *extra) |
238 | { | 240 | { |
239 | struct ieee80211_network *network; | 241 | struct ieee80211_network *network; |
240 | unsigned long flags; | 242 | unsigned long flags; |
243 | int err = 0; | ||
241 | 244 | ||
242 | char *ev = extra; | 245 | char *ev = extra; |
243 | char *stop = ev + IW_SCAN_MAX_DATA; | 246 | char *stop = ev + wrqu->data.length; |
244 | int i = 0; | 247 | int i = 0; |
245 | 248 | ||
246 | IEEE80211_DEBUG_WX("Getting scan\n"); | 249 | IEEE80211_DEBUG_WX("Getting scan\n"); |
@@ -249,6 +252,11 @@ int ieee80211_wx_get_scan(struct ieee80211_device *ieee, | |||
249 | 252 | ||
250 | list_for_each_entry(network, &ieee->network_list, list) { | 253 | list_for_each_entry(network, &ieee->network_list, list) { |
251 | i++; | 254 | i++; |
255 | if (stop - ev < SCAN_ITEM_SIZE) { | ||
256 | err = -E2BIG; | ||
257 | break; | ||
258 | } | ||
259 | |||
252 | if (ieee->scan_age == 0 || | 260 | if (ieee->scan_age == 0 || |
253 | time_after(network->last_scanned + ieee->scan_age, jiffies)) | 261 | time_after(network->last_scanned + ieee->scan_age, jiffies)) |
254 | ev = ipw2100_translate_scan(ieee, ev, stop, network); | 262 | ev = ipw2100_translate_scan(ieee, ev, stop, network); |
@@ -270,7 +278,7 @@ int ieee80211_wx_get_scan(struct ieee80211_device *ieee, | |||
270 | 278 | ||
271 | IEEE80211_DEBUG_WX("exit: %d networks returned.\n", i); | 279 | IEEE80211_DEBUG_WX("exit: %d networks returned.\n", i); |
272 | 280 | ||
273 | return 0; | 281 | return err; |
274 | } | 282 | } |
275 | 283 | ||
276 | int ieee80211_wx_set_encode(struct ieee80211_device *ieee, | 284 | int ieee80211_wx_set_encode(struct ieee80211_device *ieee, |
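[Editor's note] The ieee80211_wx.c hunk stops filling the caller's buffer and reports -E2BIG once fewer than SCAN_ITEM_SIZE bytes remain, instead of silently overrunning. A userspace sketch of the same bounds check; the buffer size and "one entry = SCAN_ITEM_SIZE bytes written" simplification are assumptions for the example:

#include <errno.h>
#include <stdio.h>

#define SCAN_ITEM_SIZE 128	/* worst-case size of one formatted scan entry */

static int fill_items(char *ev, char *stop, int nitems)
{
	for (int i = 0; i < nitems; i++) {
		if (stop - ev < SCAN_ITEM_SIZE)
			return -E2BIG;	/* caller should retry with a larger buffer */
		ev += SCAN_ITEM_SIZE;	/* pretend one entry was written */
	}
	return 0;
}

int main(void)
{
	char buf[300];
	printf("%d\n", fill_items(buf, buf + sizeof(buf), 2));	/* 0 */
	printf("%d\n", fill_items(buf, buf + sizeof(buf), 3));	/* -E2BIG */
	return 0;
}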
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index 95b9d81ac488..3ffa60dadc0c 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c | |||
@@ -1135,7 +1135,7 @@ static void rtmsg_ifa(int event, struct in_ifaddr* ifa) | |||
1135 | 1135 | ||
1136 | if (!skb) | 1136 | if (!skb) |
1137 | netlink_set_err(rtnl, 0, RTNLGRP_IPV4_IFADDR, ENOBUFS); | 1137 | netlink_set_err(rtnl, 0, RTNLGRP_IPV4_IFADDR, ENOBUFS); |
1138 | else if (inet_fill_ifaddr(skb, ifa, current->pid, 0, event, 0) < 0) { | 1138 | else if (inet_fill_ifaddr(skb, ifa, 0, 0, event, 0) < 0) { |
1139 | kfree_skb(skb); | 1139 | kfree_skb(skb); |
1140 | netlink_set_err(rtnl, 0, RTNLGRP_IPV4_IFADDR, EINVAL); | 1140 | netlink_set_err(rtnl, 0, RTNLGRP_IPV4_IFADDR, EINVAL); |
1141 | } else { | 1141 | } else { |
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index 73bfcae8af9c..09590f356086 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c | |||
@@ -12,13 +12,6 @@ | |||
12 | #include <net/protocol.h> | 12 | #include <net/protocol.h> |
13 | #include <net/udp.h> | 13 | #include <net/udp.h> |
14 | 14 | ||
15 | /* decapsulation data for use when post-processing */ | ||
16 | struct esp_decap_data { | ||
17 | xfrm_address_t saddr; | ||
18 | __u16 sport; | ||
19 | __u8 proto; | ||
20 | }; | ||
21 | |||
22 | static int esp_output(struct xfrm_state *x, struct sk_buff *skb) | 15 | static int esp_output(struct xfrm_state *x, struct sk_buff *skb) |
23 | { | 16 | { |
24 | int err; | 17 | int err; |
@@ -150,6 +143,10 @@ static int esp_input(struct xfrm_state *x, struct xfrm_decap_state *decap, struc | |||
150 | int elen = skb->len - sizeof(struct ip_esp_hdr) - esp->conf.ivlen - alen; | 143 | int elen = skb->len - sizeof(struct ip_esp_hdr) - esp->conf.ivlen - alen; |
151 | int nfrags; | 144 | int nfrags; |
152 | int encap_len = 0; | 145 | int encap_len = 0; |
146 | u8 nexthdr[2]; | ||
147 | struct scatterlist *sg; | ||
148 | u8 workbuf[60]; | ||
149 | int padlen; | ||
153 | 150 | ||
154 | if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr))) | 151 | if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr))) |
155 | goto out; | 152 | goto out; |
@@ -185,122 +182,82 @@ static int esp_input(struct xfrm_state *x, struct xfrm_decap_state *decap, struc | |||
185 | if (esp->conf.ivlen) | 182 | if (esp->conf.ivlen) |
186 | crypto_cipher_set_iv(esp->conf.tfm, esph->enc_data, crypto_tfm_alg_ivsize(esp->conf.tfm)); | 183 | crypto_cipher_set_iv(esp->conf.tfm, esph->enc_data, crypto_tfm_alg_ivsize(esp->conf.tfm)); |
187 | 184 | ||
188 | { | 185 | sg = &esp->sgbuf[0]; |
189 | u8 nexthdr[2]; | ||
190 | struct scatterlist *sg = &esp->sgbuf[0]; | ||
191 | u8 workbuf[60]; | ||
192 | int padlen; | ||
193 | |||
194 | if (unlikely(nfrags > ESP_NUM_FAST_SG)) { | ||
195 | sg = kmalloc(sizeof(struct scatterlist)*nfrags, GFP_ATOMIC); | ||
196 | if (!sg) | ||
197 | goto out; | ||
198 | } | ||
199 | skb_to_sgvec(skb, sg, sizeof(struct ip_esp_hdr) + esp->conf.ivlen, elen); | ||
200 | crypto_cipher_decrypt(esp->conf.tfm, sg, sg, elen); | ||
201 | if (unlikely(sg != &esp->sgbuf[0])) | ||
202 | kfree(sg); | ||
203 | |||
204 | if (skb_copy_bits(skb, skb->len-alen-2, nexthdr, 2)) | ||
205 | BUG(); | ||
206 | 186 | ||
207 | padlen = nexthdr[0]; | 187 | if (unlikely(nfrags > ESP_NUM_FAST_SG)) { |
208 | if (padlen+2 >= elen) | 188 | sg = kmalloc(sizeof(struct scatterlist)*nfrags, GFP_ATOMIC); |
189 | if (!sg) | ||
209 | goto out; | 190 | goto out; |
210 | |||
211 | /* ... check padding bits here. Silly. :-) */ | ||
212 | |||
213 | if (x->encap && decap && decap->decap_type) { | ||
214 | struct esp_decap_data *encap_data; | ||
215 | struct udphdr *uh = (struct udphdr *) (iph+1); | ||
216 | |||
217 | encap_data = (struct esp_decap_data *) (decap->decap_data); | ||
218 | encap_data->proto = 0; | ||
219 | |||
220 | switch (decap->decap_type) { | ||
221 | case UDP_ENCAP_ESPINUDP: | ||
222 | case UDP_ENCAP_ESPINUDP_NON_IKE: | ||
223 | encap_data->proto = AF_INET; | ||
224 | encap_data->saddr.a4 = iph->saddr; | ||
225 | encap_data->sport = uh->source; | ||
226 | encap_len = (void*)esph - (void*)uh; | ||
227 | break; | ||
228 | |||
229 | default: | ||
230 | goto out; | ||
231 | } | ||
232 | } | ||
233 | |||
234 | iph->protocol = nexthdr[1]; | ||
235 | pskb_trim(skb, skb->len - alen - padlen - 2); | ||
236 | memcpy(workbuf, skb->nh.raw, iph->ihl*4); | ||
237 | skb->h.raw = skb_pull(skb, sizeof(struct ip_esp_hdr) + esp->conf.ivlen); | ||
238 | skb->nh.raw += encap_len + sizeof(struct ip_esp_hdr) + esp->conf.ivlen; | ||
239 | memcpy(skb->nh.raw, workbuf, iph->ihl*4); | ||
240 | skb->nh.iph->tot_len = htons(skb->len); | ||
241 | } | 191 | } |
192 | skb_to_sgvec(skb, sg, sizeof(struct ip_esp_hdr) + esp->conf.ivlen, elen); | ||
193 | crypto_cipher_decrypt(esp->conf.tfm, sg, sg, elen); | ||
194 | if (unlikely(sg != &esp->sgbuf[0])) | ||
195 | kfree(sg); | ||
242 | 196 | ||
243 | return 0; | 197 | if (skb_copy_bits(skb, skb->len-alen-2, nexthdr, 2)) |
198 | BUG(); | ||
244 | 199 | ||
245 | out: | 200 | padlen = nexthdr[0]; |
246 | return -EINVAL; | 201 | if (padlen+2 >= elen) |
247 | } | 202 | goto out; |
248 | 203 | ||
249 | static int esp_post_input(struct xfrm_state *x, struct xfrm_decap_state *decap, struct sk_buff *skb) | 204 | /* ... check padding bits here. Silly. :-) */ |
250 | { | ||
251 | |||
252 | if (x->encap) { | ||
253 | struct xfrm_encap_tmpl *encap; | ||
254 | struct esp_decap_data *decap_data; | ||
255 | 205 | ||
256 | encap = x->encap; | 206 | if (x->encap) { |
257 | decap_data = (struct esp_decap_data *)(decap->decap_data); | 207 | struct xfrm_encap_tmpl *encap = x->encap; |
208 | struct udphdr *uh; | ||
258 | 209 | ||
259 | /* first, make sure that the decap type == the encap type */ | ||
260 | if (encap->encap_type != decap->decap_type) | 210 | if (encap->encap_type != decap->decap_type) |
261 | return -EINVAL; | 211 | goto out; |
262 | 212 | ||
263 | switch (encap->encap_type) { | 213 | uh = (struct udphdr *)(iph + 1); |
264 | default: | 214 | encap_len = (void*)esph - (void*)uh; |
265 | case UDP_ENCAP_ESPINUDP: | 215 | |
266 | case UDP_ENCAP_ESPINUDP_NON_IKE: | 216 | /* |
267 | /* | 217 | * 1) if the NAT-T peer's IP or port changed then |
268 | * 1) if the NAT-T peer's IP or port changed then | 218 | * advertize the change to the keying daemon. |
269 | * advertize the change to the keying daemon. | 219 | * This is an inbound SA, so just compare |
270 | * This is an inbound SA, so just compare | 220 | * SRC ports. |
271 | * SRC ports. | 221 | */ |
272 | */ | 222 | if (iph->saddr != x->props.saddr.a4 || |
273 | if (decap_data->proto == AF_INET && | 223 | uh->source != encap->encap_sport) { |
274 | (decap_data->saddr.a4 != x->props.saddr.a4 || | 224 | xfrm_address_t ipaddr; |
275 | decap_data->sport != encap->encap_sport)) { | 225 | |
276 | xfrm_address_t ipaddr; | 226 | ipaddr.a4 = iph->saddr; |
277 | 227 | km_new_mapping(x, &ipaddr, uh->source); | |
278 | ipaddr.a4 = decap_data->saddr.a4; | 228 | |
279 | km_new_mapping(x, &ipaddr, decap_data->sport); | 229 | /* XXX: perhaps add an extra |
280 | 230 | * policy check here, to see | |
281 | /* XXX: perhaps add an extra | 231 | * if we should allow or |
282 | * policy check here, to see | 232 | * reject a packet from a |
283 | * if we should allow or | 233 | * different source |
284 | * reject a packet from a | 234 | * address/port. |
285 | * different source | ||
286 | * address/port. | ||
287 | */ | ||
288 | } | ||
289 | |||
290 | /* | ||
291 | * 2) ignore UDP/TCP checksums in case | ||
292 | * of NAT-T in Transport Mode, or | ||
293 | * perform other post-processing fixes | ||
294 | * as per * draft-ietf-ipsec-udp-encaps-06, | ||
295 | * section 3.1.2 | ||
296 | */ | 235 | */ |
297 | if (!x->props.mode) | ||
298 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
299 | |||
300 | break; | ||
301 | } | 236 | } |
237 | |||
238 | /* | ||
239 | * 2) ignore UDP/TCP checksums in case | ||
240 | * of NAT-T in Transport Mode, or | ||
241 | * perform other post-processing fixes | ||
242 | * as per draft-ietf-ipsec-udp-encaps-06, | ||
243 | * section 3.1.2 | ||
244 | */ | ||
245 | if (!x->props.mode) | ||
246 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
302 | } | 247 | } |
248 | |||
249 | iph->protocol = nexthdr[1]; | ||
250 | pskb_trim(skb, skb->len - alen - padlen - 2); | ||
251 | memcpy(workbuf, skb->nh.raw, iph->ihl*4); | ||
252 | skb->h.raw = skb_pull(skb, sizeof(struct ip_esp_hdr) + esp->conf.ivlen); | ||
253 | skb->nh.raw += encap_len + sizeof(struct ip_esp_hdr) + esp->conf.ivlen; | ||
254 | memcpy(skb->nh.raw, workbuf, iph->ihl*4); | ||
255 | skb->nh.iph->tot_len = htons(skb->len); | ||
256 | |||
303 | return 0; | 257 | return 0; |
258 | |||
259 | out: | ||
260 | return -EINVAL; | ||
304 | } | 261 | } |
305 | 262 | ||
306 | static u32 esp4_get_max_size(struct xfrm_state *x, int mtu) | 263 | static u32 esp4_get_max_size(struct xfrm_state *x, int mtu) |
@@ -458,7 +415,6 @@ static struct xfrm_type esp_type = | |||
458 | .destructor = esp_destroy, | 415 | .destructor = esp_destroy, |
459 | .get_max_size = esp4_get_max_size, | 416 | .get_max_size = esp4_get_max_size, |
460 | .input = esp_input, | 417 | .input = esp_input, |
461 | .post_input = esp_post_input, | ||
462 | .output = esp_output | 418 | .output = esp_output |
463 | }; | 419 | }; |
464 | 420 | ||
@@ -470,15 +426,6 @@ static struct net_protocol esp4_protocol = { | |||
470 | 426 | ||
471 | static int __init esp4_init(void) | 427 | static int __init esp4_init(void) |
472 | { | 428 | { |
473 | struct xfrm_decap_state decap; | ||
474 | |||
475 | if (sizeof(struct esp_decap_data) > | ||
476 | sizeof(decap.decap_data)) { | ||
477 | extern void decap_data_too_small(void); | ||
478 | |||
479 | decap_data_too_small(); | ||
480 | } | ||
481 | |||
482 | if (xfrm_register_type(&esp_type, AF_INET) < 0) { | 429 | if (xfrm_register_type(&esp_type, AF_INET) < 0) { |
483 | printk(KERN_INFO "ip esp init: can't add xfrm type\n"); | 430 | printk(KERN_INFO "ip esp init: can't add xfrm type\n"); |
484 | return -EAGAIN; | 431 | return -EAGAIN; |
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index ef4724de7350..0f4145babb14 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c | |||
@@ -1045,7 +1045,7 @@ fib_convert_rtentry(int cmd, struct nlmsghdr *nl, struct rtmsg *rtm, | |||
1045 | } | 1045 | } |
1046 | 1046 | ||
1047 | nl->nlmsg_flags = NLM_F_REQUEST; | 1047 | nl->nlmsg_flags = NLM_F_REQUEST; |
1048 | nl->nlmsg_pid = current->pid; | 1048 | nl->nlmsg_pid = 0; |
1049 | nl->nlmsg_seq = 0; | 1049 | nl->nlmsg_seq = 0; |
1050 | nl->nlmsg_len = NLMSG_LENGTH(sizeof(*rtm)); | 1050 | nl->nlmsg_len = NLMSG_LENGTH(sizeof(*rtm)); |
1051 | if (cmd == SIOCDELRT) { | 1051 | if (cmd == SIOCDELRT) { |
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 105039eb7629..e7bbff4340bb 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c | |||
@@ -192,7 +192,7 @@ int sysctl_icmp_echo_ignore_all; | |||
192 | int sysctl_icmp_echo_ignore_broadcasts = 1; | 192 | int sysctl_icmp_echo_ignore_broadcasts = 1; |
193 | 193 | ||
194 | /* Control parameter - ignore bogus broadcast responses? */ | 194 | /* Control parameter - ignore bogus broadcast responses? */ |
195 | int sysctl_icmp_ignore_bogus_error_responses; | 195 | int sysctl_icmp_ignore_bogus_error_responses = 1; |
196 | 196 | ||
197 | /* | 197 | /* |
198 | * Configurable global rate limit. | 198 | * Configurable global rate limit. |
@@ -385,7 +385,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb) | |||
385 | u32 daddr; | 385 | u32 daddr; |
386 | 386 | ||
387 | if (ip_options_echo(&icmp_param->replyopts, skb)) | 387 | if (ip_options_echo(&icmp_param->replyopts, skb)) |
388 | goto out; | 388 | return; |
389 | 389 | ||
390 | if (icmp_xmit_lock()) | 390 | if (icmp_xmit_lock()) |
391 | return; | 391 | return; |
@@ -416,7 +416,6 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb) | |||
416 | ip_rt_put(rt); | 416 | ip_rt_put(rt); |
417 | out_unlock: | 417 | out_unlock: |
418 | icmp_xmit_unlock(); | 418 | icmp_xmit_unlock(); |
419 | out:; | ||
420 | } | 419 | } |
421 | 420 | ||
422 | 421 | ||
@@ -525,7 +524,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, u32 info) | |||
525 | iph->tos; | 524 | iph->tos; |
526 | 525 | ||
527 | if (ip_options_echo(&icmp_param.replyopts, skb_in)) | 526 | if (ip_options_echo(&icmp_param.replyopts, skb_in)) |
528 | goto ende; | 527 | goto out_unlock; |
529 | 528 | ||
530 | 529 | ||
531 | /* | 530 | /* |
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index d8ce7133cd8f..64ce52bf0485 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c | |||
@@ -970,7 +970,7 @@ int igmp_rcv(struct sk_buff *skb) | |||
970 | case IGMP_MTRACE_RESP: | 970 | case IGMP_MTRACE_RESP: |
971 | break; | 971 | break; |
972 | default: | 972 | default: |
973 | NETDEBUG(KERN_DEBUG "New IGMP type=%d, why we do not know about it?\n", ih->type); | 973 | break; |
974 | } | 974 | } |
975 | 975 | ||
976 | drop: | 976 | drop: |
@@ -1578,7 +1578,7 @@ static int sf_setstate(struct ip_mc_list *pmc) | |||
1578 | new_in = psf->sf_count[MCAST_INCLUDE] != 0; | 1578 | new_in = psf->sf_count[MCAST_INCLUDE] != 0; |
1579 | if (new_in) { | 1579 | if (new_in) { |
1580 | if (!psf->sf_oldin) { | 1580 | if (!psf->sf_oldin) { |
1581 | struct ip_sf_list *prev = 0; | 1581 | struct ip_sf_list *prev = NULL; |
1582 | 1582 | ||
1583 | for (dpsf=pmc->tomb; dpsf; dpsf=dpsf->sf_next) { | 1583 | for (dpsf=pmc->tomb; dpsf; dpsf=dpsf->sf_next) { |
1584 | if (dpsf->sf_inaddr == psf->sf_inaddr) | 1584 | if (dpsf->sf_inaddr == psf->sf_inaddr) |
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index abe23923e4e7..9981dcd68f11 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c | |||
@@ -830,7 +830,8 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | |||
830 | skb->h.raw = skb->nh.raw; | 830 | skb->h.raw = skb->nh.raw; |
831 | skb->nh.raw = skb_push(skb, gre_hlen); | 831 | skb->nh.raw = skb_push(skb, gre_hlen); |
832 | memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); | 832 | memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); |
833 | IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE|IPSKB_XFRM_TRANSFORMED); | 833 | IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | |
834 | IPSKB_REROUTED); | ||
834 | dst_release(skb->dst); | 835 | dst_release(skb->dst); |
835 | skb->dst = &rt->u.dst; | 836 | skb->dst = &rt->u.dst; |
836 | 837 | ||
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 3324fbfe528a..8ee4d016740d 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c | |||
@@ -207,8 +207,10 @@ static inline int ip_finish_output(struct sk_buff *skb) | |||
207 | { | 207 | { |
208 | #if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM) | 208 | #if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM) |
209 | /* Policy lookup after SNAT yielded a new policy */ | 209 | /* Policy lookup after SNAT yielded a new policy */ |
210 | if (skb->dst->xfrm != NULL) | 210 | if (skb->dst->xfrm != NULL) { |
211 | return xfrm4_output_finish(skb); | 211 | IPCB(skb)->flags |= IPSKB_REROUTED; |
212 | return dst_output(skb); | ||
213 | } | ||
212 | #endif | 214 | #endif |
213 | if (skb->len > dst_mtu(skb->dst) && | 215 | if (skb->len > dst_mtu(skb->dst) && |
214 | !(skb_shinfo(skb)->ufo_size || skb_shinfo(skb)->tso_size)) | 216 | !(skb_shinfo(skb)->ufo_size || skb_shinfo(skb)->tso_size)) |
@@ -271,8 +273,9 @@ int ip_mc_output(struct sk_buff *skb) | |||
271 | newskb->dev, ip_dev_loopback_xmit); | 273 | newskb->dev, ip_dev_loopback_xmit); |
272 | } | 274 | } |
273 | 275 | ||
274 | return NF_HOOK(PF_INET, NF_IP_POST_ROUTING, skb, NULL, skb->dev, | 276 | return NF_HOOK_COND(PF_INET, NF_IP_POST_ROUTING, skb, NULL, skb->dev, |
275 | ip_finish_output); | 277 | ip_finish_output, |
278 | !(IPCB(skb)->flags & IPSKB_REROUTED)); | ||
276 | } | 279 | } |
277 | 280 | ||
278 | int ip_output(struct sk_buff *skb) | 281 | int ip_output(struct sk_buff *skb) |
@@ -284,8 +287,9 @@ int ip_output(struct sk_buff *skb) | |||
284 | skb->dev = dev; | 287 | skb->dev = dev; |
285 | skb->protocol = htons(ETH_P_IP); | 288 | skb->protocol = htons(ETH_P_IP); |
286 | 289 | ||
287 | return NF_HOOK(PF_INET, NF_IP_POST_ROUTING, skb, NULL, dev, | 290 | return NF_HOOK_COND(PF_INET, NF_IP_POST_ROUTING, skb, NULL, dev, |
288 | ip_finish_output); | 291 | ip_finish_output, |
292 | !(IPCB(skb)->flags & IPSKB_REROUTED)); | ||
289 | } | 293 | } |
290 | 294 | ||
291 | int ip_queue_xmit(struct sk_buff *skb, int ipfragok) | 295 | int ip_queue_xmit(struct sk_buff *skb, int ipfragok) |
@@ -843,10 +847,11 @@ int ip_append_data(struct sock *sk, | |||
843 | if (((length > mtu) && (sk->sk_protocol == IPPROTO_UDP)) && | 847 | if (((length > mtu) && (sk->sk_protocol == IPPROTO_UDP)) && |
844 | (rt->u.dst.dev->features & NETIF_F_UFO)) { | 848 | (rt->u.dst.dev->features & NETIF_F_UFO)) { |
845 | 849 | ||
846 | if(ip_ufo_append_data(sk, getfrag, from, length, hh_len, | 850 | err = ip_ufo_append_data(sk, getfrag, from, length, hh_len, |
847 | fragheaderlen, transhdrlen, mtu, flags)) | 851 | fragheaderlen, transhdrlen, mtu, |
852 | flags); | ||
853 | if (err) | ||
848 | goto error; | 854 | goto error; |
849 | |||
850 | return 0; | 855 | return 0; |
851 | } | 856 | } |
852 | 857 | ||
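[Editor's note] The ip_output.c hunks switch to a conditional hook invocation so packets already rerouted through xfrm skip a second POST_ROUTING traversal. A userspace sketch of that idea under the assumption that the conditional form runs the hook chain only when the condition holds and otherwise goes straight to the output function; names here are illustrative, not the kernel macro:

#include <stdio.h>

static int output_fn(const char *pkt)
{
	printf("xmit %s\n", pkt);
	return 0;
}

static int hook_cond(const char *pkt, int (*okfn)(const char *), int cond)
{
	if (cond)
		printf("run POST_ROUTING hooks for %s\n", pkt);
	return okfn(pkt);	/* hooks skipped when cond is false */
}

int main(void)
{
	hook_cond("plain packet", output_fn, 1);	/* hooks run */
	hook_cond("rerouted packet", output_fn, 0);	/* hooks skipped */
	return 0;
}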
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c index e5cbe72c6b80..03d13742a4b8 100644 --- a/net/ipv4/ipip.c +++ b/net/ipv4/ipip.c | |||
@@ -622,7 +622,8 @@ static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | |||
622 | skb->h.raw = skb->nh.raw; | 622 | skb->h.raw = skb->nh.raw; |
623 | skb->nh.raw = skb_push(skb, sizeof(struct iphdr)); | 623 | skb->nh.raw = skb_push(skb, sizeof(struct iphdr)); |
624 | memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); | 624 | memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); |
625 | IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE|IPSKB_XFRM_TRANSFORMED); | 625 | IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | |
626 | IPSKB_REROUTED); | ||
626 | dst_release(skb->dst); | 627 | dst_release(skb->dst); |
627 | skb->dst = &rt->u.dst; | 628 | skb->dst = &rt->u.dst; |
628 | 629 | ||
diff --git a/net/ipv4/multipath_wrandom.c b/net/ipv4/multipath_wrandom.c index d34a9fa608e0..342d0b9098f5 100644 --- a/net/ipv4/multipath_wrandom.c +++ b/net/ipv4/multipath_wrandom.c | |||
@@ -228,7 +228,7 @@ static void wrandom_set_nhinfo(__u32 network, | |||
228 | struct multipath_dest *d, *target_dest = NULL; | 228 | struct multipath_dest *d, *target_dest = NULL; |
229 | 229 | ||
230 | /* store the weight information for a certain route */ | 230 | /* store the weight information for a certain route */ |
231 | spin_lock(&state[state_idx].lock); | 231 | spin_lock_bh(&state[state_idx].lock); |
232 | 232 | ||
233 | /* find state entry for gateway or add one if necessary */ | 233 | /* find state entry for gateway or add one if necessary */ |
234 | list_for_each_entry_rcu(r, &state[state_idx].head, list) { | 234 | list_for_each_entry_rcu(r, &state[state_idx].head, list) { |
@@ -276,7 +276,7 @@ static void wrandom_set_nhinfo(__u32 network, | |||
276 | * we are finished | 276 | * we are finished |
277 | */ | 277 | */ |
278 | 278 | ||
279 | spin_unlock(&state[state_idx].lock); | 279 | spin_unlock_bh(&state[state_idx].lock); |
280 | } | 280 | } |
281 | 281 | ||
282 | static void __multipath_free(struct rcu_head *head) | 282 | static void __multipath_free(struct rcu_head *head) |
@@ -302,7 +302,7 @@ static void wrandom_flush(void) | |||
302 | for (i = 0; i < MULTIPATH_STATE_SIZE; ++i) { | 302 | for (i = 0; i < MULTIPATH_STATE_SIZE; ++i) { |
303 | struct multipath_route *r; | 303 | struct multipath_route *r; |
304 | 304 | ||
305 | spin_lock(&state[i].lock); | 305 | spin_lock_bh(&state[i].lock); |
306 | list_for_each_entry_rcu(r, &state[i].head, list) { | 306 | list_for_each_entry_rcu(r, &state[i].head, list) { |
307 | struct multipath_dest *d; | 307 | struct multipath_dest *d; |
308 | list_for_each_entry_rcu(d, &r->dests, list) { | 308 | list_for_each_entry_rcu(d, &r->dests, list) { |
@@ -315,7 +315,7 @@ static void wrandom_flush(void) | |||
315 | __multipath_free); | 315 | __multipath_free); |
316 | } | 316 | } |
317 | 317 | ||
318 | spin_unlock(&state[i].lock); | 318 | spin_unlock_bh(&state[i].lock); |
319 | } | 319 | } |
320 | } | 320 | } |
321 | 321 | ||
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c index 52a3d7c57907..ed42cdc57cd9 100644 --- a/net/ipv4/netfilter.c +++ b/net/ipv4/netfilter.c | |||
@@ -78,6 +78,47 @@ int ip_route_me_harder(struct sk_buff **pskb) | |||
78 | } | 78 | } |
79 | EXPORT_SYMBOL(ip_route_me_harder); | 79 | EXPORT_SYMBOL(ip_route_me_harder); |
80 | 80 | ||
81 | #ifdef CONFIG_XFRM | ||
82 | int ip_xfrm_me_harder(struct sk_buff **pskb) | ||
83 | { | ||
84 | struct flowi fl; | ||
85 | unsigned int hh_len; | ||
86 | struct dst_entry *dst; | ||
87 | |||
88 | if (IPCB(*pskb)->flags & IPSKB_XFRM_TRANSFORMED) | ||
89 | return 0; | ||
90 | if (xfrm_decode_session(*pskb, &fl, AF_INET) < 0) | ||
91 | return -1; | ||
92 | |||
93 | dst = (*pskb)->dst; | ||
94 | if (dst->xfrm) | ||
95 | dst = ((struct xfrm_dst *)dst)->route; | ||
96 | dst_hold(dst); | ||
97 | |||
98 | if (xfrm_lookup(&dst, &fl, (*pskb)->sk, 0) < 0) | ||
99 | return -1; | ||
100 | |||
101 | dst_release((*pskb)->dst); | ||
102 | (*pskb)->dst = dst; | ||
103 | |||
104 | /* Change in oif may mean change in hh_len. */ | ||
105 | hh_len = (*pskb)->dst->dev->hard_header_len; | ||
106 | if (skb_headroom(*pskb) < hh_len) { | ||
107 | struct sk_buff *nskb; | ||
108 | |||
109 | nskb = skb_realloc_headroom(*pskb, hh_len); | ||
110 | if (!nskb) | ||
111 | return -1; | ||
112 | if ((*pskb)->sk) | ||
113 | skb_set_owner_w(nskb, (*pskb)->sk); | ||
114 | kfree_skb(*pskb); | ||
115 | *pskb = nskb; | ||
116 | } | ||
117 | return 0; | ||
118 | } | ||
119 | EXPORT_SYMBOL(ip_xfrm_me_harder); | ||
120 | #endif | ||
121 | |||
81 | void (*ip_nat_decode_session)(struct sk_buff *, struct flowi *); | 122 | void (*ip_nat_decode_session)(struct sk_buff *, struct flowi *); |
82 | EXPORT_SYMBOL(ip_nat_decode_session); | 123 | EXPORT_SYMBOL(ip_nat_decode_session); |
83 | 124 | ||
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index afe3d8f8177d..7d7ab94a7a2e 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c | |||
@@ -771,7 +771,7 @@ static int get_entries(const struct arpt_get_entries *entries, | |||
771 | struct arpt_table *t; | 771 | struct arpt_table *t; |
772 | 772 | ||
773 | t = xt_find_table_lock(NF_ARP, entries->name); | 773 | t = xt_find_table_lock(NF_ARP, entries->name); |
774 | if (t || !IS_ERR(t)) { | 774 | if (t && !IS_ERR(t)) { |
775 | struct xt_table_info *private = t->private; | 775 | struct xt_table_info *private = t->private; |
776 | duprintf("t->private->number = %u\n", | 776 | duprintf("t->private->number = %u\n", |
777 | private->number); | 777 | private->number); |
@@ -807,6 +807,13 @@ static int do_replace(void __user *user, unsigned int len) | |||
807 | if (len != sizeof(tmp) + tmp.size) | 807 | if (len != sizeof(tmp) + tmp.size) |
808 | return -ENOPROTOOPT; | 808 | return -ENOPROTOOPT; |
809 | 809 | ||
810 | /* overflow check */ | ||
811 | if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS - | ||
812 | SMP_CACHE_BYTES) | ||
813 | return -ENOMEM; | ||
814 | if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) | ||
815 | return -ENOMEM; | ||
816 | |||
810 | newinfo = xt_alloc_table_info(tmp.size); | 817 | newinfo = xt_alloc_table_info(tmp.size); |
811 | if (!newinfo) | 818 | if (!newinfo) |
812 | return -ENOMEM; | 819 | return -ENOMEM; |
diff --git a/net/ipv4/netfilter/ip_conntrack_netlink.c b/net/ipv4/netfilter/ip_conntrack_netlink.c index c9ebbe0d2d9c..e0b5926c76f9 100644 --- a/net/ipv4/netfilter/ip_conntrack_netlink.c +++ b/net/ipv4/netfilter/ip_conntrack_netlink.c | |||
@@ -1216,7 +1216,7 @@ static int ctnetlink_expect_event(struct notifier_block *this, | |||
1216 | 1216 | ||
1217 | b = skb->tail; | 1217 | b = skb->tail; |
1218 | 1218 | ||
1219 | type |= NFNL_SUBSYS_CTNETLINK << 8; | 1219 | type |= NFNL_SUBSYS_CTNETLINK_EXP << 8; |
1220 | nlh = NLMSG_PUT(skb, 0, 0, type, sizeof(struct nfgenmsg)); | 1220 | nlh = NLMSG_PUT(skb, 0, 0, type, sizeof(struct nfgenmsg)); |
1221 | nfmsg = NLMSG_DATA(nlh); | 1221 | nfmsg = NLMSG_DATA(nlh); |
1222 | 1222 | ||
@@ -1567,6 +1567,7 @@ static struct nfnetlink_subsystem ctnl_exp_subsys = { | |||
1567 | }; | 1567 | }; |
1568 | 1568 | ||
1569 | MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK); | 1569 | MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK); |
1570 | MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK_EXP); | ||
1570 | 1571 | ||
1571 | static int __init ctnetlink_init(void) | 1572 | static int __init ctnetlink_init(void) |
1572 | { | 1573 | { |
diff --git a/net/ipv4/netfilter/ip_conntrack_tftp.c b/net/ipv4/netfilter/ip_conntrack_tftp.c index d3c5a371f993..4ba4463cec28 100644 --- a/net/ipv4/netfilter/ip_conntrack_tftp.c +++ b/net/ipv4/netfilter/ip_conntrack_tftp.c | |||
@@ -71,6 +71,7 @@ static int tftp_help(struct sk_buff **pskb, | |||
71 | 71 | ||
72 | exp->tuple = ct->tuplehash[IP_CT_DIR_REPLY].tuple; | 72 | exp->tuple = ct->tuplehash[IP_CT_DIR_REPLY].tuple; |
73 | exp->mask.src.ip = 0xffffffff; | 73 | exp->mask.src.ip = 0xffffffff; |
74 | exp->mask.src.u.udp.port = 0; | ||
74 | exp->mask.dst.ip = 0xffffffff; | 75 | exp->mask.dst.ip = 0xffffffff; |
75 | exp->mask.dst.u.udp.port = 0xffff; | 76 | exp->mask.dst.u.udp.port = 0xffff; |
76 | exp->mask.dst.protonum = 0xff; | 77 | exp->mask.dst.protonum = 0xff; |
diff --git a/net/ipv4/netfilter/ip_nat_core.c b/net/ipv4/netfilter/ip_nat_core.c index c1a61462507f..1741d555ad0d 100644 --- a/net/ipv4/netfilter/ip_nat_core.c +++ b/net/ipv4/netfilter/ip_nat_core.c | |||
@@ -434,6 +434,7 @@ int ip_nat_icmp_reply_translation(struct sk_buff **pskb, | |||
434 | } *inside; | 434 | } *inside; |
435 | struct ip_conntrack_tuple inner, target; | 435 | struct ip_conntrack_tuple inner, target; |
436 | int hdrlen = (*pskb)->nh.iph->ihl * 4; | 436 | int hdrlen = (*pskb)->nh.iph->ihl * 4; |
437 | unsigned long statusbit; | ||
437 | 438 | ||
438 | if (!skb_make_writable(pskb, hdrlen + sizeof(*inside))) | 439 | if (!skb_make_writable(pskb, hdrlen + sizeof(*inside))) |
439 | return 0; | 440 | return 0; |
@@ -495,17 +496,16 @@ int ip_nat_icmp_reply_translation(struct sk_buff **pskb, | |||
495 | 496 | ||
496 | /* Change outer to look the reply to an incoming packet | 497 | /* Change outer to look the reply to an incoming packet |
497 | * (proto 0 means don't invert per-proto part). */ | 498 | * (proto 0 means don't invert per-proto part). */ |
499 | if (manip == IP_NAT_MANIP_SRC) | ||
500 | statusbit = IPS_SRC_NAT; | ||
501 | else | ||
502 | statusbit = IPS_DST_NAT; | ||
498 | 503 | ||
499 | /* Obviously, we need to NAT destination IP, but source IP | 504 | /* Invert if this is reply dir. */ |
500 | should be NAT'ed only if it is from a NAT'd host. | 505 | if (dir == IP_CT_DIR_REPLY) |
506 | statusbit ^= IPS_NAT_MASK; | ||
501 | 507 | ||
502 | Explanation: some people use NAT for anonymizing. Also, | 508 | if (ct->status & statusbit) { |
503 | CERT recommends dropping all packets from private IP | ||
504 | addresses (although ICMP errors from internal links with | ||
505 | such addresses are not too uncommon, as Alan Cox points | ||
506 | out) */ | ||
507 | if (manip != IP_NAT_MANIP_SRC | ||
508 | || ((*pskb)->nh.iph->saddr == ct->tuplehash[dir].tuple.src.ip)) { | ||
509 | invert_tuplepr(&target, &ct->tuplehash[!dir].tuple); | 509 | invert_tuplepr(&target, &ct->tuplehash[!dir].tuple); |
510 | if (!manip_pkt(0, pskb, 0, &target, manip)) | 510 | if (!manip_pkt(0, pskb, 0, &target, manip)) |
511 | return 0; | 511 | return 0; |
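[Editor's note] The ip_nat_core.c hunk picks a NAT status bit from the manip type and XORs it with the combined mask for reply-direction packets, flipping SRC-NAT to DST-NAT and vice versa. A sketch of that bit manipulation with stand-in flag values (the real IPS_* constants live in the conntrack headers and differ numerically):

#include <stdio.h>

#define IPS_SRC_NAT	0x1	/* stand-in value */
#define IPS_DST_NAT	0x2	/* stand-in value */
#define IPS_NAT_MASK	(IPS_SRC_NAT | IPS_DST_NAT)

int main(void)
{
	unsigned long statusbit = IPS_SRC_NAT;	/* manip == IP_NAT_MANIP_SRC */
	int reply_dir = 1;

	if (reply_dir)
		statusbit ^= IPS_NAT_MASK;	/* flips to IPS_DST_NAT */
	printf("statusbit = 0x%lx\n", statusbit);
	return 0;
}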
diff --git a/net/ipv4/netfilter/ip_nat_standalone.c b/net/ipv4/netfilter/ip_nat_standalone.c index ad438fb185b8..ab1f88fa21ec 100644 --- a/net/ipv4/netfilter/ip_nat_standalone.c +++ b/net/ipv4/netfilter/ip_nat_standalone.c | |||
@@ -200,20 +200,14 @@ ip_nat_in(unsigned int hooknum, | |||
200 | const struct net_device *out, | 200 | const struct net_device *out, |
201 | int (*okfn)(struct sk_buff *)) | 201 | int (*okfn)(struct sk_buff *)) |
202 | { | 202 | { |
203 | struct ip_conntrack *ct; | ||
204 | enum ip_conntrack_info ctinfo; | ||
205 | unsigned int ret; | 203 | unsigned int ret; |
204 | u_int32_t daddr = (*pskb)->nh.iph->daddr; | ||
206 | 205 | ||
207 | ret = ip_nat_fn(hooknum, pskb, in, out, okfn); | 206 | ret = ip_nat_fn(hooknum, pskb, in, out, okfn); |
208 | if (ret != NF_DROP && ret != NF_STOLEN | 207 | if (ret != NF_DROP && ret != NF_STOLEN |
209 | && (ct = ip_conntrack_get(*pskb, &ctinfo)) != NULL) { | 208 | && daddr != (*pskb)->nh.iph->daddr) { |
210 | enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); | 209 | dst_release((*pskb)->dst); |
211 | 210 | (*pskb)->dst = NULL; | |
212 | if (ct->tuplehash[dir].tuple.src.ip != | ||
213 | ct->tuplehash[!dir].tuple.dst.ip) { | ||
214 | dst_release((*pskb)->dst); | ||
215 | (*pskb)->dst = NULL; | ||
216 | } | ||
217 | } | 211 | } |
218 | return ret; | 212 | return ret; |
219 | } | 213 | } |
@@ -235,19 +229,19 @@ ip_nat_out(unsigned int hooknum, | |||
235 | return NF_ACCEPT; | 229 | return NF_ACCEPT; |
236 | 230 | ||
237 | ret = ip_nat_fn(hooknum, pskb, in, out, okfn); | 231 | ret = ip_nat_fn(hooknum, pskb, in, out, okfn); |
232 | #ifdef CONFIG_XFRM | ||
238 | if (ret != NF_DROP && ret != NF_STOLEN | 233 | if (ret != NF_DROP && ret != NF_STOLEN |
239 | && (ct = ip_conntrack_get(*pskb, &ctinfo)) != NULL) { | 234 | && (ct = ip_conntrack_get(*pskb, &ctinfo)) != NULL) { |
240 | enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); | 235 | enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); |
241 | 236 | ||
242 | if (ct->tuplehash[dir].tuple.src.ip != | 237 | if (ct->tuplehash[dir].tuple.src.ip != |
243 | ct->tuplehash[!dir].tuple.dst.ip | 238 | ct->tuplehash[!dir].tuple.dst.ip |
244 | #ifdef CONFIG_XFRM | ||
245 | || ct->tuplehash[dir].tuple.src.u.all != | 239 | || ct->tuplehash[dir].tuple.src.u.all != |
246 | ct->tuplehash[!dir].tuple.dst.u.all | 240 | ct->tuplehash[!dir].tuple.dst.u.all |
247 | #endif | ||
248 | ) | 241 | ) |
249 | return ip_route_me_harder(pskb) == 0 ? ret : NF_DROP; | 242 | return ip_xfrm_me_harder(pskb) == 0 ? ret : NF_DROP; |
250 | } | 243 | } |
244 | #endif | ||
251 | return ret; | 245 | return ret; |
252 | } | 246 | } |
253 | 247 | ||
@@ -276,7 +270,7 @@ ip_nat_local_fn(unsigned int hooknum, | |||
276 | ct->tuplehash[!dir].tuple.src.ip | 270 | ct->tuplehash[!dir].tuple.src.ip |
277 | #ifdef CONFIG_XFRM | 271 | #ifdef CONFIG_XFRM |
278 | || ct->tuplehash[dir].tuple.dst.u.all != | 272 | || ct->tuplehash[dir].tuple.dst.u.all != |
279 | ct->tuplehash[dir].tuple.src.u.all | 273 | ct->tuplehash[!dir].tuple.src.u.all |
280 | #endif | 274 | #endif |
281 | ) | 275 | ) |
282 | return ip_route_me_harder(pskb) == 0 ? ret : NF_DROP; | 276 | return ip_route_me_harder(pskb) == 0 ? ret : NF_DROP; |
diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c index 36339eb39e17..08f80e2ea2aa 100644 --- a/net/ipv4/netfilter/ip_queue.c +++ b/net/ipv4/netfilter/ip_queue.c | |||
@@ -524,7 +524,7 @@ ipq_rcv_skb(struct sk_buff *skb) | |||
524 | write_unlock_bh(&queue_lock); | 524 | write_unlock_bh(&queue_lock); |
525 | 525 | ||
526 | status = ipq_receive_peer(NLMSG_DATA(nlh), type, | 526 | status = ipq_receive_peer(NLMSG_DATA(nlh), type, |
527 | skblen - NLMSG_LENGTH(0)); | 527 | nlmsglen - NLMSG_LENGTH(0)); |
528 | if (status < 0) | 528 | if (status < 0) |
529 | RCV_SKB_FAIL(status); | 529 | RCV_SKB_FAIL(status); |
530 | 530 | ||
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index 2371b2062c2d..16f47c675fef 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c | |||
@@ -921,6 +921,13 @@ do_replace(void __user *user, unsigned int len) | |||
921 | if (len != sizeof(tmp) + tmp.size) | 921 | if (len != sizeof(tmp) + tmp.size) |
922 | return -ENOPROTOOPT; | 922 | return -ENOPROTOOPT; |
923 | 923 | ||
924 | /* overflow check */ | ||
925 | if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS - | ||
926 | SMP_CACHE_BYTES) | ||
927 | return -ENOMEM; | ||
928 | if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) | ||
929 | return -ENOMEM; | ||
930 | |||
924 | newinfo = xt_alloc_table_info(tmp.size); | 931 | newinfo = xt_alloc_table_info(tmp.size); |
925 | if (!newinfo) | 932 | if (!newinfo) |
926 | return -ENOMEM; | 933 | return -ENOMEM; |
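[Editor's note] Both the arp_tables.c and ip_tables.c hunks add the same overflow check: reject a user-supplied size before it gets multiplied by the per-CPU replication factor, so the later allocation size cannot wrap. A sketch of the guard with made-up stand-ins for NR_CPUS, SMP_CACHE_BYTES and the table-info header size:

#include <limits.h>
#include <stdio.h>

#define FAKE_NR_CPUS		64
#define FAKE_CACHE_BYTES	128
#define FAKE_HDR		64	/* stand-in for sizeof(struct xt_table_info) */

static int size_is_sane(unsigned int size)
{
	/* the later per-CPU allocation is roughly hdr + NR_CPUS * (size + cache pad) */
	return size < (INT_MAX - FAKE_HDR) / FAKE_NR_CPUS - FAKE_CACHE_BYTES;
}

int main(void)
{
	printf("%d\n", size_is_sane(4096));		/* 1: accepted */
	printf("%d\n", size_is_sane(UINT_MAX / 2));	/* 0: rejected */
	return 0;
}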
diff --git a/net/ipv4/netfilter/ipt_LOG.c b/net/ipv4/netfilter/ipt_LOG.c index 6606ddb66a29..cc27545ff97f 100644 --- a/net/ipv4/netfilter/ipt_LOG.c +++ b/net/ipv4/netfilter/ipt_LOG.c | |||
@@ -425,7 +425,12 @@ ipt_log_target(struct sk_buff **pskb, | |||
425 | li.u.log.level = loginfo->level; | 425 | li.u.log.level = loginfo->level; |
426 | li.u.log.logflags = loginfo->logflags; | 426 | li.u.log.logflags = loginfo->logflags; |
427 | 427 | ||
428 | nf_log_packet(PF_INET, hooknum, *pskb, in, out, &li, loginfo->prefix); | 428 | if (loginfo->logflags & IPT_LOG_NFLOG) |
429 | nf_log_packet(PF_INET, hooknum, *pskb, in, out, &li, | ||
430 | loginfo->prefix); | ||
431 | else | ||
432 | ipt_log_packet(PF_INET, hooknum, *pskb, in, out, &li, | ||
433 | loginfo->prefix); | ||
429 | 434 | ||
430 | return IPT_CONTINUE; | 435 | return IPT_CONTINUE; |
431 | } | 436 | } |
diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c index 641dbc477650..180a9ea57b69 100644 --- a/net/ipv4/netfilter/ipt_ULOG.c +++ b/net/ipv4/netfilter/ipt_ULOG.c | |||
@@ -35,6 +35,10 @@ | |||
35 | * each nlgroup you are using, so the total kernel memory usage increases | 35 | * each nlgroup you are using, so the total kernel memory usage increases |
36 | * by that factor. | 36 | * by that factor. |
37 | * | 37 | * |
38 | * Actually you should use nlbufsiz a bit smaller than PAGE_SIZE, since | ||
39 | * nlbufsiz is used with alloc_skb, which adds another | ||
40 | * sizeof(struct skb_shared_info). Use NLMSG_GOODSIZE instead. | ||
41 | * | ||
38 | * flushtimeout: | 42 | * flushtimeout: |
39 | * Specify, after how many hundredths of a second the queue should be | 43 | * Specify, after how many hundredths of a second the queue should be |
40 | * flushed even if it is not full yet. | 44 | * flushed even if it is not full yet. |
@@ -76,7 +80,7 @@ MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_NFLOG); | |||
76 | 80 | ||
77 | #define PRINTR(format, args...) do { if (net_ratelimit()) printk(format , ## args); } while (0) | 81 | #define PRINTR(format, args...) do { if (net_ratelimit()) printk(format , ## args); } while (0) |
78 | 82 | ||
79 | static unsigned int nlbufsiz = 4096; | 83 | static unsigned int nlbufsiz = NLMSG_GOODSIZE; |
80 | module_param(nlbufsiz, uint, 0400); | 84 | module_param(nlbufsiz, uint, 0400); |
81 | MODULE_PARM_DESC(nlbufsiz, "netlink buffer size"); | 85 | MODULE_PARM_DESC(nlbufsiz, "netlink buffer size"); |
82 | 86 | ||
@@ -143,22 +147,26 @@ static void ulog_timer(unsigned long data) | |||
143 | static struct sk_buff *ulog_alloc_skb(unsigned int size) | 147 | static struct sk_buff *ulog_alloc_skb(unsigned int size) |
144 | { | 148 | { |
145 | struct sk_buff *skb; | 149 | struct sk_buff *skb; |
150 | unsigned int n; | ||
146 | 151 | ||
147 | /* alloc skb which should be big enough for a whole | 152 | /* alloc skb which should be big enough for a whole |
148 | * multipart message. WARNING: has to be <= 131000 | 153 | * multipart message. WARNING: has to be <= 131000 |
149 | * due to slab allocator restrictions */ | 154 | * due to slab allocator restrictions */ |
150 | 155 | ||
151 | skb = alloc_skb(nlbufsiz, GFP_ATOMIC); | 156 | n = max(size, nlbufsiz); |
157 | skb = alloc_skb(n, GFP_ATOMIC); | ||
152 | if (!skb) { | 158 | if (!skb) { |
153 | PRINTR("ipt_ULOG: can't alloc whole buffer %ub!\n", | 159 | PRINTR("ipt_ULOG: can't alloc whole buffer %ub!\n", n); |
154 | nlbufsiz); | ||
155 | 160 | ||
156 | /* try to allocate only as much as we need for | 161 | if (n > size) { |
157 | * current packet */ | 162 | /* try to allocate only as much as we need for |
163 | * current packet */ | ||
158 | 164 | ||
159 | skb = alloc_skb(size, GFP_ATOMIC); | 165 | skb = alloc_skb(size, GFP_ATOMIC); |
160 | if (!skb) | 166 | if (!skb) |
161 | PRINTR("ipt_ULOG: can't even allocate %ub\n", size); | 167 | PRINTR("ipt_ULOG: can't even allocate %ub\n", |
168 | size); | ||
169 | } | ||
162 | } | 170 | } |
163 | 171 | ||
164 | return skb; | 172 | return skb; |
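[Editor's note] The reworked ulog_alloc_skb() asks for the larger of the configured buffer size and the size actually needed, and only retries with the exact size when the big request fails and was genuinely larger. A userspace stand-in using malloc in place of alloc_skb:

#include <stdio.h>
#include <stdlib.h>

static void *alloc_buffer(size_t size, size_t nlbufsiz)
{
	size_t n = size > nlbufsiz ? size : nlbufsiz;
	void *p = malloc(n);

	if (!p && n > size)
		p = malloc(size);	/* second, smaller attempt for just this packet */
	return p;
}

int main(void)
{
	void *p = alloc_buffer(2000, 4096);
	printf("%s\n", p ? "allocated" : "failed");
	free(p);
	return 0;
}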
diff --git a/net/ipv4/netfilter/ipt_policy.c b/net/ipv4/netfilter/ipt_policy.c index 18ca8258a1c5..5a7a265280f9 100644 --- a/net/ipv4/netfilter/ipt_policy.c +++ b/net/ipv4/netfilter/ipt_policy.c | |||
@@ -26,10 +26,13 @@ MODULE_LICENSE("GPL"); | |||
26 | static inline int | 26 | static inline int |
27 | match_xfrm_state(struct xfrm_state *x, const struct ipt_policy_elem *e) | 27 | match_xfrm_state(struct xfrm_state *x, const struct ipt_policy_elem *e) |
28 | { | 28 | { |
29 | #define MATCH(x,y) (!e->match.x || ((e->x == (y)) ^ e->invert.x)) | 29 | #define MATCH_ADDR(x,y,z) (!e->match.x || \ |
30 | ((e->x.a4.s_addr == (e->y.a4.s_addr & (z))) \ | ||
31 | ^ e->invert.x)) | ||
32 | #define MATCH(x,y) (!e->match.x || ((e->x == (y)) ^ e->invert.x)) | ||
30 | 33 | ||
31 | return MATCH(saddr, x->props.saddr.a4 & e->smask) && | 34 | return MATCH_ADDR(saddr, smask, x->props.saddr.a4) && |
32 | MATCH(daddr, x->id.daddr.a4 & e->dmask) && | 35 | MATCH_ADDR(daddr, dmask, x->id.daddr.a4) && |
33 | MATCH(proto, x->id.proto) && | 36 | MATCH(proto, x->id.proto) && |
34 | MATCH(mode, x->props.mode) && | 37 | MATCH(mode, x->props.mode) && |
35 | MATCH(spi, x->id.spi) && | 38 | MATCH(spi, x->id.spi) && |
@@ -89,7 +92,7 @@ match_policy_out(const struct sk_buff *skb, const struct ipt_policy_info *info) | |||
89 | return 0; | 92 | return 0; |
90 | } | 93 | } |
91 | 94 | ||
92 | return strict ? 1 : 0; | 95 | return strict ? i == info->len : 0; |
93 | } | 96 | } |
94 | 97 | ||
95 | static int match(const struct sk_buff *skb, | 98 | static int match(const struct sk_buff *skb, |
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c index 167619f638c6..6c8624a54933 100644 --- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c +++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c | |||
@@ -529,15 +529,10 @@ static int init_or_cleanup(int init) | |||
529 | goto cleanup_localinops; | 529 | goto cleanup_localinops; |
530 | } | 530 | } |
531 | #endif | 531 | #endif |
532 | |||
533 | /* For use by REJECT target */ | ||
534 | ip_ct_attach = __nf_conntrack_attach; | ||
535 | |||
536 | return ret; | 532 | return ret; |
537 | 533 | ||
538 | cleanup: | 534 | cleanup: |
539 | synchronize_net(); | 535 | synchronize_net(); |
540 | ip_ct_attach = NULL; | ||
541 | #ifdef CONFIG_SYSCTL | 536 | #ifdef CONFIG_SYSCTL |
542 | unregister_sysctl_table(nf_ct_ipv4_sysctl_header); | 537 | unregister_sysctl_table(nf_ct_ipv4_sysctl_header); |
543 | cleanup_localinops: | 538 | cleanup_localinops: |
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c index 39d49dc333a7..1b167c4bb3be 100644 --- a/net/ipv4/proc.c +++ b/net/ipv4/proc.c | |||
@@ -49,7 +49,7 @@ static int fold_prot_inuse(struct proto *proto) | |||
49 | int res = 0; | 49 | int res = 0; |
50 | int cpu; | 50 | int cpu; |
51 | 51 | ||
52 | for (cpu = 0; cpu < NR_CPUS; cpu++) | 52 | for_each_cpu(cpu) |
53 | res += proto->stats[cpu].inuse; | 53 | res += proto->stats[cpu].inuse; |
54 | 54 | ||
55 | return res; | 55 | return res; |
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index d82c242ea704..fca5fe0cf94a 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
@@ -835,7 +835,7 @@ static int rt_garbage_collect(void) | |||
835 | int r; | 835 | int r; |
836 | 836 | ||
837 | rthp = rt_remove_balanced_route( | 837 | rthp = rt_remove_balanced_route( |
838 | &rt_hash_table[i].chain, | 838 | &rt_hash_table[k].chain, |
839 | rth, | 839 | rth, |
840 | &r); | 840 | &r); |
841 | goal -= r; | 841 | goal -= r; |
diff --git a/net/ipv4/tcp_highspeed.c b/net/ipv4/tcp_highspeed.c index 63cf7e540847..e0e9d1383c7c 100644 --- a/net/ipv4/tcp_highspeed.c +++ b/net/ipv4/tcp_highspeed.c | |||
@@ -125,7 +125,7 @@ static void hstcp_cong_avoid(struct sock *sk, u32 adk, u32 rtt, | |||
125 | /* Update AIMD parameters */ | 125 | /* Update AIMD parameters */ |
126 | if (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd) { | 126 | if (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd) { |
127 | while (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd && | 127 | while (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd && |
128 | ca->ai < HSTCP_AIMD_MAX) | 128 | ca->ai < HSTCP_AIMD_MAX - 1) |
129 | ca->ai++; | 129 | ca->ai++; |
130 | } else if (tp->snd_cwnd < hstcp_aimd_vals[ca->ai].cwnd) { | 130 | } else if (tp->snd_cwnd < hstcp_aimd_vals[ca->ai].cwnd) { |
131 | while (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd && | 131 | while (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd && |
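[Editor's note] The tcp_highspeed.c fix tightens the loop bound to HSTCP_AIMD_MAX - 1 so the index can never step past the last entry of the AIMD table. A small sketch of the corrected bound with an illustrative three-entry table (the real table and values differ):

#include <stdio.h>

#define HSTCP_AIMD_MAX 3	/* illustrative table length */

static const unsigned int cwnd_table[HSTCP_AIMD_MAX] = { 38, 118, 221 };

int main(void)
{
	unsigned int snd_cwnd = 1000, ai = 0;

	/* advance only while strictly below the last valid index */
	while (snd_cwnd > cwnd_table[ai] && ai < HSTCP_AIMD_MAX - 1)
		ai++;
	printf("ai = %u (max valid index %u)\n", ai, HSTCP_AIMD_MAX - 1);
	return 0;
}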
diff --git a/net/ipv4/tcp_htcp.c b/net/ipv4/tcp_htcp.c index 3284cfb993e6..128de4d7c0b7 100644 --- a/net/ipv4/tcp_htcp.c +++ b/net/ipv4/tcp_htcp.c | |||
@@ -230,7 +230,6 @@ static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 rtt, | |||
230 | if (tp->snd_cwnd < tp->snd_cwnd_clamp) | 230 | if (tp->snd_cwnd < tp->snd_cwnd_clamp) |
231 | tp->snd_cwnd++; | 231 | tp->snd_cwnd++; |
232 | tp->snd_cwnd_cnt = 0; | 232 | tp->snd_cwnd_cnt = 0; |
233 | ca->ccount++; | ||
234 | } | 233 | } |
235 | } | 234 | } |
236 | } | 235 | } |
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index a97ed5416c28..e9a54ae7d690 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -456,7 +456,8 @@ void tcp_rcv_space_adjust(struct sock *sk) | |||
456 | 456 | ||
457 | tp->rcvq_space.space = space; | 457 | tp->rcvq_space.space = space; |
458 | 458 | ||
459 | if (sysctl_tcp_moderate_rcvbuf) { | 459 | if (sysctl_tcp_moderate_rcvbuf && |
460 | !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) { | ||
460 | int new_clamp = space; | 461 | int new_clamp = space; |
461 | 462 | ||
462 | /* Receive space grows, normalize in order to | 463 | /* Receive space grows, normalize in order to |
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 6ea353907af5..233bdf259965 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -236,7 +236,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) | |||
236 | if (err) | 236 | if (err) |
237 | goto failure; | 237 | goto failure; |
238 | 238 | ||
239 | err = ip_route_newports(&rt, inet->sport, inet->dport, sk); | 239 | err = ip_route_newports(&rt, IPPROTO_TCP, inet->sport, inet->dport, sk); |
240 | if (err) | 240 | if (err) |
241 | goto failure; | 241 | goto failure; |
242 | 242 | ||
@@ -1845,7 +1845,6 @@ void __init tcp_v4_init(struct net_proto_family *ops) | |||
1845 | } | 1845 | } |
1846 | 1846 | ||
1847 | EXPORT_SYMBOL(ipv4_specific); | 1847 | EXPORT_SYMBOL(ipv4_specific); |
1848 | EXPORT_SYMBOL(inet_bind_bucket_create); | ||
1849 | EXPORT_SYMBOL(tcp_hashinfo); | 1848 | EXPORT_SYMBOL(tcp_hashinfo); |
1850 | EXPORT_SYMBOL(tcp_prot); | 1849 | EXPORT_SYMBOL(tcp_prot); |
1851 | EXPORT_SYMBOL(tcp_unhash); | 1850 | EXPORT_SYMBOL(tcp_unhash); |
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index a7623ead39a8..9f498a6c8895 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
@@ -1036,6 +1036,10 @@ static int tcp_tso_should_defer(struct sock *sk, struct tcp_sock *tp, struct sk_ | |||
1036 | 1036 | ||
1037 | limit = min(send_win, cong_win); | 1037 | limit = min(send_win, cong_win); |
1038 | 1038 | ||
1039 | /* If a full-sized TSO skb can be sent, do it. */ | ||
1040 | if (limit >= 65536) | ||
1041 | return 0; | ||
1042 | |||
1039 | if (sysctl_tcp_tso_win_divisor) { | 1043 | if (sysctl_tcp_tso_win_divisor) { |
1040 | u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache); | 1044 | u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache); |
1041 | 1045 | ||
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c index d4df0ddd424b..32ad229b4fed 100644 --- a/net/ipv4/xfrm4_output.c +++ b/net/ipv4/xfrm4_output.c | |||
@@ -152,10 +152,16 @@ error_nolock: | |||
152 | goto out_exit; | 152 | goto out_exit; |
153 | } | 153 | } |
154 | 154 | ||
155 | int xfrm4_output_finish(struct sk_buff *skb) | 155 | static int xfrm4_output_finish(struct sk_buff *skb) |
156 | { | 156 | { |
157 | int err; | 157 | int err; |
158 | 158 | ||
159 | #ifdef CONFIG_NETFILTER | ||
160 | if (!skb->dst->xfrm) { | ||
161 | IPCB(skb)->flags |= IPSKB_REROUTED; | ||
162 | return dst_output(skb); | ||
163 | } | ||
164 | #endif | ||
159 | while (likely((err = xfrm4_output_one(skb)) == 0)) { | 165 | while (likely((err = xfrm4_output_one(skb)) == 0)) { |
160 | nf_reset(skb); | 166 | nf_reset(skb); |
161 | 167 | ||
@@ -178,6 +184,7 @@ int xfrm4_output_finish(struct sk_buff *skb) | |||
178 | 184 | ||
179 | int xfrm4_output(struct sk_buff *skb) | 185 | int xfrm4_output(struct sk_buff *skb) |
180 | { | 186 | { |
181 | return NF_HOOK(PF_INET, NF_IP_POST_ROUTING, skb, NULL, skb->dst->dev, | 187 | return NF_HOOK_COND(PF_INET, NF_IP_POST_ROUTING, skb, NULL, skb->dst->dev, |
182 | xfrm4_output_finish); | 188 | xfrm4_output_finish, |
189 | !(IPCB(skb)->flags & IPSKB_REROUTED)); | ||
183 | } | 190 | } |
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c index 42196ba3b0b9..f285bbf296e2 100644 --- a/net/ipv4/xfrm4_policy.c +++ b/net/ipv4/xfrm4_policy.c | |||
@@ -8,7 +8,6 @@ | |||
8 | * | 8 | * |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <asm/bug.h> | ||
12 | #include <linux/compiler.h> | 11 | #include <linux/compiler.h> |
13 | #include <linux/config.h> | 12 | #include <linux/config.h> |
14 | #include <linux/inetdevice.h> | 13 | #include <linux/inetdevice.h> |
@@ -36,6 +35,7 @@ __xfrm4_find_bundle(struct flowi *fl, struct xfrm_policy *policy) | |||
36 | if (xdst->u.rt.fl.oif == fl->oif && /*XXX*/ | 35 | if (xdst->u.rt.fl.oif == fl->oif && /*XXX*/ |
37 | xdst->u.rt.fl.fl4_dst == fl->fl4_dst && | 36 | xdst->u.rt.fl.fl4_dst == fl->fl4_dst && |
38 | xdst->u.rt.fl.fl4_src == fl->fl4_src && | 37 | xdst->u.rt.fl.fl4_src == fl->fl4_src && |
38 | xdst->u.rt.fl.fl4_tos == fl->fl4_tos && | ||
39 | xfrm_bundle_ok(xdst, fl, AF_INET)) { | 39 | xfrm_bundle_ok(xdst, fl, AF_INET)) { |
40 | dst_clone(dst); | 40 | dst_clone(dst); |
41 | break; | 41 | break; |
@@ -62,7 +62,8 @@ __xfrm4_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int | |||
62 | .nl_u = { | 62 | .nl_u = { |
63 | .ip4_u = { | 63 | .ip4_u = { |
64 | .saddr = local, | 64 | .saddr = local, |
65 | .daddr = remote | 65 | .daddr = remote, |
66 | .tos = fl->fl4_tos | ||
66 | } | 67 | } |
67 | } | 68 | } |
68 | }; | 69 | }; |
@@ -231,6 +232,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl) | |||
231 | fl->proto = iph->protocol; | 232 | fl->proto = iph->protocol; |
232 | fl->fl4_dst = iph->daddr; | 233 | fl->fl4_dst = iph->daddr; |
233 | fl->fl4_src = iph->saddr; | 234 | fl->fl4_src = iph->saddr; |
235 | fl->fl4_tos = iph->tos; | ||
234 | } | 236 | } |
235 | 237 | ||
236 | static inline int xfrm4_garbage_collect(void) | 238 | static inline int xfrm4_garbage_collect(void) |
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index d328d5986143..19727d941962 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
@@ -822,7 +822,7 @@ struct ipv6_saddr_score { | |||
822 | int addr_type; | 822 | int addr_type; |
823 | unsigned int attrs; | 823 | unsigned int attrs; |
824 | int matchlen; | 824 | int matchlen; |
825 | unsigned int scope; | 825 | int scope; |
826 | unsigned int rule; | 826 | unsigned int rule; |
827 | }; | 827 | }; |
828 | 828 | ||
@@ -2165,6 +2165,9 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, | |||
2165 | dev->name); | 2165 | dev->name); |
2166 | break; | 2166 | break; |
2167 | } | 2167 | } |
2168 | |||
2169 | if (idev) | ||
2170 | idev->if_flags |= IF_READY; | ||
2168 | } else { | 2171 | } else { |
2169 | if (!netif_carrier_ok(dev)) { | 2172 | if (!netif_carrier_ok(dev)) { |
2170 | /* device is still not ready. */ | 2173 | /* device is still not ready. */ |
@@ -3321,9 +3324,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp) | |||
3321 | 3324 | ||
3322 | switch (event) { | 3325 | switch (event) { |
3323 | case RTM_NEWADDR: | 3326 | case RTM_NEWADDR: |
3324 | dst_hold(&ifp->rt->u.dst); | 3327 | ip6_ins_rt(ifp->rt, NULL, NULL, NULL); |
3325 | if (ip6_ins_rt(ifp->rt, NULL, NULL, NULL)) | ||
3326 | dst_release(&ifp->rt->u.dst); | ||
3327 | if (ifp->idev->cnf.forwarding) | 3328 | if (ifp->idev->cnf.forwarding) |
3328 | addrconf_join_anycast(ifp); | 3329 | addrconf_join_anycast(ifp); |
3329 | break; | 3330 | break; |
@@ -3334,8 +3335,6 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp) | |||
3334 | dst_hold(&ifp->rt->u.dst); | 3335 | dst_hold(&ifp->rt->u.dst); |
3335 | if (ip6_del_rt(ifp->rt, NULL, NULL, NULL)) | 3336 | if (ip6_del_rt(ifp->rt, NULL, NULL, NULL)) |
3336 | dst_free(&ifp->rt->u.dst); | 3337 | dst_free(&ifp->rt->u.dst); |
3337 | else | ||
3338 | dst_release(&ifp->rt->u.dst); | ||
3339 | break; | 3338 | break; |
3340 | } | 3339 | } |
3341 | } | 3340 | } |
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 064ffab82a9f..6c9711ac1c03 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c | |||
@@ -369,12 +369,6 @@ int inet6_destroy_sock(struct sock *sk) | |||
369 | struct sk_buff *skb; | 369 | struct sk_buff *skb; |
370 | struct ipv6_txoptions *opt; | 370 | struct ipv6_txoptions *opt; |
371 | 371 | ||
372 | /* | ||
373 | * Release destination entry | ||
374 | */ | ||
375 | |||
376 | sk_dst_reset(sk); | ||
377 | |||
378 | /* Release rx options */ | 372 | /* Release rx options */ |
379 | 373 | ||
380 | if ((skb = xchg(&np->pktoptions, NULL)) != NULL) | 374 | if ((skb = xchg(&np->pktoptions, NULL)) != NULL) |
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c index c7932cb420a5..84963749ab77 100644 --- a/net/ipv6/ah6.c +++ b/net/ipv6/ah6.c | |||
@@ -279,7 +279,7 @@ static int ah6_input(struct xfrm_state *x, struct xfrm_decap_state *decap, struc | |||
279 | goto out; | 279 | goto out; |
280 | memcpy(tmp_hdr, skb->nh.raw, hdr_len); | 280 | memcpy(tmp_hdr, skb->nh.raw, hdr_len); |
281 | if (ipv6_clear_mutable_options(skb->nh.ipv6h, hdr_len)) | 281 | if (ipv6_clear_mutable_options(skb->nh.ipv6h, hdr_len)) |
282 | goto out; | 282 | goto free_out; |
283 | skb->nh.ipv6h->priority = 0; | 283 | skb->nh.ipv6h->priority = 0; |
284 | skb->nh.ipv6h->flow_lbl[0] = 0; | 284 | skb->nh.ipv6h->flow_lbl[0] = 0; |
285 | skb->nh.ipv6h->flow_lbl[1] = 0; | 285 | skb->nh.ipv6h->flow_lbl[1] = 0; |
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index fcf883183cef..21eb725e885f 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c | |||
@@ -42,6 +42,7 @@ | |||
42 | #include <linux/net.h> | 42 | #include <linux/net.h> |
43 | #include <linux/skbuff.h> | 43 | #include <linux/skbuff.h> |
44 | #include <linux/init.h> | 44 | #include <linux/init.h> |
45 | #include <linux/netfilter.h> | ||
45 | 46 | ||
46 | #ifdef CONFIG_SYSCTL | 47 | #ifdef CONFIG_SYSCTL |
47 | #include <linux/sysctl.h> | 48 | #include <linux/sysctl.h> |
@@ -255,6 +256,7 @@ out: | |||
255 | struct icmpv6_msg { | 256 | struct icmpv6_msg { |
256 | struct sk_buff *skb; | 257 | struct sk_buff *skb; |
257 | int offset; | 258 | int offset; |
259 | uint8_t type; | ||
258 | }; | 260 | }; |
259 | 261 | ||
260 | static int icmpv6_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb) | 262 | static int icmpv6_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb) |
@@ -266,6 +268,8 @@ static int icmpv6_getfrag(void *from, char *to, int offset, int len, int odd, st | |||
266 | csum = skb_copy_and_csum_bits(org_skb, msg->offset + offset, | 268 | csum = skb_copy_and_csum_bits(org_skb, msg->offset + offset, |
267 | to, len, csum); | 269 | to, len, csum); |
268 | skb->csum = csum_block_add(skb->csum, csum, odd); | 270 | skb->csum = csum_block_add(skb->csum, csum, odd); |
271 | if (!(msg->type & ICMPV6_INFOMSG_MASK)) | ||
272 | nf_ct_attach(skb, org_skb); | ||
269 | return 0; | 273 | return 0; |
270 | } | 274 | } |
271 | 275 | ||
@@ -403,6 +407,7 @@ void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info, | |||
403 | 407 | ||
404 | msg.skb = skb; | 408 | msg.skb = skb; |
405 | msg.offset = skb->nh.raw - skb->data; | 409 | msg.offset = skb->nh.raw - skb->data; |
410 | msg.type = type; | ||
406 | 411 | ||
407 | len = skb->len - msg.offset; | 412 | len = skb->len - msg.offset; |
408 | len = min_t(unsigned int, len, IPV6_MIN_MTU - sizeof(struct ipv6hdr) -sizeof(struct icmp6hdr)); | 413 | len = min_t(unsigned int, len, IPV6_MIN_MTU - sizeof(struct ipv6hdr) -sizeof(struct icmp6hdr)); |
@@ -500,6 +505,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb) | |||
500 | 505 | ||
501 | msg.skb = skb; | 506 | msg.skb = skb; |
502 | msg.offset = 0; | 507 | msg.offset = 0; |
508 | msg.type = ICMPV6_ECHO_REPLY; | ||
503 | 509 | ||
504 | err = ip6_append_data(sk, icmpv6_getfrag, &msg, skb->len + sizeof(struct icmp6hdr), | 510 | err = ip6_append_data(sk, icmpv6_getfrag, &msg, skb->len + sizeof(struct icmp6hdr), |
505 | sizeof(struct icmp6hdr), hlimit, tclass, NULL, &fl, | 511 | sizeof(struct icmp6hdr), hlimit, tclass, NULL, &fl, |
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c index 4154f3a8b6cf..bb8ffb8a14c5 100644 --- a/net/ipv6/inet6_hashtables.c +++ b/net/ipv6/inet6_hashtables.c | |||
@@ -87,7 +87,7 @@ static int __inet6_check_established(struct inet_timewait_death_row *death_row, | |||
87 | struct inet_timewait_sock **twp) | 87 | struct inet_timewait_sock **twp) |
88 | { | 88 | { |
89 | struct inet_hashinfo *hinfo = death_row->hashinfo; | 89 | struct inet_hashinfo *hinfo = death_row->hashinfo; |
90 | const struct inet_sock *inet = inet_sk(sk); | 90 | struct inet_sock *inet = inet_sk(sk); |
91 | const struct ipv6_pinfo *np = inet6_sk(sk); | 91 | const struct ipv6_pinfo *np = inet6_sk(sk); |
92 | const struct in6_addr *daddr = &np->rcv_saddr; | 92 | const struct in6_addr *daddr = &np->rcv_saddr; |
93 | const struct in6_addr *saddr = &np->daddr; | 93 | const struct in6_addr *saddr = &np->daddr; |
@@ -129,6 +129,10 @@ static int __inet6_check_established(struct inet_timewait_death_row *death_row, | |||
129 | } | 129 | } |
130 | 130 | ||
131 | unique: | 131 | unique: |
132 | /* Must record num and sport now. Otherwise we will see | ||
133 | * in hash table socket with a funny identity. */ | ||
134 | inet->num = lport; | ||
135 | inet->sport = htons(lport); | ||
132 | BUG_TRAP(sk_unhashed(sk)); | 136 | BUG_TRAP(sk_unhashed(sk)); |
133 | __sk_add_node(sk, &head->chain); | 137 | __sk_add_node(sk, &head->chain); |
134 | sk->sk_hash = hash; | 138 | sk->sk_hash = hash; |
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index efa3e72cfcfa..5bf70b1442ea 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c | |||
@@ -494,6 +494,7 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) | |||
494 | struct net_device *dev; | 494 | struct net_device *dev; |
495 | struct sk_buff *frag; | 495 | struct sk_buff *frag; |
496 | struct rt6_info *rt = (struct rt6_info*)skb->dst; | 496 | struct rt6_info *rt = (struct rt6_info*)skb->dst; |
497 | struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL; | ||
497 | struct ipv6hdr *tmp_hdr; | 498 | struct ipv6hdr *tmp_hdr; |
498 | struct frag_hdr *fh; | 499 | struct frag_hdr *fh; |
499 | unsigned int mtu, hlen, left, len; | 500 | unsigned int mtu, hlen, left, len; |
@@ -505,7 +506,12 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) | |||
505 | hlen = ip6_find_1stfragopt(skb, &prevhdr); | 506 | hlen = ip6_find_1stfragopt(skb, &prevhdr); |
506 | nexthdr = *prevhdr; | 507 | nexthdr = *prevhdr; |
507 | 508 | ||
508 | mtu = dst_mtu(&rt->u.dst) - hlen - sizeof(struct frag_hdr); | 509 | mtu = dst_mtu(&rt->u.dst); |
510 | if (np && np->frag_size < mtu) { | ||
511 | if (np->frag_size) | ||
512 | mtu = np->frag_size; | ||
513 | } | ||
514 | mtu -= hlen + sizeof(struct frag_hdr); | ||
509 | 515 | ||
510 | if (skb_shinfo(skb)->frag_list) { | 516 | if (skb_shinfo(skb)->frag_list) { |
511 | int first_len = skb_pagelen(skb); | 517 | int first_len = skb_pagelen(skb); |
@@ -882,7 +888,12 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, | |||
882 | inet->cork.fl = *fl; | 888 | inet->cork.fl = *fl; |
883 | np->cork.hop_limit = hlimit; | 889 | np->cork.hop_limit = hlimit; |
884 | np->cork.tclass = tclass; | 890 | np->cork.tclass = tclass; |
885 | inet->cork.fragsize = mtu = dst_mtu(rt->u.dst.path); | 891 | mtu = dst_mtu(rt->u.dst.path); |
892 | if (np && np->frag_size < mtu) { | ||
893 | if (np->frag_size) | ||
894 | mtu = np->frag_size; | ||
895 | } | ||
896 | inet->cork.fragsize = mtu; | ||
886 | if (dst_allfrag(rt->u.dst.path)) | 897 | if (dst_allfrag(rt->u.dst.path)) |
887 | inet->cork.flags |= IPCORK_ALLFRAG; | 898 | inet->cork.flags |= IPCORK_ALLFRAG; |
888 | inet->cork.length = 0; | 899 | inet->cork.length = 0; |
@@ -933,10 +944,11 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, | |||
933 | if (((length > mtu) && (sk->sk_protocol == IPPROTO_UDP)) && | 944 | if (((length > mtu) && (sk->sk_protocol == IPPROTO_UDP)) && |
934 | (rt->u.dst.dev->features & NETIF_F_UFO)) { | 945 | (rt->u.dst.dev->features & NETIF_F_UFO)) { |
935 | 946 | ||
936 | if(ip6_ufo_append_data(sk, getfrag, from, length, hh_len, | 947 | err = ip6_ufo_append_data(sk, getfrag, from, length, hh_len, |
937 | fragheaderlen, transhdrlen, mtu, flags)) | 948 | fragheaderlen, transhdrlen, mtu, |
949 | flags); | ||
950 | if (err) | ||
938 | goto error; | 951 | goto error; |
939 | |||
940 | return 0; | 952 | return 0; |
941 | } | 953 | } |
942 | 954 | ||
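Both ip6_output.c hunks apply the same rule: if the socket carries a non-zero per-socket frag_size that is smaller than the path MTU, fragmentation uses the smaller value. A stand-alone sketch of that clamping; effective_frag_mtu is a made-up name, and only the non-zero-and-smaller rule comes from the patch.

    #include <stdint.h>

    /* Sketch: pick the MTU used for fragmentation. */
    static uint32_t effective_frag_mtu(uint32_t path_mtu, uint32_t sock_frag_size)
    {
        uint32_t mtu = path_mtu;

        /* A configured, smaller per-socket fragment size overrides the path MTU. */
        if (sock_frag_size && sock_frag_size < mtu)
            mtu = sock_frag_size;

        return mtu;
    }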
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 92ead3cf956b..48597538db3f 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c | |||
@@ -458,7 +458,7 @@ ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
458 | mtu = IPV6_MIN_MTU; | 458 | mtu = IPV6_MIN_MTU; |
459 | t->dev->mtu = mtu; | 459 | t->dev->mtu = mtu; |
460 | 460 | ||
461 | if ((len = sizeof (*ipv6h) + ipv6h->payload_len) > mtu) { | 461 | if ((len = sizeof (*ipv6h) + ntohs(ipv6h->payload_len)) > mtu) { |
462 | rel_type = ICMPV6_PKT_TOOBIG; | 462 | rel_type = ICMPV6_PKT_TOOBIG; |
463 | rel_code = 0; | 463 | rel_code = 0; |
464 | rel_info = mtu; | 464 | rel_info = mtu; |
@@ -884,6 +884,7 @@ ip6ip6_tnl_change(struct ip6_tnl *t, struct ip6_tnl_parm *p) | |||
884 | t->parms.encap_limit = p->encap_limit; | 884 | t->parms.encap_limit = p->encap_limit; |
885 | t->parms.flowinfo = p->flowinfo; | 885 | t->parms.flowinfo = p->flowinfo; |
886 | t->parms.link = p->link; | 886 | t->parms.link = p->link; |
887 | ip6_tnl_dst_reset(t); | ||
887 | ip6ip6_tnl_link_config(t); | 888 | ip6ip6_tnl_link_config(t); |
888 | return 0; | 889 | return 0; |
889 | } | 890 | } |
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 6c05c7978bef..807c021d64a2 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c | |||
@@ -1252,8 +1252,7 @@ int igmp6_event_query(struct sk_buff *skb) | |||
1252 | } | 1252 | } |
1253 | } else { | 1253 | } else { |
1254 | for (ma = idev->mc_list; ma; ma=ma->next) { | 1254 | for (ma = idev->mc_list; ma; ma=ma->next) { |
1255 | if (group_type != IPV6_ADDR_ANY && | 1255 | if (!ipv6_addr_equal(group, &ma->mca_addr)) |
1256 | !ipv6_addr_equal(group, &ma->mca_addr)) | ||
1257 | continue; | 1256 | continue; |
1258 | spin_lock_bh(&ma->mca_lock); | 1257 | spin_lock_bh(&ma->mca_lock); |
1259 | if (ma->mca_flags & MAF_TIMER_RUNNING) { | 1258 | if (ma->mca_flags & MAF_TIMER_RUNNING) { |
@@ -1268,11 +1267,10 @@ int igmp6_event_query(struct sk_buff *skb) | |||
1268 | ma->mca_flags &= ~MAF_GSQUERY; | 1267 | ma->mca_flags &= ~MAF_GSQUERY; |
1269 | } | 1268 | } |
1270 | if (!(ma->mca_flags & MAF_GSQUERY) || | 1269 | if (!(ma->mca_flags & MAF_GSQUERY) || |
1271 | mld_marksources(ma, ntohs(mlh2->nsrcs), mlh2->srcs)) | 1270 | mld_marksources(ma, ntohs(mlh2->nsrcs), mlh2->srcs)) |
1272 | igmp6_group_queried(ma, max_delay); | 1271 | igmp6_group_queried(ma, max_delay); |
1273 | spin_unlock_bh(&ma->mca_lock); | 1272 | spin_unlock_bh(&ma->mca_lock); |
1274 | if (group_type != IPV6_ADDR_ANY) | 1273 | break; |
1275 | break; | ||
1276 | } | 1274 | } |
1277 | } | 1275 | } |
1278 | read_unlock_bh(&idev->lock); | 1276 | read_unlock_bh(&idev->lock); |
@@ -1351,7 +1349,7 @@ static int is_in(struct ifmcaddr6 *pmc, struct ip6_sf_list *psf, int type, | |||
1351 | * in all filters | 1349 | * in all filters |
1352 | */ | 1350 | */ |
1353 | if (psf->sf_count[MCAST_INCLUDE]) | 1351 | if (psf->sf_count[MCAST_INCLUDE]) |
1354 | return 0; | 1352 | return type == MLD2_MODE_IS_INCLUDE; |
1355 | return pmc->mca_sfcount[MCAST_EXCLUDE] == | 1353 | return pmc->mca_sfcount[MCAST_EXCLUDE] == |
1356 | psf->sf_count[MCAST_EXCLUDE]; | 1354 | psf->sf_count[MCAST_EXCLUDE]; |
1357 | } | 1355 | } |
@@ -1966,7 +1964,7 @@ static void sf_markstate(struct ifmcaddr6 *pmc) | |||
1966 | 1964 | ||
1967 | static int sf_setstate(struct ifmcaddr6 *pmc) | 1965 | static int sf_setstate(struct ifmcaddr6 *pmc) |
1968 | { | 1966 | { |
1969 | struct ip6_sf_list *psf; | 1967 | struct ip6_sf_list *psf, *dpsf; |
1970 | int mca_xcount = pmc->mca_sfcount[MCAST_EXCLUDE]; | 1968 | int mca_xcount = pmc->mca_sfcount[MCAST_EXCLUDE]; |
1971 | int qrv = pmc->idev->mc_qrv; | 1969 | int qrv = pmc->idev->mc_qrv; |
1972 | int new_in, rv; | 1970 | int new_in, rv; |
@@ -1978,8 +1976,48 @@ static int sf_setstate(struct ifmcaddr6 *pmc) | |||
1978 | !psf->sf_count[MCAST_INCLUDE]; | 1976 | !psf->sf_count[MCAST_INCLUDE]; |
1979 | } else | 1977 | } else |
1980 | new_in = psf->sf_count[MCAST_INCLUDE] != 0; | 1978 | new_in = psf->sf_count[MCAST_INCLUDE] != 0; |
1981 | if (new_in != psf->sf_oldin) { | 1979 | if (new_in) { |
1982 | psf->sf_crcount = qrv; | 1980 | if (!psf->sf_oldin) { |
1981 | struct ip6_sf_list *prev = NULL; | ||
1982 | |||
1983 | for (dpsf=pmc->mca_tomb; dpsf; | ||
1984 | dpsf=dpsf->sf_next) { | ||
1985 | if (ipv6_addr_equal(&dpsf->sf_addr, | ||
1986 | &psf->sf_addr)) | ||
1987 | break; | ||
1988 | prev = dpsf; | ||
1989 | } | ||
1990 | if (dpsf) { | ||
1991 | if (prev) | ||
1992 | prev->sf_next = dpsf->sf_next; | ||
1993 | else | ||
1994 | pmc->mca_tomb = dpsf->sf_next; | ||
1995 | kfree(dpsf); | ||
1996 | } | ||
1997 | psf->sf_crcount = qrv; | ||
1998 | rv++; | ||
1999 | } | ||
2000 | } else if (psf->sf_oldin) { | ||
2001 | psf->sf_crcount = 0; | ||
2002 | /* | ||
2003 | * add or update "delete" records if an active filter | ||
2004 | * is now inactive | ||
2005 | */ | ||
2006 | for (dpsf=pmc->mca_tomb; dpsf; dpsf=dpsf->sf_next) | ||
2007 | if (ipv6_addr_equal(&dpsf->sf_addr, | ||
2008 | &psf->sf_addr)) | ||
2009 | break; | ||
2010 | if (!dpsf) { | ||
2011 | dpsf = (struct ip6_sf_list *) | ||
2012 | kmalloc(sizeof(*dpsf), GFP_ATOMIC); | ||
2013 | if (!dpsf) | ||
2014 | continue; | ||
2015 | *dpsf = *psf; | ||
2016 | /* pmc->mca_lock held by callers */ | ||
2017 | dpsf->sf_next = pmc->mca_tomb; | ||
2018 | pmc->mca_tomb = dpsf; | ||
2019 | } | ||
2020 | dpsf->sf_crcount = qrv; | ||
1983 | rv++; | 2021 | rv++; |
1984 | } | 2022 | } |
1985 | } | 2023 | } |
diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c index 5027bbe6415e..af0635084df8 100644 --- a/net/ipv6/netfilter/ip6_queue.c +++ b/net/ipv6/netfilter/ip6_queue.c | |||
@@ -522,7 +522,7 @@ ipq_rcv_skb(struct sk_buff *skb) | |||
522 | write_unlock_bh(&queue_lock); | 522 | write_unlock_bh(&queue_lock); |
523 | 523 | ||
524 | status = ipq_receive_peer(NLMSG_DATA(nlh), type, | 524 | status = ipq_receive_peer(NLMSG_DATA(nlh), type, |
525 | skblen - NLMSG_LENGTH(0)); | 525 | nlmsglen - NLMSG_LENGTH(0)); |
526 | if (status < 0) | 526 | if (status < 0) |
527 | RCV_SKB_FAIL(status); | 527 | RCV_SKB_FAIL(status); |
528 | 528 | ||
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 847068fd3367..74ff56c322f4 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c | |||
@@ -978,6 +978,13 @@ do_replace(void __user *user, unsigned int len) | |||
978 | if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) | 978 | if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) |
979 | return -EFAULT; | 979 | return -EFAULT; |
980 | 980 | ||
981 | /* overflow check */ | ||
982 | if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS - | ||
983 | SMP_CACHE_BYTES) | ||
984 | return -ENOMEM; | ||
985 | if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) | ||
986 | return -ENOMEM; | ||
987 | |||
981 | newinfo = xt_alloc_table_info(tmp.size); | 988 | newinfo = xt_alloc_table_info(tmp.size); |
982 | if (!newinfo) | 989 | if (!newinfo) |
983 | return -ENOMEM; | 990 | return -ENOMEM; |
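The do_replace() hunk rejects user-controlled sizes before they feed the multiplications inside the allocator, the usual integer-overflow guard. A small user-space illustration of the same pattern; alloc_counters_checked and its arguments are hypothetical.

    #include <limits.h>
    #include <stdlib.h>

    /* Sketch: refuse a count that would overflow count * elem_size
     * before calling the allocator, rather than trusting the product. */
    static void *alloc_counters_checked(size_t num_counters, size_t elem_size)
    {
        if (elem_size && num_counters >= INT_MAX / elem_size)
            return NULL;            /* product would overflow; treat as -ENOMEM */

        return malloc(num_counters * elem_size);
    }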
diff --git a/net/ipv6/netfilter/ip6t_LOG.c b/net/ipv6/netfilter/ip6t_LOG.c index 77c725832dec..6b930efa9fb9 100644 --- a/net/ipv6/netfilter/ip6t_LOG.c +++ b/net/ipv6/netfilter/ip6t_LOG.c | |||
@@ -436,7 +436,12 @@ ip6t_log_target(struct sk_buff **pskb, | |||
436 | li.u.log.level = loginfo->level; | 436 | li.u.log.level = loginfo->level; |
437 | li.u.log.logflags = loginfo->logflags; | 437 | li.u.log.logflags = loginfo->logflags; |
438 | 438 | ||
439 | nf_log_packet(PF_INET6, hooknum, *pskb, in, out, &li, loginfo->prefix); | 439 | if (loginfo->logflags & IP6T_LOG_NFLOG) |
440 | nf_log_packet(PF_INET6, hooknum, *pskb, in, out, &li, | ||
441 | loginfo->prefix); | ||
442 | else | ||
443 | ip6t_log_packet(PF_INET6, hooknum, *pskb, in, out, &li, | ||
444 | loginfo->prefix); | ||
440 | 445 | ||
441 | return IP6T_CONTINUE; | 446 | return IP6T_CONTINUE; |
442 | } | 447 | } |
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c index c745717b4ce2..0e6d1d4bbd5c 100644 --- a/net/ipv6/netfilter/ip6t_REJECT.c +++ b/net/ipv6/netfilter/ip6t_REJECT.c | |||
@@ -160,6 +160,8 @@ static void send_reset(struct sk_buff *oldskb) | |||
160 | csum_partial((char *)tcph, | 160 | csum_partial((char *)tcph, |
161 | sizeof(struct tcphdr), 0)); | 161 | sizeof(struct tcphdr), 0)); |
162 | 162 | ||
163 | nf_ct_attach(nskb, oldskb); | ||
164 | |||
163 | NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, nskb, NULL, nskb->dst->dev, | 165 | NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, nskb, NULL, nskb->dst->dev, |
164 | dst_output); | 166 | dst_output); |
165 | } | 167 | } |
diff --git a/net/ipv6/netfilter/ip6t_policy.c b/net/ipv6/netfilter/ip6t_policy.c index afe1cc4c18a5..3d39ec924041 100644 --- a/net/ipv6/netfilter/ip6t_policy.c +++ b/net/ipv6/netfilter/ip6t_policy.c | |||
@@ -26,8 +26,9 @@ MODULE_LICENSE("GPL"); | |||
26 | static inline int | 26 | static inline int |
27 | match_xfrm_state(struct xfrm_state *x, const struct ip6t_policy_elem *e) | 27 | match_xfrm_state(struct xfrm_state *x, const struct ip6t_policy_elem *e) |
28 | { | 28 | { |
29 | #define MATCH_ADDR(x,y,z) (!e->match.x || \ | 29 | #define MATCH_ADDR(x,y,z) (!e->match.x || \ |
30 | ((ip6_masked_addrcmp((z), &e->x, &e->y)) == 0) ^ e->invert.x) | 30 | ((!ip6_masked_addrcmp(&e->x.a6, &e->y.a6, z)) \ |
31 | ^ e->invert.x)) | ||
31 | #define MATCH(x,y) (!e->match.x || ((e->x == (y)) ^ e->invert.x)) | 32 | #define MATCH(x,y) (!e->match.x || ((e->x == (y)) ^ e->invert.x)) |
32 | 33 | ||
33 | return MATCH_ADDR(saddr, smask, (struct in6_addr *)&x->props.saddr.a6) && | 34 | return MATCH_ADDR(saddr, smask, (struct in6_addr *)&x->props.saddr.a6) && |
@@ -91,7 +92,7 @@ match_policy_out(const struct sk_buff *skb, const struct ip6t_policy_info *info) | |||
91 | return 0; | 92 | return 0; |
92 | } | 93 | } |
93 | 94 | ||
94 | return strict ? 1 : 0; | 95 | return strict ? i == info->len : 0; |
95 | } | 96 | } |
96 | 97 | ||
97 | static int match(const struct sk_buff *skb, | 98 | static int match(const struct sk_buff *skb, |
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c index 50a13e75d70e..4238b1ed8860 100644 --- a/net/ipv6/proc.c +++ b/net/ipv6/proc.c | |||
@@ -38,7 +38,7 @@ static int fold_prot_inuse(struct proto *proto) | |||
38 | int res = 0; | 38 | int res = 0; |
39 | int cpu; | 39 | int cpu; |
40 | 40 | ||
41 | for (cpu=0; cpu<NR_CPUS; cpu++) | 41 | for_each_cpu(cpu) |
42 | res += proto->stats[cpu].inuse; | 42 | res += proto->stats[cpu].inuse; |
43 | 43 | ||
44 | return res; | 44 | return res; |
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 66f1d12ea578..ae20a0ec9bd8 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c | |||
@@ -35,7 +35,6 @@ | |||
35 | #include <linux/skbuff.h> | 35 | #include <linux/skbuff.h> |
36 | #include <asm/uaccess.h> | 36 | #include <asm/uaccess.h> |
37 | #include <asm/ioctls.h> | 37 | #include <asm/ioctls.h> |
38 | #include <asm/bug.h> | ||
39 | 38 | ||
40 | #include <net/ip.h> | 39 | #include <net/ip.h> |
41 | #include <net/sock.h> | 40 | #include <net/sock.h> |
@@ -804,10 +803,7 @@ back_from_confirm: | |||
804 | err = rawv6_push_pending_frames(sk, &fl, rp); | 803 | err = rawv6_push_pending_frames(sk, &fl, rp); |
805 | } | 804 | } |
806 | done: | 805 | done: |
807 | ip6_dst_store(sk, dst, | 806 | dst_release(dst); |
808 | ipv6_addr_equal(&fl.fl6_dst, &np->daddr) ? | ||
809 | &np->daddr : NULL); | ||
810 | |||
811 | release_sock(sk); | 807 | release_sock(sk); |
812 | out: | 808 | out: |
813 | fl6_sock_release(flowlabel); | 809 | fl6_sock_release(flowlabel); |
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 66d04004afda..ca9cf6853755 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
@@ -515,6 +515,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req, | |||
515 | done: | 515 | done: |
516 | if (opt && opt != np->opt) | 516 | if (opt && opt != np->opt) |
517 | sock_kfree_s(sk, opt, opt->tot_len); | 517 | sock_kfree_s(sk, opt, opt->tot_len); |
518 | dst_release(dst); | ||
518 | return err; | 519 | return err; |
519 | } | 520 | } |
520 | 521 | ||
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c index 69bd957380e7..91cce8b2d7a5 100644 --- a/net/ipv6/xfrm6_policy.c +++ b/net/ipv6/xfrm6_policy.c | |||
@@ -11,7 +11,6 @@ | |||
11 | * | 11 | * |
12 | */ | 12 | */ |
13 | 13 | ||
14 | #include <asm/bug.h> | ||
15 | #include <linux/compiler.h> | 14 | #include <linux/compiler.h> |
16 | #include <linux/config.h> | 15 | #include <linux/config.h> |
17 | #include <linux/netdevice.h> | 16 | #include <linux/netdevice.h> |
diff --git a/net/irda/irda_device.c b/net/irda/irda_device.c index 890bac0d4a56..e3debbdb67f5 100644 --- a/net/irda/irda_device.c +++ b/net/irda/irda_device.c | |||
@@ -343,12 +343,12 @@ static void irda_task_timer_expired(void *data) | |||
343 | static void irda_device_setup(struct net_device *dev) | 343 | static void irda_device_setup(struct net_device *dev) |
344 | { | 344 | { |
345 | dev->hard_header_len = 0; | 345 | dev->hard_header_len = 0; |
346 | dev->addr_len = 0; | 346 | dev->addr_len = LAP_ALEN; |
347 | 347 | ||
348 | dev->type = ARPHRD_IRDA; | 348 | dev->type = ARPHRD_IRDA; |
349 | dev->tx_queue_len = 8; /* Window size + 1 s-frame */ | 349 | dev->tx_queue_len = 8; /* Window size + 1 s-frame */ |
350 | 350 | ||
351 | memset(dev->broadcast, 0xff, 4); | 351 | memset(dev->broadcast, 0xff, LAP_ALEN); |
352 | 352 | ||
353 | dev->mtu = 2048; | 353 | dev->mtu = 2048; |
354 | dev->flags = IFF_NOARP; | 354 | dev->flags = IFF_NOARP; |
diff --git a/net/irda/irnet/irnet_irda.c b/net/irda/irnet/irnet_irda.c index 07ec326c71f5..f65c7a83bc5c 100644 --- a/net/irda/irnet/irnet_irda.c +++ b/net/irda/irnet/irnet_irda.c | |||
@@ -696,7 +696,7 @@ irnet_daddr_to_dname(irnet_socket * self) | |||
696 | { | 696 | { |
697 | /* Yes !!! Get it.. */ | 697 | /* Yes !!! Get it.. */ |
698 | strlcpy(self->rname, discoveries[i].info, sizeof(self->rname)); | 698 | strlcpy(self->rname, discoveries[i].info, sizeof(self->rname)); |
699 | self->rname[NICKNAME_MAX_LEN + 1] = '\0'; | 699 | self->rname[sizeof(self->rname) - 1] = '\0'; |
700 | DEBUG(IRDA_SERV_INFO, "Device 0x%08x is in fact ``%s''.\n", | 700 | DEBUG(IRDA_SERV_INFO, "Device 0x%08x is in fact ``%s''.\n", |
701 | self->daddr, self->rname); | 701 | self->daddr, self->rname); |
702 | kfree(discoveries); | 702 | kfree(discoveries); |
diff --git a/net/key/af_key.c b/net/key/af_key.c index 43f1ce74187d..b2d4d1dd2116 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c | |||
@@ -1423,7 +1423,7 @@ static int pfkey_add(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, | |||
1423 | 1423 | ||
1424 | if (err < 0) { | 1424 | if (err < 0) { |
1425 | x->km.state = XFRM_STATE_DEAD; | 1425 | x->km.state = XFRM_STATE_DEAD; |
1426 | xfrm_state_put(x); | 1426 | __xfrm_state_put(x); |
1427 | goto out; | 1427 | goto out; |
1428 | } | 1428 | } |
1429 | 1429 | ||
@@ -1620,6 +1620,7 @@ static int key_notify_sa_flush(struct km_event *c) | |||
1620 | return -ENOBUFS; | 1620 | return -ENOBUFS; |
1621 | hdr = (struct sadb_msg *) skb_put(skb, sizeof(struct sadb_msg)); | 1621 | hdr = (struct sadb_msg *) skb_put(skb, sizeof(struct sadb_msg)); |
1622 | hdr->sadb_msg_satype = pfkey_proto2satype(c->data.proto); | 1622 | hdr->sadb_msg_satype = pfkey_proto2satype(c->data.proto); |
1623 | hdr->sadb_msg_type = SADB_FLUSH; | ||
1623 | hdr->sadb_msg_seq = c->seq; | 1624 | hdr->sadb_msg_seq = c->seq; |
1624 | hdr->sadb_msg_pid = c->pid; | 1625 | hdr->sadb_msg_pid = c->pid; |
1625 | hdr->sadb_msg_version = PF_KEY_V2; | 1626 | hdr->sadb_msg_version = PF_KEY_V2; |
@@ -2385,6 +2386,7 @@ static int key_notify_policy_flush(struct km_event *c) | |||
2385 | if (!skb_out) | 2386 | if (!skb_out) |
2386 | return -ENOBUFS; | 2387 | return -ENOBUFS; |
2387 | hdr = (struct sadb_msg *) skb_put(skb_out, sizeof(struct sadb_msg)); | 2388 | hdr = (struct sadb_msg *) skb_put(skb_out, sizeof(struct sadb_msg)); |
2389 | hdr->sadb_msg_type = SADB_X_SPDFLUSH; | ||
2388 | hdr->sadb_msg_seq = c->seq; | 2390 | hdr->sadb_msg_seq = c->seq; |
2389 | hdr->sadb_msg_pid = c->pid; | 2391 | hdr->sadb_msg_pid = c->pid; |
2390 | hdr->sadb_msg_version = PF_KEY_V2; | 2392 | hdr->sadb_msg_version = PF_KEY_V2; |
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index 99c0a0fa4a97..a8e5544da93e 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig | |||
@@ -102,8 +102,6 @@ config NF_CT_NETLINK | |||
102 | help | 102 | help |
103 | This option enables support for a netlink-based userspace interface | 103 | This option enables support for a netlink-based userspace interface |
104 | 104 | ||
105 | endmenu | ||
106 | |||
107 | config NETFILTER_XTABLES | 105 | config NETFILTER_XTABLES |
108 | tristate "Netfilter Xtables support (required for ip_tables)" | 106 | tristate "Netfilter Xtables support (required for ip_tables)" |
109 | help | 107 | help |
@@ -128,7 +126,7 @@ config NETFILTER_XT_TARGET_CONNMARK | |||
128 | tristate '"CONNMARK" target support' | 126 | tristate '"CONNMARK" target support' |
129 | depends on NETFILTER_XTABLES | 127 | depends on NETFILTER_XTABLES |
130 | depends on IP_NF_MANGLE || IP6_NF_MANGLE | 128 | depends on IP_NF_MANGLE || IP6_NF_MANGLE |
131 | depends on (IP_NF_CONNTRACK && IP_NF_CONNTRACK_MARK) || (NF_CONNTRACK_MARK && NF_CONNTRACK_IPV4) | 129 | depends on (IP_NF_CONNTRACK && IP_NF_CONNTRACK_MARK) || (NF_CONNTRACK_MARK && NF_CONNTRACK) |
132 | help | 130 | help |
133 | This option adds a `CONNMARK' target, which allows one to manipulate | 131 | This option adds a `CONNMARK' target, which allows one to manipulate |
134 | the connection mark value. Similar to the MARK target, but | 132 | the connection mark value. Similar to the MARK target, but |
@@ -189,7 +187,7 @@ config NETFILTER_XT_MATCH_COMMENT | |||
189 | config NETFILTER_XT_MATCH_CONNBYTES | 187 | config NETFILTER_XT_MATCH_CONNBYTES |
190 | tristate '"connbytes" per-connection counter match support' | 188 | tristate '"connbytes" per-connection counter match support' |
191 | depends on NETFILTER_XTABLES | 189 | depends on NETFILTER_XTABLES |
192 | depends on (IP_NF_CONNTRACK && IP_NF_CT_ACCT) || NF_CT_ACCT | 190 | depends on (IP_NF_CONNTRACK && IP_NF_CT_ACCT) || (NF_CT_ACCT && NF_CONNTRACK) |
193 | help | 191 | help |
194 | This option adds a `connbytes' match, which allows you to match the | 192 | This option adds a `connbytes' match, which allows you to match the |
195 | number of bytes and/or packets for each direction within a connection. | 193 | number of bytes and/or packets for each direction within a connection. |
@@ -200,7 +198,7 @@ config NETFILTER_XT_MATCH_CONNBYTES | |||
200 | config NETFILTER_XT_MATCH_CONNMARK | 198 | config NETFILTER_XT_MATCH_CONNMARK |
201 | tristate '"connmark" connection mark match support' | 199 | tristate '"connmark" connection mark match support' |
202 | depends on NETFILTER_XTABLES | 200 | depends on NETFILTER_XTABLES |
203 | depends on (IP_NF_CONNTRACK && IP_NF_CONNTRACK_MARK) || NF_CONNTRACK_MARK | 201 | depends on (IP_NF_CONNTRACK && IP_NF_CONNTRACK_MARK) || (NF_CONNTRACK_MARK && NF_CONNTRACK) |
204 | help | 202 | help |
205 | This option adds a `connmark' match, which allows you to match the | 203 | This option adds a `connmark' match, which allows you to match the |
206 | connection mark value previously set for the session by `CONNMARK'. | 204 | connection mark value previously set for the session by `CONNMARK'. |
@@ -361,3 +359,5 @@ config NETFILTER_XT_MATCH_TCPMSS | |||
361 | 359 | ||
362 | To compile it as a module, choose M here. If unsure, say N. | 360 | To compile it as a module, choose M here. If unsure, say N. |
363 | 361 | ||
362 | endmenu | ||
363 | |||
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 62bb509f05d4..d622ddf08bb0 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c | |||
@@ -188,7 +188,7 @@ extern struct nf_conntrack_protocol nf_conntrack_generic_protocol; | |||
188 | struct nf_conntrack_protocol * | 188 | struct nf_conntrack_protocol * |
189 | __nf_ct_proto_find(u_int16_t l3proto, u_int8_t protocol) | 189 | __nf_ct_proto_find(u_int16_t l3proto, u_int8_t protocol) |
190 | { | 190 | { |
191 | if (unlikely(nf_ct_protos[l3proto] == NULL)) | 191 | if (unlikely(l3proto >= AF_MAX || nf_ct_protos[l3proto] == NULL)) |
192 | return &nf_conntrack_generic_protocol; | 192 | return &nf_conntrack_generic_protocol; |
193 | 193 | ||
194 | return nf_ct_protos[l3proto][protocol]; | 194 | return nf_ct_protos[l3proto][protocol]; |
@@ -1556,6 +1556,8 @@ void nf_conntrack_cleanup(void) | |||
1556 | { | 1556 | { |
1557 | int i; | 1557 | int i; |
1558 | 1558 | ||
1559 | ip_ct_attach = NULL; | ||
1560 | |||
1559 | /* This makes sure all current packets have passed through | 1561 | /* This makes sure all current packets have passed through |
1560 | netfilter framework. Roll on, two-stage module | 1562 | netfilter framework. Roll on, two-stage module |
1561 | delete... */ | 1563 | delete... */ |
@@ -1715,6 +1717,9 @@ int __init nf_conntrack_init(void) | |||
1715 | nf_ct_l3protos[i] = &nf_conntrack_generic_l3proto; | 1717 | nf_ct_l3protos[i] = &nf_conntrack_generic_l3proto; |
1716 | write_unlock_bh(&nf_conntrack_lock); | 1718 | write_unlock_bh(&nf_conntrack_lock); |
1717 | 1719 | ||
1720 | /* For use by REJECT target */ | ||
1721 | ip_ct_attach = __nf_conntrack_attach; | ||
1722 | |||
1718 | /* Set up fake conntrack: | 1723 | /* Set up fake conntrack: |
1719 | - to never be deleted, not in any hashes */ | 1724 | - to never be deleted, not in any hashes */ |
1720 | atomic_set(&nf_conntrack_untracked.ct_general.use, 1); | 1725 | atomic_set(&nf_conntrack_untracked.ct_general.use, 1); |
diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c index ab0c920f0d30..6f210f399762 100644 --- a/net/netfilter/nf_conntrack_ftp.c +++ b/net/netfilter/nf_conntrack_ftp.c | |||
@@ -657,8 +657,6 @@ static int __init init(void) | |||
657 | /* FIXME should be configurable whether IPv4 and IPv6 FTP connections | 657 | /* FIXME should be configurable whether IPv4 and IPv6 FTP connections |
658 | are tracked or not - YK */ | 658 | are tracked or not - YK */ |
659 | for (i = 0; i < ports_c; i++) { | 659 | for (i = 0; i < ports_c; i++) { |
660 | memset(&ftp[i], 0, sizeof(struct nf_conntrack_helper)); | ||
661 | |||
662 | ftp[i][0].tuple.src.l3num = PF_INET; | 660 | ftp[i][0].tuple.src.l3num = PF_INET; |
663 | ftp[i][1].tuple.src.l3num = PF_INET6; | 661 | ftp[i][1].tuple.src.l3num = PF_INET6; |
664 | for (j = 0; j < 2; j++) { | 662 | for (j = 0; j < 2; j++) { |
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 73ab16bc7d40..9ff3463037e1 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c | |||
@@ -1232,7 +1232,7 @@ static int ctnetlink_expect_event(struct notifier_block *this, | |||
1232 | 1232 | ||
1233 | b = skb->tail; | 1233 | b = skb->tail; |
1234 | 1234 | ||
1235 | type |= NFNL_SUBSYS_CTNETLINK << 8; | 1235 | type |= NFNL_SUBSYS_CTNETLINK_EXP << 8; |
1236 | nlh = NLMSG_PUT(skb, 0, 0, type, sizeof(struct nfgenmsg)); | 1236 | nlh = NLMSG_PUT(skb, 0, 0, type, sizeof(struct nfgenmsg)); |
1237 | nfmsg = NLMSG_DATA(nlh); | 1237 | nfmsg = NLMSG_DATA(nlh); |
1238 | 1238 | ||
@@ -1589,6 +1589,7 @@ static struct nfnetlink_subsystem ctnl_exp_subsys = { | |||
1589 | }; | 1589 | }; |
1590 | 1590 | ||
1591 | MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK); | 1591 | MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK); |
1592 | MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK_EXP); | ||
1592 | 1593 | ||
1593 | static int __init ctnetlink_init(void) | 1594 | static int __init ctnetlink_init(void) |
1594 | { | 1595 | { |
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c index df99138c3b3b..6492ed66fb3c 100644 --- a/net/netfilter/nf_conntrack_proto_tcp.c +++ b/net/netfilter/nf_conntrack_proto_tcp.c | |||
@@ -864,7 +864,9 @@ static int csum6(const struct sk_buff *skb, unsigned int dataoff) | |||
864 | { | 864 | { |
865 | return csum_ipv6_magic(&skb->nh.ipv6h->saddr, &skb->nh.ipv6h->daddr, | 865 | return csum_ipv6_magic(&skb->nh.ipv6h->saddr, &skb->nh.ipv6h->daddr, |
866 | skb->len - dataoff, IPPROTO_TCP, | 866 | skb->len - dataoff, IPPROTO_TCP, |
867 | skb->ip_summed == CHECKSUM_HW ? skb->csum | 867 | skb->ip_summed == CHECKSUM_HW |
868 | ? csum_sub(skb->csum, | ||
869 | skb_checksum(skb, 0, dataoff, 0)) | ||
868 | : skb_checksum(skb, dataoff, skb->len - dataoff, | 870 | : skb_checksum(skb, dataoff, skb->len - dataoff, |
869 | 0)); | 871 | 0)); |
870 | } | 872 | } |
diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c index 4264dd079a16..831d206344e0 100644 --- a/net/netfilter/nf_conntrack_proto_udp.c +++ b/net/netfilter/nf_conntrack_proto_udp.c | |||
@@ -161,7 +161,9 @@ static int csum6(const struct sk_buff *skb, unsigned int dataoff) | |||
161 | { | 161 | { |
162 | return csum_ipv6_magic(&skb->nh.ipv6h->saddr, &skb->nh.ipv6h->daddr, | 162 | return csum_ipv6_magic(&skb->nh.ipv6h->saddr, &skb->nh.ipv6h->daddr, |
163 | skb->len - dataoff, IPPROTO_UDP, | 163 | skb->len - dataoff, IPPROTO_UDP, |
164 | skb->ip_summed == CHECKSUM_HW ? skb->csum | 164 | skb->ip_summed == CHECKSUM_HW |
165 | ? csum_sub(skb->csum, | ||
166 | skb_checksum(skb, 0, dataoff, 0)) | ||
165 | : skb_checksum(skb, dataoff, skb->len - dataoff, | 167 | : skb_checksum(skb, dataoff, skb->len - dataoff, |
166 | 0)); | 168 | 0)); |
167 | } | 169 | } |
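The two conntrack csum6() hunks (TCP above, UDP here) fix hardware-checksum handling: skb->csum covers the whole packet, so the bytes before dataoff must be subtracted out in one's-complement arithmetic before the pseudo-header verification. A stand-alone sketch of that arithmetic follows; csum16 and csum16_sub are invented helpers, not the kernel's skb_checksum()/csum_sub().

    #include <stddef.h>
    #include <stdint.h>

    /* Fold a byte range into a 16-bit one's-complement sum. */
    static uint16_t csum16(const uint8_t *buf, size_t len, uint32_t sum)
    {
        for (size_t i = 0; i + 1 < len; i += 2)
            sum += (uint32_t)buf[i] << 8 | buf[i + 1];
        if (len & 1)
            sum += (uint32_t)buf[len - 1] << 8;
        while (sum >> 16)
            sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)sum;
    }

    /* One's-complement subtraction: remove bytes that were already folded
     * into a running checksum, the role csum_sub() plays in the patch. */
    static uint16_t csum16_sub(uint16_t sum, uint16_t part)
    {
        uint32_t d = (uint32_t)sum + (uint16_t)~part;
        while (d >> 16)
            d = (d & 0xffff) + (d >> 16);
        return (uint16_t)d;
    }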
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c index d3a4f30a7f22..d9f0d7ef103b 100644 --- a/net/netfilter/nf_queue.c +++ b/net/netfilter/nf_queue.c | |||
@@ -6,6 +6,7 @@ | |||
6 | #include <linux/skbuff.h> | 6 | #include <linux/skbuff.h> |
7 | #include <linux/netfilter.h> | 7 | #include <linux/netfilter.h> |
8 | #include <linux/seq_file.h> | 8 | #include <linux/seq_file.h> |
9 | #include <linux/rcupdate.h> | ||
9 | #include <net/protocol.h> | 10 | #include <net/protocol.h> |
10 | 11 | ||
11 | #include "nf_internals.h" | 12 | #include "nf_internals.h" |
@@ -16,7 +17,7 @@ | |||
16 | * for queueing and must reinject all packets it receives, no matter what. | 17 | * for queueing and must reinject all packets it receives, no matter what. |
17 | */ | 18 | */ |
18 | static struct nf_queue_handler *queue_handler[NPROTO]; | 19 | static struct nf_queue_handler *queue_handler[NPROTO]; |
19 | static struct nf_queue_rerouter *queue_rerouter; | 20 | static struct nf_queue_rerouter *queue_rerouter[NPROTO]; |
20 | 21 | ||
21 | static DEFINE_RWLOCK(queue_handler_lock); | 22 | static DEFINE_RWLOCK(queue_handler_lock); |
22 | 23 | ||
@@ -64,7 +65,7 @@ int nf_register_queue_rerouter(int pf, struct nf_queue_rerouter *rer) | |||
64 | return -EINVAL; | 65 | return -EINVAL; |
65 | 66 | ||
66 | write_lock_bh(&queue_handler_lock); | 67 | write_lock_bh(&queue_handler_lock); |
67 | memcpy(&queue_rerouter[pf], rer, sizeof(queue_rerouter[pf])); | 68 | rcu_assign_pointer(queue_rerouter[pf], rer); |
68 | write_unlock_bh(&queue_handler_lock); | 69 | write_unlock_bh(&queue_handler_lock); |
69 | 70 | ||
70 | return 0; | 71 | return 0; |
@@ -77,8 +78,9 @@ int nf_unregister_queue_rerouter(int pf) | |||
77 | return -EINVAL; | 78 | return -EINVAL; |
78 | 79 | ||
79 | write_lock_bh(&queue_handler_lock); | 80 | write_lock_bh(&queue_handler_lock); |
80 | memset(&queue_rerouter[pf], 0, sizeof(queue_rerouter[pf])); | 81 | rcu_assign_pointer(queue_rerouter[pf], NULL); |
81 | write_unlock_bh(&queue_handler_lock); | 82 | write_unlock_bh(&queue_handler_lock); |
83 | synchronize_rcu(); | ||
82 | return 0; | 84 | return 0; |
83 | } | 85 | } |
84 | EXPORT_SYMBOL_GPL(nf_unregister_queue_rerouter); | 86 | EXPORT_SYMBOL_GPL(nf_unregister_queue_rerouter); |
@@ -114,16 +116,17 @@ int nf_queue(struct sk_buff **skb, | |||
114 | struct net_device *physindev = NULL; | 116 | struct net_device *physindev = NULL; |
115 | struct net_device *physoutdev = NULL; | 117 | struct net_device *physoutdev = NULL; |
116 | #endif | 118 | #endif |
119 | struct nf_queue_rerouter *rerouter; | ||
117 | 120 | ||
118 | /* QUEUE == DROP if noone is waiting, to be safe. */ | 121 | /* QUEUE == DROP if noone is waiting, to be safe. */ |
119 | read_lock(&queue_handler_lock); | 122 | read_lock(&queue_handler_lock); |
120 | if (!queue_handler[pf] || !queue_handler[pf]->outfn) { | 123 | if (!queue_handler[pf]) { |
121 | read_unlock(&queue_handler_lock); | 124 | read_unlock(&queue_handler_lock); |
122 | kfree_skb(*skb); | 125 | kfree_skb(*skb); |
123 | return 1; | 126 | return 1; |
124 | } | 127 | } |
125 | 128 | ||
126 | info = kmalloc(sizeof(*info)+queue_rerouter[pf].rer_size, GFP_ATOMIC); | 129 | info = kmalloc(sizeof(*info)+queue_rerouter[pf]->rer_size, GFP_ATOMIC); |
127 | if (!info) { | 130 | if (!info) { |
128 | if (net_ratelimit()) | 131 | if (net_ratelimit()) |
129 | printk(KERN_ERR "OOM queueing packet %p\n", | 132 | printk(KERN_ERR "OOM queueing packet %p\n", |
@@ -155,15 +158,13 @@ int nf_queue(struct sk_buff **skb, | |||
155 | if (physoutdev) dev_hold(physoutdev); | 158 | if (physoutdev) dev_hold(physoutdev); |
156 | } | 159 | } |
157 | #endif | 160 | #endif |
158 | if (queue_rerouter[pf].save) | 161 | rerouter = rcu_dereference(queue_rerouter[pf]); |
159 | queue_rerouter[pf].save(*skb, info); | 162 | if (rerouter) |
163 | rerouter->save(*skb, info); | ||
160 | 164 | ||
161 | status = queue_handler[pf]->outfn(*skb, info, queuenum, | 165 | status = queue_handler[pf]->outfn(*skb, info, queuenum, |
162 | queue_handler[pf]->data); | 166 | queue_handler[pf]->data); |
163 | 167 | ||
164 | if (status >= 0 && queue_rerouter[pf].reroute) | ||
165 | status = queue_rerouter[pf].reroute(skb, info); | ||
166 | |||
167 | read_unlock(&queue_handler_lock); | 168 | read_unlock(&queue_handler_lock); |
168 | 169 | ||
169 | if (status < 0) { | 170 | if (status < 0) { |
@@ -189,6 +190,7 @@ void nf_reinject(struct sk_buff *skb, struct nf_info *info, | |||
189 | { | 190 | { |
190 | struct list_head *elem = &info->elem->list; | 191 | struct list_head *elem = &info->elem->list; |
191 | struct list_head *i; | 192 | struct list_head *i; |
193 | struct nf_queue_rerouter *rerouter; | ||
192 | 194 | ||
193 | rcu_read_lock(); | 195 | rcu_read_lock(); |
194 | 196 | ||
@@ -212,7 +214,7 @@ void nf_reinject(struct sk_buff *skb, struct nf_info *info, | |||
212 | break; | 214 | break; |
213 | } | 215 | } |
214 | 216 | ||
215 | if (elem == &nf_hooks[info->pf][info->hook]) { | 217 | if (i == &nf_hooks[info->pf][info->hook]) { |
216 | /* The module which sent it to userspace is gone. */ | 218 | /* The module which sent it to userspace is gone. */ |
217 | NFDEBUG("%s: module disappeared, dropping packet.\n", | 219 | NFDEBUG("%s: module disappeared, dropping packet.\n", |
218 | __FUNCTION__); | 220 | __FUNCTION__); |
@@ -226,6 +228,12 @@ void nf_reinject(struct sk_buff *skb, struct nf_info *info, | |||
226 | } | 228 | } |
227 | 229 | ||
228 | if (verdict == NF_ACCEPT) { | 230 | if (verdict == NF_ACCEPT) { |
231 | rerouter = rcu_dereference(queue_rerouter[info->pf]); | ||
232 | if (rerouter && rerouter->reroute(&skb, info) < 0) | ||
233 | verdict = NF_DROP; | ||
234 | } | ||
235 | |||
236 | if (verdict == NF_ACCEPT) { | ||
229 | next_hook: | 237 | next_hook: |
230 | verdict = nf_iterate(&nf_hooks[info->pf][info->hook], | 238 | verdict = nf_iterate(&nf_hooks[info->pf][info->hook], |
231 | &skb, info->hook, | 239 | &skb, info->hook, |
@@ -322,22 +330,12 @@ int __init netfilter_queue_init(void) | |||
322 | { | 330 | { |
323 | #ifdef CONFIG_PROC_FS | 331 | #ifdef CONFIG_PROC_FS |
324 | struct proc_dir_entry *pde; | 332 | struct proc_dir_entry *pde; |
325 | #endif | ||
326 | queue_rerouter = kmalloc(NPROTO * sizeof(struct nf_queue_rerouter), | ||
327 | GFP_KERNEL); | ||
328 | if (!queue_rerouter) | ||
329 | return -ENOMEM; | ||
330 | 333 | ||
331 | #ifdef CONFIG_PROC_FS | ||
332 | pde = create_proc_entry("nf_queue", S_IRUGO, proc_net_netfilter); | 334 | pde = create_proc_entry("nf_queue", S_IRUGO, proc_net_netfilter); |
333 | if (!pde) { | 335 | if (!pde) |
334 | kfree(queue_rerouter); | ||
335 | return -1; | 336 | return -1; |
336 | } | ||
337 | pde->proc_fops = &nfqueue_file_ops; | 337 | pde->proc_fops = &nfqueue_file_ops; |
338 | #endif | 338 | #endif |
339 | memset(queue_rerouter, 0, NPROTO * sizeof(struct nf_queue_rerouter)); | ||
340 | |||
341 | return 0; | 339 | return 0; |
342 | } | 340 | } |
343 | 341 | ||
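The nf_queue.c changes replace the memcpy'd rerouter table with per-family pointers published via rcu_assign_pointer() and read with rcu_dereference(), so readers never observe a half-copied entry and unregistration can wait in synchronize_rcu(). Outside the kernel, a rough stand-alone analogue is an atomic release/acquire pointer swap; the sketch below uses C11 atomics and invented names (publish_rerouter, current_rerouter), not the kernel RCU API.

    #include <stdatomic.h>
    #include <stddef.h>

    struct rerouter {
        int    (*save)(void *pkt, void *info);
        int    (*reroute)(void *pkt, void *info);
        size_t rer_size;
    };

    /* One slot per protocol family (NPROTO-like bound, chosen here arbitrarily). */
    static _Atomic(struct rerouter *) rerouters[32];

    /* Writer: publish (or clear) a slot. Readers see either the old pointer
     * or the fully initialised new one, never a partially copied structure. */
    static void publish_rerouter(int pf, struct rerouter *r)
    {
        atomic_store_explicit(&rerouters[pf], r, memory_order_release);
    }

    /* Reader: a single acquire load replaces the old structure copy. */
    static struct rerouter *current_rerouter(int pf)
    {
        return atomic_load_explicit(&rerouters[pf], memory_order_acquire);
    }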
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c index e10512e229b6..3b3c781b40c0 100644 --- a/net/netfilter/nfnetlink_log.c +++ b/net/netfilter/nfnetlink_log.c | |||
@@ -37,7 +37,7 @@ | |||
37 | #include "../bridge/br_private.h" | 37 | #include "../bridge/br_private.h" |
38 | #endif | 38 | #endif |
39 | 39 | ||
40 | #define NFULNL_NLBUFSIZ_DEFAULT 4096 | 40 | #define NFULNL_NLBUFSIZ_DEFAULT NLMSG_GOODSIZE |
41 | #define NFULNL_TIMEOUT_DEFAULT 100 /* every second */ | 41 | #define NFULNL_TIMEOUT_DEFAULT 100 /* every second */ |
42 | #define NFULNL_QTHRESH_DEFAULT 100 /* 100 packets */ | 42 | #define NFULNL_QTHRESH_DEFAULT 100 /* 100 packets */ |
43 | 43 | ||
@@ -314,24 +314,28 @@ static struct sk_buff *nfulnl_alloc_skb(unsigned int inst_size, | |||
314 | unsigned int pkt_size) | 314 | unsigned int pkt_size) |
315 | { | 315 | { |
316 | struct sk_buff *skb; | 316 | struct sk_buff *skb; |
317 | unsigned int n; | ||
317 | 318 | ||
318 | UDEBUG("entered (%u, %u)\n", inst_size, pkt_size); | 319 | UDEBUG("entered (%u, %u)\n", inst_size, pkt_size); |
319 | 320 | ||
320 | /* alloc skb which should be big enough for a whole multipart | 321 | /* alloc skb which should be big enough for a whole multipart |
321 | * message. WARNING: has to be <= 128k due to slab restrictions */ | 322 | * message. WARNING: has to be <= 128k due to slab restrictions */ |
322 | 323 | ||
323 | skb = alloc_skb(inst_size, GFP_ATOMIC); | 324 | n = max(inst_size, pkt_size); |
325 | skb = alloc_skb(n, GFP_ATOMIC); | ||
324 | if (!skb) { | 326 | if (!skb) { |
325 | PRINTR("nfnetlink_log: can't alloc whole buffer (%u bytes)\n", | 327 | PRINTR("nfnetlink_log: can't alloc whole buffer (%u bytes)\n", |
326 | inst_size); | 328 | inst_size); |
327 | 329 | ||
328 | /* try to allocate only as much as we need for current | 330 | if (n > pkt_size) { |
329 | * packet */ | 331 | /* try to allocate only as much as we need for current |
332 | * packet */ | ||
330 | 333 | ||
331 | skb = alloc_skb(pkt_size, GFP_ATOMIC); | 334 | skb = alloc_skb(pkt_size, GFP_ATOMIC); |
332 | if (!skb) | 335 | if (!skb) |
333 | PRINTR("nfnetlink_log: can't even alloc %u bytes\n", | 336 | PRINTR("nfnetlink_log: can't even alloc %u " |
334 | pkt_size); | 337 | "bytes\n", pkt_size); |
338 | } | ||
335 | } | 339 | } |
336 | 340 | ||
337 | return skb; | 341 | return skb; |
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index 18ed9c5d209c..2cf5fb8322c4 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c | |||
@@ -825,7 +825,8 @@ nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb, | |||
825 | } | 825 | } |
826 | 826 | ||
827 | if (nfqa[NFQA_MARK-1]) | 827 | if (nfqa[NFQA_MARK-1]) |
828 | skb->nfmark = ntohl(*(u_int32_t *)NFA_DATA(nfqa[NFQA_MARK-1])); | 828 | entry->skb->nfmark = ntohl(*(u_int32_t *) |
829 | NFA_DATA(nfqa[NFQA_MARK-1])); | ||
829 | 830 | ||
830 | issue_verdict(entry, verdict); | 831 | issue_verdict(entry, verdict); |
831 | instance_put(queue); | 832 | instance_put(queue); |
@@ -927,8 +928,12 @@ nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb, | |||
927 | 928 | ||
928 | if (nfqa[NFQA_CFG_PARAMS-1]) { | 929 | if (nfqa[NFQA_CFG_PARAMS-1]) { |
929 | struct nfqnl_msg_config_params *params; | 930 | struct nfqnl_msg_config_params *params; |
930 | params = NFA_DATA(nfqa[NFQA_CFG_PARAMS-1]); | ||
931 | 931 | ||
932 | if (!queue) { | ||
933 | ret = -ENOENT; | ||
934 | goto out_put; | ||
935 | } | ||
936 | params = NFA_DATA(nfqa[NFQA_CFG_PARAMS-1]); | ||
932 | nfqnl_set_mode(queue, params->copy_mode, | 937 | nfqnl_set_mode(queue, params->copy_mode, |
933 | ntohl(params->copy_range)); | 938 | ntohl(params->copy_range)); |
934 | } | 939 | } |
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 2101b45d2ec6..59dc7d140600 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c | |||
@@ -702,7 +702,8 @@ struct sock *netlink_getsockbyfilp(struct file *filp) | |||
702 | * 0: continue | 702 | * 0: continue |
703 | * 1: repeat lookup - reference dropped while waiting for socket memory. | 703 | * 1: repeat lookup - reference dropped while waiting for socket memory. |
704 | */ | 704 | */ |
705 | int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock, long timeo) | 705 | int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock, |
706 | long timeo, struct sock *ssk) | ||
706 | { | 707 | { |
707 | struct netlink_sock *nlk; | 708 | struct netlink_sock *nlk; |
708 | 709 | ||
@@ -712,7 +713,7 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock, long t | |||
712 | test_bit(0, &nlk->state)) { | 713 | test_bit(0, &nlk->state)) { |
713 | DECLARE_WAITQUEUE(wait, current); | 714 | DECLARE_WAITQUEUE(wait, current); |
714 | if (!timeo) { | 715 | if (!timeo) { |
715 | if (!nlk->pid) | 716 | if (!ssk || nlk_sk(ssk)->pid == 0) |
716 | netlink_overrun(sk); | 717 | netlink_overrun(sk); |
717 | sock_put(sk); | 718 | sock_put(sk); |
718 | kfree_skb(skb); | 719 | kfree_skb(skb); |
@@ -797,7 +798,7 @@ retry: | |||
797 | kfree_skb(skb); | 798 | kfree_skb(skb); |
798 | return PTR_ERR(sk); | 799 | return PTR_ERR(sk); |
799 | } | 800 | } |
800 | err = netlink_attachskb(sk, skb, nonblock, timeo); | 801 | err = netlink_attachskb(sk, skb, nonblock, timeo, ssk); |
801 | if (err == 1) | 802 | if (err == 1) |
802 | goto retry; | 803 | goto retry; |
803 | if (err) | 804 | if (err) |
@@ -1193,6 +1194,9 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock, | |||
1193 | msg->msg_namelen = sizeof(*addr); | 1194 | msg->msg_namelen = sizeof(*addr); |
1194 | } | 1195 | } |
1195 | 1196 | ||
1197 | if (nlk->flags & NETLINK_RECV_PKTINFO) | ||
1198 | netlink_cmsg_recv_pktinfo(msg, skb); | ||
1199 | |||
1196 | if (NULL == siocb->scm) { | 1200 | if (NULL == siocb->scm) { |
1197 | memset(&scm, 0, sizeof(scm)); | 1201 | memset(&scm, 0, sizeof(scm)); |
1198 | siocb->scm = &scm; | 1202 | siocb->scm = &scm; |
@@ -1204,8 +1208,6 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock, | |||
1204 | netlink_dump(sk); | 1208 | netlink_dump(sk); |
1205 | 1209 | ||
1206 | scm_recv(sock, msg, siocb->scm, flags); | 1210 | scm_recv(sock, msg, siocb->scm, flags); |
1207 | if (nlk->flags & NETLINK_RECV_PKTINFO) | ||
1208 | netlink_cmsg_recv_pktinfo(msg, skb); | ||
1209 | 1211 | ||
1210 | out: | 1212 | out: |
1211 | netlink_rcv_wake(sk); | 1213 | netlink_rcv_wake(sk); |
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c index 4ae1538c54a9..43e72419c868 100644 --- a/net/netlink/genetlink.c +++ b/net/netlink/genetlink.c | |||
@@ -238,7 +238,7 @@ int genl_register_family(struct genl_family *family) | |||
238 | sizeof(struct nlattr *), GFP_KERNEL); | 238 | sizeof(struct nlattr *), GFP_KERNEL); |
239 | if (family->attrbuf == NULL) { | 239 | if (family->attrbuf == NULL) { |
240 | err = -ENOMEM; | 240 | err = -ENOMEM; |
241 | goto errout; | 241 | goto errout_locked; |
242 | } | 242 | } |
243 | } else | 243 | } else |
244 | family->attrbuf = NULL; | 244 | family->attrbuf = NULL; |
@@ -288,7 +288,7 @@ int genl_unregister_family(struct genl_family *family) | |||
288 | return -ENOENT; | 288 | return -ENOENT; |
289 | } | 289 | } |
290 | 290 | ||
291 | static inline int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, | 291 | static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, |
292 | int *errp) | 292 | int *errp) |
293 | { | 293 | { |
294 | struct genl_ops *ops; | 294 | struct genl_ops *ops; |
@@ -375,7 +375,7 @@ static void genl_rcv(struct sock *sk, int len) | |||
375 | do { | 375 | do { |
376 | if (genl_trylock()) | 376 | if (genl_trylock()) |
377 | return; | 377 | return; |
378 | netlink_run_queue(sk, &qlen, &genl_rcv_msg); | 378 | netlink_run_queue(sk, &qlen, genl_rcv_msg); |
379 | genl_unlock(); | 379 | genl_unlock(); |
380 | } while (qlen && genl_sock && genl_sock->sk_receive_queue.qlen); | 380 | } while (qlen && genl_sock && genl_sock->sk_receive_queue.qlen); |
381 | } | 381 | } |
@@ -549,10 +549,8 @@ static int __init genl_init(void) | |||
549 | netlink_set_nonroot(NETLINK_GENERIC, NL_NONROOT_RECV); | 549 | netlink_set_nonroot(NETLINK_GENERIC, NL_NONROOT_RECV); |
550 | genl_sock = netlink_kernel_create(NETLINK_GENERIC, GENL_MAX_ID, | 550 | genl_sock = netlink_kernel_create(NETLINK_GENERIC, GENL_MAX_ID, |
551 | genl_rcv, THIS_MODULE); | 551 | genl_rcv, THIS_MODULE); |
552 | if (genl_sock == NULL) { | 552 | if (genl_sock == NULL) |
553 | panic("GENL: Cannot initialize generic netlink\n"); | 553 | panic("GENL: Cannot initialize generic netlink\n"); |
554 | return -ENOMEM; | ||
555 | } | ||
556 | 554 | ||
557 | return 0; | 555 | return 0; |
558 | 556 | ||
@@ -560,7 +558,6 @@ errout_register: | |||
560 | genl_unregister_family(&genl_ctrl); | 558 | genl_unregister_family(&genl_ctrl); |
561 | errout: | 559 | errout: |
562 | panic("GENL: Cannot register controller: %d\n", err); | 560 | panic("GENL: Cannot register controller: %d\n", err); |
563 | return err; | ||
564 | } | 561 | } |
565 | 562 | ||
566 | subsys_initcall(genl_init); | 563 | subsys_initcall(genl_init); |
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index ee93abc71cb8..9db7dbdb16e6 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
@@ -365,7 +365,7 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock, | |||
365 | */ | 365 | */ |
366 | 366 | ||
367 | err = -EMSGSIZE; | 367 | err = -EMSGSIZE; |
368 | if(len>dev->mtu+dev->hard_header_len) | 368 | if (len > dev->mtu + dev->hard_header_len) |
369 | goto out_unlock; | 369 | goto out_unlock; |
370 | 370 | ||
371 | err = -ENOBUFS; | 371 | err = -ENOBUFS; |
@@ -935,7 +935,7 @@ static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr, int add | |||
935 | * Check legality | 935 | * Check legality |
936 | */ | 936 | */ |
937 | 937 | ||
938 | if(addr_len!=sizeof(struct sockaddr)) | 938 | if (addr_len != sizeof(struct sockaddr)) |
939 | return -EINVAL; | 939 | return -EINVAL; |
940 | strlcpy(name,uaddr->sa_data,sizeof(name)); | 940 | strlcpy(name,uaddr->sa_data,sizeof(name)); |
941 | 941 | ||
@@ -1092,7 +1092,7 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1092 | * retries. | 1092 | * retries. |
1093 | */ | 1093 | */ |
1094 | 1094 | ||
1095 | if(skb==NULL) | 1095 | if (skb == NULL) |
1096 | goto out; | 1096 | goto out; |
1097 | 1097 | ||
1098 | /* | 1098 | /* |
@@ -1392,8 +1392,8 @@ static int packet_getsockopt(struct socket *sock, int level, int optname, | |||
1392 | if (level != SOL_PACKET) | 1392 | if (level != SOL_PACKET) |
1393 | return -ENOPROTOOPT; | 1393 | return -ENOPROTOOPT; |
1394 | 1394 | ||
1395 | if (get_user(len,optlen)) | 1395 | if (get_user(len, optlen)) |
1396 | return -EFAULT; | 1396 | return -EFAULT; |
1397 | 1397 | ||
1398 | if (len < 0) | 1398 | if (len < 0) |
1399 | return -EINVAL; | 1399 | return -EINVAL; |
@@ -1419,9 +1419,9 @@ static int packet_getsockopt(struct socket *sock, int level, int optname, | |||
1419 | return -ENOPROTOOPT; | 1419 | return -ENOPROTOOPT; |
1420 | } | 1420 | } |
1421 | 1421 | ||
1422 | if (put_user(len, optlen)) | 1422 | if (put_user(len, optlen)) |
1423 | return -EFAULT; | 1423 | return -EFAULT; |
1424 | return 0; | 1424 | return 0; |
1425 | } | 1425 | } |
1426 | 1426 | ||
1427 | 1427 | ||
diff --git a/net/sched/act_api.c b/net/sched/act_api.c index 792ce59940ec..2ffa11c6e8de 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c | |||
@@ -707,7 +707,7 @@ static int tcf_add_notify(struct tc_action *a, u32 pid, u32 seq, int event, | |||
707 | 707 | ||
708 | rtattr_failure: | 708 | rtattr_failure: |
709 | nlmsg_failure: | 709 | nlmsg_failure: |
710 | skb_trim(skb, b - skb->data); | 710 | kfree_skb(skb); |
711 | return -1; | 711 | return -1; |
712 | } | 712 | } |
713 | 713 | ||
diff --git a/net/sctp/output.c b/net/sctp/output.c index a40991ef72c9..437cba7260a4 100644 --- a/net/sctp/output.c +++ b/net/sctp/output.c | |||
@@ -608,7 +608,7 @@ static sctp_xmit_t sctp_packet_append_data(struct sctp_packet *packet, | |||
608 | * When a Fast Retransmit is being performed the sender SHOULD | 608 | * When a Fast Retransmit is being performed the sender SHOULD |
609 | * ignore the value of cwnd and SHOULD NOT delay retransmission. | 609 | * ignore the value of cwnd and SHOULD NOT delay retransmission. |
610 | */ | 610 | */ |
611 | if (!chunk->fast_retransmit) | 611 | if (chunk->fast_retransmit <= 0) |
612 | if (transport->flight_size >= transport->cwnd) { | 612 | if (transport->flight_size >= transport->cwnd) { |
613 | retval = SCTP_XMIT_RWND_FULL; | 613 | retval = SCTP_XMIT_RWND_FULL; |
614 | goto finish; | 614 | goto finish; |
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c index efb72faba20c..f148f9576dd2 100644 --- a/net/sctp/outqueue.c +++ b/net/sctp/outqueue.c | |||
@@ -406,7 +406,7 @@ void sctp_retransmit_mark(struct sctp_outq *q, | |||
406 | * chunks that are not yet acked should be added to the | 406 | * chunks that are not yet acked should be added to the |
407 | * retransmit queue. | 407 | * retransmit queue. |
408 | */ | 408 | */ |
409 | if ((fast_retransmit && chunk->fast_retransmit) || | 409 | if ((fast_retransmit && (chunk->fast_retransmit > 0)) || |
410 | (!fast_retransmit && !chunk->tsn_gap_acked)) { | 410 | (!fast_retransmit && !chunk->tsn_gap_acked)) { |
411 | /* RFC 2960 6.2.1 Processing a Received SACK | 411 | /* RFC 2960 6.2.1 Processing a Received SACK |
412 | * | 412 | * |
@@ -603,7 +603,8 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt, | |||
603 | /* Mark the chunk as ineligible for fast retransmit | 603 | /* Mark the chunk as ineligible for fast retransmit |
604 | * after it is retransmitted. | 604 | * after it is retransmitted. |
605 | */ | 605 | */ |
606 | chunk->fast_retransmit = 0; | 606 | if (chunk->fast_retransmit > 0) |
607 | chunk->fast_retransmit = -1; | ||
607 | 608 | ||
608 | *start_timer = 1; | 609 | *start_timer = 1; |
609 | q->empty = 0; | 610 | q->empty = 0; |
@@ -621,7 +622,8 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt, | |||
621 | list_for_each(lchunk1, lqueue) { | 622 | list_for_each(lchunk1, lqueue) { |
622 | chunk1 = list_entry(lchunk1, struct sctp_chunk, | 623 | chunk1 = list_entry(lchunk1, struct sctp_chunk, |
623 | transmitted_list); | 624 | transmitted_list); |
624 | chunk1->fast_retransmit = 0; | 625 | if (chunk1->fast_retransmit > 0) |
626 | chunk1->fast_retransmit = -1; | ||
625 | } | 627 | } |
626 | } | 628 | } |
627 | } | 629 | } |
@@ -1562,11 +1564,11 @@ static void sctp_mark_missing(struct sctp_outq *q, | |||
1562 | /* | 1564 | /* |
1563 | * M4) If any DATA chunk is found to have a | 1565 | * M4) If any DATA chunk is found to have a |
1564 | * 'TSN.Missing.Report' | 1566 | * 'TSN.Missing.Report' |
1565 | * value larger than or equal to 4, mark that chunk for | 1567 | * value larger than or equal to 3, mark that chunk for |
1566 | * retransmission and start the fast retransmit procedure. | 1568 | * retransmission and start the fast retransmit procedure. |
1567 | */ | 1569 | */ |
1568 | 1570 | ||
1569 | if (chunk->tsn_missing_report >= 4) { | 1571 | if (chunk->tsn_missing_report >= 3) { |
1570 | chunk->fast_retransmit = 1; | 1572 | chunk->fast_retransmit = 1; |
1571 | do_fast_retransmit = 1; | 1573 | do_fast_retransmit = 1; |
1572 | } | 1574 | } |
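
The net/sctp hunks above make chunk->fast_retransmit a three-valued field (0: not yet marked, 1: marked for fast retransmit, -1: already fast-retransmitted and therefore ineligible) and lower the missing-report threshold from 4 to 3. Below is a simplified, self-contained model of those state transitions; the names are invented and this is not the kernel code, which lives in sctp_mark_missing() and sctp_outq_flush_rtx():

#include <stdio.h>

enum frtx_state {
	FRTX_INELIGIBLE = -1,	/* already fast-retransmitted once */
	FRTX_CLEAR      = 0,	/* no pending fast retransmit */
	FRTX_MARKED     = 1,	/* queued for fast retransmit */
};

struct chunk {
	int tsn_missing_report;
	int fast_retransmit;	/* holds an enum frtx_state value */
};

/* Mark a chunk once three SACKs in a row have reported it missing. */
static int mark_missing(struct chunk *c)
{
	if (++c->tsn_missing_report >= 3 && c->fast_retransmit == FRTX_CLEAR) {
		c->fast_retransmit = FRTX_MARKED;
		return 1;
	}
	return 0;
}

/* After the retransmission is sent, a marked chunk becomes
 * ineligible, so it is only ever fast-retransmitted one time. */
static void after_fast_retransmit(struct chunk *c)
{
	if (c->fast_retransmit > 0)
		c->fast_retransmit = FRTX_INELIGIBLE;
}

int main(void)
{
	struct chunk c = { 0, FRTX_CLEAR };

	for (int i = 0; i < 4; i++)
		if (mark_missing(&c))
			printf("marked after %d reports\n", c.tsn_missing_report);
	after_fast_retransmit(&c);
	printf("final state: %d\n", c.fast_retransmit);
	return 0;
}

The tri-state is what lets the flush path skip already-retransmitted chunks while still distinguishing them from chunks that were never marked at all.
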
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 71c9a961c321..2b9a832b29a7 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c | |||
@@ -884,7 +884,7 @@ sctp_disposition_t sctp_sf_sendbeat_8_3(const struct sctp_endpoint *ep, | |||
884 | { | 884 | { |
885 | struct sctp_transport *transport = (struct sctp_transport *) arg; | 885 | struct sctp_transport *transport = (struct sctp_transport *) arg; |
886 | 886 | ||
887 | if (asoc->overall_error_count > asoc->max_retrans) { | 887 | if (asoc->overall_error_count >= asoc->max_retrans) { |
888 | /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */ | 888 | /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */ |
889 | sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, | 889 | sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, |
890 | SCTP_U32(SCTP_ERROR_NO_ERROR)); | 890 | SCTP_U32(SCTP_ERROR_NO_ERROR)); |
@@ -2122,7 +2122,7 @@ static sctp_disposition_t sctp_sf_do_5_2_6_stale(const struct sctp_endpoint *ep, | |||
2122 | struct sctp_bind_addr *bp; | 2122 | struct sctp_bind_addr *bp; |
2123 | int attempts = asoc->init_err_counter + 1; | 2123 | int attempts = asoc->init_err_counter + 1; |
2124 | 2124 | ||
2125 | if (attempts >= asoc->max_init_attempts) { | 2125 | if (attempts > asoc->max_init_attempts) { |
2126 | sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED, | 2126 | sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED, |
2127 | SCTP_U32(SCTP_ERROR_STALE_COOKIE)); | 2127 | SCTP_U32(SCTP_ERROR_STALE_COOKIE)); |
2128 | return SCTP_DISPOSITION_DELETE_TCB; | 2128 | return SCTP_DISPOSITION_DELETE_TCB; |
@@ -4640,7 +4640,7 @@ sctp_disposition_t sctp_sf_t1_init_timer_expire(const struct sctp_endpoint *ep, | |||
4640 | 4640 | ||
4641 | SCTP_DEBUG_PRINTK("Timer T1 expired (INIT).\n"); | 4641 | SCTP_DEBUG_PRINTK("Timer T1 expired (INIT).\n"); |
4642 | 4642 | ||
4643 | if (attempts < asoc->max_init_attempts) { | 4643 | if (attempts <= asoc->max_init_attempts) { |
4644 | bp = (struct sctp_bind_addr *) &asoc->base.bind_addr; | 4644 | bp = (struct sctp_bind_addr *) &asoc->base.bind_addr; |
4645 | repl = sctp_make_init(asoc, bp, GFP_ATOMIC, 0); | 4645 | repl = sctp_make_init(asoc, bp, GFP_ATOMIC, 0); |
4646 | if (!repl) | 4646 | if (!repl) |
@@ -4697,7 +4697,7 @@ sctp_disposition_t sctp_sf_t1_cookie_timer_expire(const struct sctp_endpoint *ep | |||
4697 | 4697 | ||
4698 | SCTP_DEBUG_PRINTK("Timer T1 expired (COOKIE-ECHO).\n"); | 4698 | SCTP_DEBUG_PRINTK("Timer T1 expired (COOKIE-ECHO).\n"); |
4699 | 4699 | ||
4700 | if (attempts < asoc->max_init_attempts) { | 4700 | if (attempts <= asoc->max_init_attempts) { |
4701 | repl = sctp_make_cookie_echo(asoc, NULL); | 4701 | repl = sctp_make_cookie_echo(asoc, NULL); |
4702 | if (!repl) | 4702 | if (!repl) |
4703 | return SCTP_DISPOSITION_NOMEM; | 4703 | return SCTP_DISPOSITION_NOMEM; |
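
The net/sctp/sm_statefuns.c hunks adjust retry boundaries: attempts is init_err_counter + 1, so testing "attempts > max_init_attempts" (and, for heartbeats, "overall_error_count >= max_retrans") turns the limits into inclusive attempt counts rather than leaving them off by one. A toy model of the INIT retry counting with a made-up limit; only the arithmetic of the sketch itself is asserted here:

#include <stdio.h>

#define MAX_INIT_ATTEMPTS 4	/* hypothetical limit, not a kernel default */

int main(void)
{
	int init_err_counter = 0;	/* failures recorded so far */
	int sent = 0;

	for (;;) {
		/* attempts is 1-based: the current attempt number is
		 * the number of failures so far plus one. */
		int attempts = init_err_counter + 1;

		if (attempts > MAX_INIT_ATTEMPTS) {
			printf("giving up after %d sends\n", sent);
			break;
		}
		sent++;			/* (re)send INIT / COOKIE-ECHO */
		init_err_counter++;	/* pretend every send times out */
	}
	/* With "attempts > MAX" as the give-up test, exactly
	 * MAX_INIT_ATTEMPTS sends happen; the earlier "attempts >= MAX"
	 * form of this test would stop one send early. */
	return 0;
}
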
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index fb1821d9f338..0ea947eb6813 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
@@ -5426,7 +5426,7 @@ out: | |||
5426 | return err; | 5426 | return err; |
5427 | 5427 | ||
5428 | do_error: | 5428 | do_error: |
5429 | if (asoc->init_err_counter + 1 >= asoc->max_init_attempts) | 5429 | if (asoc->init_err_counter + 1 > asoc->max_init_attempts) |
5430 | err = -ETIMEDOUT; | 5430 | err = -ETIMEDOUT; |
5431 | else | 5431 | else |
5432 | err = -ECONNREFUSED; | 5432 | err = -ECONNREFUSED; |
diff --git a/net/socket.c b/net/socket.c index b38a263853c3..a00851f981db 100644 --- a/net/socket.c +++ b/net/socket.c | |||
@@ -2078,7 +2078,7 @@ void socket_seq_show(struct seq_file *seq) | |||
2078 | int cpu; | 2078 | int cpu; |
2079 | int counter = 0; | 2079 | int counter = 0; |
2080 | 2080 | ||
2081 | for (cpu = 0; cpu < NR_CPUS; cpu++) | 2081 | for_each_cpu(cpu) |
2082 | counter += per_cpu(sockets_in_use, cpu); | 2082 | counter += per_cpu(sockets_in_use, cpu); |
2083 | 2083 | ||
2084 | /* It can be negative, by the way. 8) */ | 2084 | /* It can be negative, by the way. 8) */ |
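
The net/socket.c hunk swaps the 0..NR_CPUS-1 loop for for_each_cpu(), the per-CPU iterator of this kernel generation, so only CPUs that actually exist are visited. The underlying idea, modelled here in userspace with a hypothetical fixed CPU count, is a counter that each CPU updates locally and that is only folded together on the (rare) read path:

#include <stdio.h>

#define NR_POSSIBLE_CPUS 4	/* hypothetical machine */

/* One counter per CPU; each CPU only touches its own slot, so the
 * fast path never bounces a shared cache line. */
static long sockets_in_use[NR_POSSIBLE_CPUS];

static void inc_on(int cpu) { sockets_in_use[cpu]++; }
static void dec_on(int cpu) { sockets_in_use[cpu]--; }

/* Read path: fold all per-CPU slots into a single total. */
static long total(void)
{
	long sum = 0;

	for (int cpu = 0; cpu < NR_POSSIBLE_CPUS; cpu++)
		sum += sockets_in_use[cpu];
	return sum;
}

int main(void)
{
	/* A socket opened on CPU 0 and closed on CPU 2 leaves one slot
	 * at +1 and another at -1; the sum is still correct, which is
	 * why an individual slot "can be negative, by the way". */
	inc_on(0);
	dec_on(2);
	inc_on(1);
	printf("total sockets in use: %ld\n", total());
	return 0;
}
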
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c index 9ac1b8c26c01..8d6f1a176b15 100644 --- a/net/sunrpc/auth.c +++ b/net/sunrpc/auth.c | |||
@@ -184,7 +184,7 @@ rpcauth_gc_credcache(struct rpc_auth *auth, struct hlist_head *free) | |||
184 | */ | 184 | */ |
185 | struct rpc_cred * | 185 | struct rpc_cred * |
186 | rpcauth_lookup_credcache(struct rpc_auth *auth, struct auth_cred * acred, | 186 | rpcauth_lookup_credcache(struct rpc_auth *auth, struct auth_cred * acred, |
187 | int taskflags) | 187 | int flags) |
188 | { | 188 | { |
189 | struct rpc_cred_cache *cache = auth->au_credcache; | 189 | struct rpc_cred_cache *cache = auth->au_credcache; |
190 | HLIST_HEAD(free); | 190 | HLIST_HEAD(free); |
@@ -193,7 +193,7 @@ rpcauth_lookup_credcache(struct rpc_auth *auth, struct auth_cred * acred, | |||
193 | *cred = NULL; | 193 | *cred = NULL; |
194 | int nr = 0; | 194 | int nr = 0; |
195 | 195 | ||
196 | if (!(taskflags & RPC_TASK_ROOTCREDS)) | 196 | if (!(flags & RPCAUTH_LOOKUP_ROOTCREDS)) |
197 | nr = acred->uid & RPC_CREDCACHE_MASK; | 197 | nr = acred->uid & RPC_CREDCACHE_MASK; |
198 | retry: | 198 | retry: |
199 | spin_lock(&rpc_credcache_lock); | 199 | spin_lock(&rpc_credcache_lock); |
@@ -202,7 +202,7 @@ retry: | |||
202 | hlist_for_each_safe(pos, next, &cache->hashtable[nr]) { | 202 | hlist_for_each_safe(pos, next, &cache->hashtable[nr]) { |
203 | struct rpc_cred *entry; | 203 | struct rpc_cred *entry; |
204 | entry = hlist_entry(pos, struct rpc_cred, cr_hash); | 204 | entry = hlist_entry(pos, struct rpc_cred, cr_hash); |
205 | if (entry->cr_ops->crmatch(acred, entry, taskflags)) { | 205 | if (entry->cr_ops->crmatch(acred, entry, flags)) { |
206 | hlist_del(&entry->cr_hash); | 206 | hlist_del(&entry->cr_hash); |
207 | cred = entry; | 207 | cred = entry; |
208 | break; | 208 | break; |
@@ -224,7 +224,7 @@ retry: | |||
224 | rpcauth_destroy_credlist(&free); | 224 | rpcauth_destroy_credlist(&free); |
225 | 225 | ||
226 | if (!cred) { | 226 | if (!cred) { |
227 | new = auth->au_ops->crcreate(auth, acred, taskflags); | 227 | new = auth->au_ops->crcreate(auth, acred, flags); |
228 | if (!IS_ERR(new)) { | 228 | if (!IS_ERR(new)) { |
229 | #ifdef RPC_DEBUG | 229 | #ifdef RPC_DEBUG |
230 | new->cr_magic = RPCAUTH_CRED_MAGIC; | 230 | new->cr_magic = RPCAUTH_CRED_MAGIC; |
@@ -232,13 +232,21 @@ retry: | |||
232 | goto retry; | 232 | goto retry; |
233 | } else | 233 | } else |
234 | cred = new; | 234 | cred = new; |
235 | } else if ((cred->cr_flags & RPCAUTH_CRED_NEW) | ||
236 | && cred->cr_ops->cr_init != NULL | ||
237 | && !(flags & RPCAUTH_LOOKUP_NEW)) { | ||
238 | int res = cred->cr_ops->cr_init(auth, cred); | ||
239 | if (res < 0) { | ||
240 | put_rpccred(cred); | ||
241 | cred = ERR_PTR(res); | ||
242 | } | ||
235 | } | 243 | } |
236 | 244 | ||
237 | return (struct rpc_cred *) cred; | 245 | return (struct rpc_cred *) cred; |
238 | } | 246 | } |
239 | 247 | ||
240 | struct rpc_cred * | 248 | struct rpc_cred * |
241 | rpcauth_lookupcred(struct rpc_auth *auth, int taskflags) | 249 | rpcauth_lookupcred(struct rpc_auth *auth, int flags) |
242 | { | 250 | { |
243 | struct auth_cred acred = { | 251 | struct auth_cred acred = { |
244 | .uid = current->fsuid, | 252 | .uid = current->fsuid, |
@@ -250,7 +258,7 @@ rpcauth_lookupcred(struct rpc_auth *auth, int taskflags) | |||
250 | dprintk("RPC: looking up %s cred\n", | 258 | dprintk("RPC: looking up %s cred\n", |
251 | auth->au_ops->au_name); | 259 | auth->au_ops->au_name); |
252 | get_group_info(acred.group_info); | 260 | get_group_info(acred.group_info); |
253 | ret = auth->au_ops->lookup_cred(auth, &acred, taskflags); | 261 | ret = auth->au_ops->lookup_cred(auth, &acred, flags); |
254 | put_group_info(acred.group_info); | 262 | put_group_info(acred.group_info); |
255 | return ret; | 263 | return ret; |
256 | } | 264 | } |
@@ -265,11 +273,14 @@ rpcauth_bindcred(struct rpc_task *task) | |||
265 | .group_info = current->group_info, | 273 | .group_info = current->group_info, |
266 | }; | 274 | }; |
267 | struct rpc_cred *ret; | 275 | struct rpc_cred *ret; |
276 | int flags = 0; | ||
268 | 277 | ||
269 | dprintk("RPC: %4d looking up %s cred\n", | 278 | dprintk("RPC: %4d looking up %s cred\n", |
270 | task->tk_pid, task->tk_auth->au_ops->au_name); | 279 | task->tk_pid, task->tk_auth->au_ops->au_name); |
271 | get_group_info(acred.group_info); | 280 | get_group_info(acred.group_info); |
272 | ret = auth->au_ops->lookup_cred(auth, &acred, task->tk_flags); | 281 | if (task->tk_flags & RPC_TASK_ROOTCREDS) |
282 | flags |= RPCAUTH_LOOKUP_ROOTCREDS; | ||
283 | ret = auth->au_ops->lookup_cred(auth, &acred, flags); | ||
273 | if (!IS_ERR(ret)) | 284 | if (!IS_ERR(ret)) |
274 | task->tk_msg.rpc_cred = ret; | 285 | task->tk_msg.rpc_cred = ret; |
275 | else | 286 | else |
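
The net/sunrpc/auth.c hunks stop threading raw RPC task flags into the credential cache, introduce dedicated lookup flags (RPCAUTH_LOOKUP_ROOTCREDS, RPCAUTH_LOOKUP_NEW), and add a deferred cr_init step for entries still marked RPCAUTH_CRED_NEW. The shape of that lazy initialisation, as a hedged sketch with invented names; the kernel's expensive step is the gss_create_upcall() call moved into gss_cred_init() in auth_gss.c below:

#include <stdio.h>

#define LOOKUP_NEW 0x01		/* caller will initialise the entry itself */

struct cred {
	int uid;
	int is_new;		/* placeholder not yet initialised */
};

/* Expensive step (in the kernel: the upcall to the userspace daemon). */
static int cred_init(struct cred *c)
{
	printf("initialising cred for uid %d\n", c->uid);
	c->is_new = 0;
	return 0;
}

static struct cred cache_entry = { .uid = 1000, .is_new = 1 };

static struct cred *lookup_cred(int uid, int flags)
{
	struct cred *c = &cache_entry;	/* pretend this came from a hash */

	if (c->uid != uid)
		return NULL;
	/* A NEW placeholder satisfies LOOKUP_NEW callers as-is;
	 * everyone else pays for initialisation on first use. */
	if (c->is_new && !(flags & LOOKUP_NEW) && cred_init(c) < 0)
		return NULL;
	return c;
}

int main(void)
{
	lookup_cred(1000, LOOKUP_NEW);	/* no init: downcall-style path */
	lookup_cred(1000, 0);		/* triggers the deferred init */
	return 0;
}
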
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index 8d782282ec19..bb46efd92e57 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c | |||
@@ -158,6 +158,7 @@ gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx) | |||
158 | old = gss_cred->gc_ctx; | 158 | old = gss_cred->gc_ctx; |
159 | gss_cred->gc_ctx = ctx; | 159 | gss_cred->gc_ctx = ctx; |
160 | cred->cr_flags |= RPCAUTH_CRED_UPTODATE; | 160 | cred->cr_flags |= RPCAUTH_CRED_UPTODATE; |
161 | cred->cr_flags &= ~RPCAUTH_CRED_NEW; | ||
161 | write_unlock(&gss_ctx_lock); | 162 | write_unlock(&gss_ctx_lock); |
162 | if (old) | 163 | if (old) |
163 | gss_put_ctx(old); | 164 | gss_put_ctx(old); |
@@ -580,7 +581,7 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen) | |||
580 | } else { | 581 | } else { |
581 | struct auth_cred acred = { .uid = uid }; | 582 | struct auth_cred acred = { .uid = uid }; |
582 | spin_unlock(&gss_auth->lock); | 583 | spin_unlock(&gss_auth->lock); |
583 | cred = rpcauth_lookup_credcache(clnt->cl_auth, &acred, 0); | 584 | cred = rpcauth_lookup_credcache(clnt->cl_auth, &acred, RPCAUTH_LOOKUP_NEW); |
584 | if (IS_ERR(cred)) { | 585 | if (IS_ERR(cred)) { |
585 | err = PTR_ERR(cred); | 586 | err = PTR_ERR(cred); |
586 | goto err_put_ctx; | 587 | goto err_put_ctx; |
@@ -758,13 +759,13 @@ gss_destroy_cred(struct rpc_cred *rc) | |||
758 | * Lookup RPCSEC_GSS cred for the current process | 759 | * Lookup RPCSEC_GSS cred for the current process |
759 | */ | 760 | */ |
760 | static struct rpc_cred * | 761 | static struct rpc_cred * |
761 | gss_lookup_cred(struct rpc_auth *auth, struct auth_cred *acred, int taskflags) | 762 | gss_lookup_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags) |
762 | { | 763 | { |
763 | return rpcauth_lookup_credcache(auth, acred, taskflags); | 764 | return rpcauth_lookup_credcache(auth, acred, flags); |
764 | } | 765 | } |
765 | 766 | ||
766 | static struct rpc_cred * | 767 | static struct rpc_cred * |
767 | gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int taskflags) | 768 | gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags) |
768 | { | 769 | { |
769 | struct gss_auth *gss_auth = container_of(auth, struct gss_auth, rpc_auth); | 770 | struct gss_auth *gss_auth = container_of(auth, struct gss_auth, rpc_auth); |
770 | struct gss_cred *cred = NULL; | 771 | struct gss_cred *cred = NULL; |
@@ -785,13 +786,8 @@ gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int taskflags) | |||
785 | */ | 786 | */ |
786 | cred->gc_flags = 0; | 787 | cred->gc_flags = 0; |
787 | cred->gc_base.cr_ops = &gss_credops; | 788 | cred->gc_base.cr_ops = &gss_credops; |
789 | cred->gc_base.cr_flags = RPCAUTH_CRED_NEW; | ||
788 | cred->gc_service = gss_auth->service; | 790 | cred->gc_service = gss_auth->service; |
789 | do { | ||
790 | err = gss_create_upcall(gss_auth, cred); | ||
791 | } while (err == -EAGAIN); | ||
792 | if (err < 0) | ||
793 | goto out_err; | ||
794 | |||
795 | return &cred->gc_base; | 791 | return &cred->gc_base; |
796 | 792 | ||
797 | out_err: | 793 | out_err: |
@@ -801,13 +797,34 @@ out_err: | |||
801 | } | 797 | } |
802 | 798 | ||
803 | static int | 799 | static int |
804 | gss_match(struct auth_cred *acred, struct rpc_cred *rc, int taskflags) | 800 | gss_cred_init(struct rpc_auth *auth, struct rpc_cred *cred) |
801 | { | ||
802 | struct gss_auth *gss_auth = container_of(auth, struct gss_auth, rpc_auth); | ||
803 | struct gss_cred *gss_cred = container_of(cred,struct gss_cred, gc_base); | ||
804 | int err; | ||
805 | |||
806 | do { | ||
807 | err = gss_create_upcall(gss_auth, gss_cred); | ||
808 | } while (err == -EAGAIN); | ||
809 | return err; | ||
810 | } | ||
811 | |||
812 | static int | ||
813 | gss_match(struct auth_cred *acred, struct rpc_cred *rc, int flags) | ||
805 | { | 814 | { |
806 | struct gss_cred *gss_cred = container_of(rc, struct gss_cred, gc_base); | 815 | struct gss_cred *gss_cred = container_of(rc, struct gss_cred, gc_base); |
807 | 816 | ||
817 | /* | ||
818 | * If the searchflags have set RPCAUTH_LOOKUP_NEW, then | ||
819 | * we don't really care if the credential has expired or not, | ||
820 | * since the caller should be prepared to reinitialise it. | ||
821 | */ | ||
822 | if ((flags & RPCAUTH_LOOKUP_NEW) && (rc->cr_flags & RPCAUTH_CRED_NEW)) | ||
823 | goto out; | ||
808 | /* Don't match with creds that have expired. */ | 824 | /* Don't match with creds that have expired. */ |
809 | if (gss_cred->gc_ctx && time_after(jiffies, gss_cred->gc_ctx->gc_expiry)) | 825 | if (gss_cred->gc_ctx && time_after(jiffies, gss_cred->gc_ctx->gc_expiry)) |
810 | return 0; | 826 | return 0; |
827 | out: | ||
811 | return (rc->cr_uid == acred->uid); | 828 | return (rc->cr_uid == acred->uid); |
812 | } | 829 | } |
813 | 830 | ||
@@ -1241,6 +1258,7 @@ static struct rpc_authops authgss_ops = { | |||
1241 | static struct rpc_credops gss_credops = { | 1258 | static struct rpc_credops gss_credops = { |
1242 | .cr_name = "AUTH_GSS", | 1259 | .cr_name = "AUTH_GSS", |
1243 | .crdestroy = gss_destroy_cred, | 1260 | .crdestroy = gss_destroy_cred, |
1261 | .cr_init = gss_cred_init, | ||
1244 | .crmatch = gss_match, | 1262 | .crmatch = gss_match, |
1245 | .crmarshal = gss_marshal, | 1263 | .crmarshal = gss_marshal, |
1246 | .crrefresh = gss_refresh, | 1264 | .crrefresh = gss_refresh, |
diff --git a/net/sunrpc/auth_unix.c b/net/sunrpc/auth_unix.c index 1b3ed4fd1987..df14b6bfbf10 100644 --- a/net/sunrpc/auth_unix.c +++ b/net/sunrpc/auth_unix.c | |||
@@ -75,7 +75,7 @@ unx_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags) | |||
75 | 75 | ||
76 | atomic_set(&cred->uc_count, 1); | 76 | atomic_set(&cred->uc_count, 1); |
77 | cred->uc_flags = RPCAUTH_CRED_UPTODATE; | 77 | cred->uc_flags = RPCAUTH_CRED_UPTODATE; |
78 | if (flags & RPC_TASK_ROOTCREDS) { | 78 | if (flags & RPCAUTH_LOOKUP_ROOTCREDS) { |
79 | cred->uc_uid = 0; | 79 | cred->uc_uid = 0; |
80 | cred->uc_gid = 0; | 80 | cred->uc_gid = 0; |
81 | cred->uc_gids[0] = NOGROUP; | 81 | cred->uc_gids[0] = NOGROUP; |
@@ -108,12 +108,12 @@ unx_destroy_cred(struct rpc_cred *cred) | |||
108 | * request root creds (e.g. for NFS swapping). | 108 | * request root creds (e.g. for NFS swapping). |
109 | */ | 109 | */ |
110 | static int | 110 | static int |
111 | unx_match(struct auth_cred *acred, struct rpc_cred *rcred, int taskflags) | 111 | unx_match(struct auth_cred *acred, struct rpc_cred *rcred, int flags) |
112 | { | 112 | { |
113 | struct unx_cred *cred = (struct unx_cred *) rcred; | 113 | struct unx_cred *cred = (struct unx_cred *) rcred; |
114 | int i; | 114 | int i; |
115 | 115 | ||
116 | if (!(taskflags & RPC_TASK_ROOTCREDS)) { | 116 | if (!(flags & RPCAUTH_LOOKUP_ROOTCREDS)) { |
117 | int groups; | 117 | int groups; |
118 | 118 | ||
119 | if (cred->uc_uid != acred->uid | 119 | if (cred->uc_uid != acred->uid |
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index d2f0550c4ba0..d78479782045 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c | |||
@@ -113,7 +113,7 @@ rpc_new_client(struct rpc_xprt *xprt, char *servname, | |||
113 | 113 | ||
114 | err = -EINVAL; | 114 | err = -EINVAL; |
115 | if (!xprt) | 115 | if (!xprt) |
116 | goto out_err; | 116 | goto out_no_xprt; |
117 | if (vers >= program->nrvers || !(version = program->version[vers])) | 117 | if (vers >= program->nrvers || !(version = program->version[vers])) |
118 | goto out_err; | 118 | goto out_err; |
119 | 119 | ||
@@ -182,6 +182,7 @@ out_no_path: | |||
182 | kfree(clnt); | 182 | kfree(clnt); |
183 | out_err: | 183 | out_err: |
184 | xprt_destroy(xprt); | 184 | xprt_destroy(xprt); |
185 | out_no_xprt: | ||
185 | return ERR_PTR(err); | 186 | return ERR_PTR(err); |
186 | } | 187 | } |
187 | 188 | ||
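
The net/sunrpc/clnt.c fix adds an out_no_xprt label so the "no transport" failure no longer falls through to xprt_destroy() with a NULL pointer: with goto-style unwinding, each label should undo only what was set up before the jump. A generic sketch of that label ordering; create_client() and struct xprt here are stand-ins, not the RPC client API:

#include <stdio.h>
#include <stdlib.h>

struct xprt { int dummy; };

static int create_client(struct xprt *xprt, int vers)
{
	int err = -22;			/* -EINVAL */

	if (!xprt)
		goto out_no_xprt;	/* nothing to tear down yet */
	if (vers < 0)
		goto out_err;		/* transport exists, must be destroyed */

	return 0;			/* success: caller keeps the transport */

out_err:
	free(xprt);			/* only reached with a valid xprt */
out_no_xprt:
	return err;
}

int main(void)
{
	struct xprt *x = malloc(sizeof(*x));

	printf("%d\n", create_client(NULL, 1));	/* skips the destroy */
	printf("%d\n", create_client(x, -1));	/* destroys the transport */
	return 0;
}
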
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index 9764c80ab0b2..a5c0c7b6e151 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c | |||
@@ -38,44 +38,42 @@ static kmem_cache_t *rpc_inode_cachep __read_mostly; | |||
38 | 38 | ||
39 | #define RPC_UPCALL_TIMEOUT (30*HZ) | 39 | #define RPC_UPCALL_TIMEOUT (30*HZ) |
40 | 40 | ||
41 | static void | 41 | static void rpc_purge_list(struct rpc_inode *rpci, struct list_head *head, |
42 | __rpc_purge_list(struct rpc_inode *rpci, struct list_head *head, int err) | 42 | void (*destroy_msg)(struct rpc_pipe_msg *), int err) |
43 | { | 43 | { |
44 | struct rpc_pipe_msg *msg; | 44 | struct rpc_pipe_msg *msg; |
45 | void (*destroy_msg)(struct rpc_pipe_msg *); | ||
46 | 45 | ||
47 | destroy_msg = rpci->ops->destroy_msg; | 46 | if (list_empty(head)) |
48 | while (!list_empty(head)) { | 47 | return; |
48 | do { | ||
49 | msg = list_entry(head->next, struct rpc_pipe_msg, list); | 49 | msg = list_entry(head->next, struct rpc_pipe_msg, list); |
50 | list_del_init(&msg->list); | 50 | list_del(&msg->list); |
51 | msg->errno = err; | 51 | msg->errno = err; |
52 | destroy_msg(msg); | 52 | destroy_msg(msg); |
53 | } | 53 | } while (!list_empty(head)); |
54 | } | ||
55 | |||
56 | static void | ||
57 | __rpc_purge_upcall(struct inode *inode, int err) | ||
58 | { | ||
59 | struct rpc_inode *rpci = RPC_I(inode); | ||
60 | |||
61 | __rpc_purge_list(rpci, &rpci->pipe, err); | ||
62 | rpci->pipelen = 0; | ||
63 | wake_up(&rpci->waitq); | 54 | wake_up(&rpci->waitq); |
64 | } | 55 | } |
65 | 56 | ||
66 | static void | 57 | static void |
67 | rpc_timeout_upcall_queue(void *data) | 58 | rpc_timeout_upcall_queue(void *data) |
68 | { | 59 | { |
60 | LIST_HEAD(free_list); | ||
69 | struct rpc_inode *rpci = (struct rpc_inode *)data; | 61 | struct rpc_inode *rpci = (struct rpc_inode *)data; |
70 | struct inode *inode = &rpci->vfs_inode; | 62 | struct inode *inode = &rpci->vfs_inode; |
63 | void (*destroy_msg)(struct rpc_pipe_msg *); | ||
71 | 64 | ||
72 | mutex_lock(&inode->i_mutex); | 65 | spin_lock(&inode->i_lock); |
73 | if (rpci->ops == NULL) | 66 | if (rpci->ops == NULL) { |
74 | goto out; | 67 | spin_unlock(&inode->i_lock); |
75 | if (rpci->nreaders == 0 && !list_empty(&rpci->pipe)) | 68 | return; |
76 | __rpc_purge_upcall(inode, -ETIMEDOUT); | 69 | } |
77 | out: | 70 | destroy_msg = rpci->ops->destroy_msg; |
78 | mutex_unlock(&inode->i_mutex); | 71 | if (rpci->nreaders == 0) { |
72 | list_splice_init(&rpci->pipe, &free_list); | ||
73 | rpci->pipelen = 0; | ||
74 | } | ||
75 | spin_unlock(&inode->i_lock); | ||
76 | rpc_purge_list(rpci, &free_list, destroy_msg, -ETIMEDOUT); | ||
79 | } | 77 | } |
80 | 78 | ||
81 | int | 79 | int |
@@ -84,7 +82,7 @@ rpc_queue_upcall(struct inode *inode, struct rpc_pipe_msg *msg) | |||
84 | struct rpc_inode *rpci = RPC_I(inode); | 82 | struct rpc_inode *rpci = RPC_I(inode); |
85 | int res = -EPIPE; | 83 | int res = -EPIPE; |
86 | 84 | ||
87 | mutex_lock(&inode->i_mutex); | 85 | spin_lock(&inode->i_lock); |
88 | if (rpci->ops == NULL) | 86 | if (rpci->ops == NULL) |
89 | goto out; | 87 | goto out; |
90 | if (rpci->nreaders) { | 88 | if (rpci->nreaders) { |
@@ -100,7 +98,7 @@ rpc_queue_upcall(struct inode *inode, struct rpc_pipe_msg *msg) | |||
100 | res = 0; | 98 | res = 0; |
101 | } | 99 | } |
102 | out: | 100 | out: |
103 | mutex_unlock(&inode->i_mutex); | 101 | spin_unlock(&inode->i_lock); |
104 | wake_up(&rpci->waitq); | 102 | wake_up(&rpci->waitq); |
105 | return res; | 103 | return res; |
106 | } | 104 | } |
@@ -115,21 +113,29 @@ static void | |||
115 | rpc_close_pipes(struct inode *inode) | 113 | rpc_close_pipes(struct inode *inode) |
116 | { | 114 | { |
117 | struct rpc_inode *rpci = RPC_I(inode); | 115 | struct rpc_inode *rpci = RPC_I(inode); |
116 | struct rpc_pipe_ops *ops; | ||
118 | 117 | ||
119 | mutex_lock(&inode->i_mutex); | 118 | mutex_lock(&inode->i_mutex); |
120 | if (rpci->ops != NULL) { | 119 | ops = rpci->ops; |
120 | if (ops != NULL) { | ||
121 | LIST_HEAD(free_list); | ||
122 | |||
123 | spin_lock(&inode->i_lock); | ||
121 | rpci->nreaders = 0; | 124 | rpci->nreaders = 0; |
122 | __rpc_purge_list(rpci, &rpci->in_upcall, -EPIPE); | 125 | list_splice_init(&rpci->in_upcall, &free_list); |
123 | __rpc_purge_upcall(inode, -EPIPE); | 126 | list_splice_init(&rpci->pipe, &free_list); |
124 | rpci->nwriters = 0; | 127 | rpci->pipelen = 0; |
125 | if (rpci->ops->release_pipe) | ||
126 | rpci->ops->release_pipe(inode); | ||
127 | rpci->ops = NULL; | 128 | rpci->ops = NULL; |
129 | spin_unlock(&inode->i_lock); | ||
130 | rpc_purge_list(rpci, &free_list, ops->destroy_msg, -EPIPE); | ||
131 | rpci->nwriters = 0; | ||
132 | if (ops->release_pipe) | ||
133 | ops->release_pipe(inode); | ||
134 | cancel_delayed_work(&rpci->queue_timeout); | ||
135 | flush_scheduled_work(); | ||
128 | } | 136 | } |
129 | rpc_inode_setowner(inode, NULL); | 137 | rpc_inode_setowner(inode, NULL); |
130 | mutex_unlock(&inode->i_mutex); | 138 | mutex_unlock(&inode->i_mutex); |
131 | cancel_delayed_work(&rpci->queue_timeout); | ||
132 | flush_scheduled_work(); | ||
133 | } | 139 | } |
134 | 140 | ||
135 | static struct inode * | 141 | static struct inode * |
@@ -177,16 +183,26 @@ rpc_pipe_release(struct inode *inode, struct file *filp) | |||
177 | goto out; | 183 | goto out; |
178 | msg = (struct rpc_pipe_msg *)filp->private_data; | 184 | msg = (struct rpc_pipe_msg *)filp->private_data; |
179 | if (msg != NULL) { | 185 | if (msg != NULL) { |
186 | spin_lock(&inode->i_lock); | ||
180 | msg->errno = -EAGAIN; | 187 | msg->errno = -EAGAIN; |
181 | list_del_init(&msg->list); | 188 | list_del(&msg->list); |
189 | spin_unlock(&inode->i_lock); | ||
182 | rpci->ops->destroy_msg(msg); | 190 | rpci->ops->destroy_msg(msg); |
183 | } | 191 | } |
184 | if (filp->f_mode & FMODE_WRITE) | 192 | if (filp->f_mode & FMODE_WRITE) |
185 | rpci->nwriters --; | 193 | rpci->nwriters --; |
186 | if (filp->f_mode & FMODE_READ) | 194 | if (filp->f_mode & FMODE_READ) { |
187 | rpci->nreaders --; | 195 | rpci->nreaders --; |
188 | if (!rpci->nreaders) | 196 | if (rpci->nreaders == 0) { |
189 | __rpc_purge_upcall(inode, -EAGAIN); | 197 | LIST_HEAD(free_list); |
198 | spin_lock(&inode->i_lock); | ||
199 | list_splice_init(&rpci->pipe, &free_list); | ||
200 | rpci->pipelen = 0; | ||
201 | spin_unlock(&inode->i_lock); | ||
202 | rpc_purge_list(rpci, &free_list, | ||
203 | rpci->ops->destroy_msg, -EAGAIN); | ||
204 | } | ||
205 | } | ||
190 | if (rpci->ops->release_pipe) | 206 | if (rpci->ops->release_pipe) |
191 | rpci->ops->release_pipe(inode); | 207 | rpci->ops->release_pipe(inode); |
192 | out: | 208 | out: |
@@ -209,6 +225,7 @@ rpc_pipe_read(struct file *filp, char __user *buf, size_t len, loff_t *offset) | |||
209 | } | 225 | } |
210 | msg = filp->private_data; | 226 | msg = filp->private_data; |
211 | if (msg == NULL) { | 227 | if (msg == NULL) { |
228 | spin_lock(&inode->i_lock); | ||
212 | if (!list_empty(&rpci->pipe)) { | 229 | if (!list_empty(&rpci->pipe)) { |
213 | msg = list_entry(rpci->pipe.next, | 230 | msg = list_entry(rpci->pipe.next, |
214 | struct rpc_pipe_msg, | 231 | struct rpc_pipe_msg, |
@@ -218,6 +235,7 @@ rpc_pipe_read(struct file *filp, char __user *buf, size_t len, loff_t *offset) | |||
218 | filp->private_data = msg; | 235 | filp->private_data = msg; |
219 | msg->copied = 0; | 236 | msg->copied = 0; |
220 | } | 237 | } |
238 | spin_unlock(&inode->i_lock); | ||
221 | if (msg == NULL) | 239 | if (msg == NULL) |
222 | goto out_unlock; | 240 | goto out_unlock; |
223 | } | 241 | } |
@@ -225,7 +243,9 @@ rpc_pipe_read(struct file *filp, char __user *buf, size_t len, loff_t *offset) | |||
225 | res = rpci->ops->upcall(filp, msg, buf, len); | 243 | res = rpci->ops->upcall(filp, msg, buf, len); |
226 | if (res < 0 || msg->len == msg->copied) { | 244 | if (res < 0 || msg->len == msg->copied) { |
227 | filp->private_data = NULL; | 245 | filp->private_data = NULL; |
228 | list_del_init(&msg->list); | 246 | spin_lock(&inode->i_lock); |
247 | list_del(&msg->list); | ||
248 | spin_unlock(&inode->i_lock); | ||
229 | rpci->ops->destroy_msg(msg); | 249 | rpci->ops->destroy_msg(msg); |
230 | } | 250 | } |
231 | out_unlock: | 251 | out_unlock: |
@@ -610,7 +630,7 @@ rpc_lookup_negative(char *path, struct nameidata *nd) | |||
610 | return ERR_PTR(error); | 630 | return ERR_PTR(error); |
611 | dir = nd->dentry->d_inode; | 631 | dir = nd->dentry->d_inode; |
612 | mutex_lock(&dir->i_mutex); | 632 | mutex_lock(&dir->i_mutex); |
613 | dentry = lookup_hash(nd); | 633 | dentry = lookup_one_len(nd->last.name, nd->dentry, nd->last.len); |
614 | if (IS_ERR(dentry)) | 634 | if (IS_ERR(dentry)) |
615 | goto out_err; | 635 | goto out_err; |
616 | if (dentry->d_inode) { | 636 | if (dentry->d_inode) { |
@@ -672,7 +692,7 @@ rpc_rmdir(char *path) | |||
672 | return error; | 692 | return error; |
673 | dir = nd.dentry->d_inode; | 693 | dir = nd.dentry->d_inode; |
674 | mutex_lock(&dir->i_mutex); | 694 | mutex_lock(&dir->i_mutex); |
675 | dentry = lookup_hash(&nd); | 695 | dentry = lookup_one_len(nd.last.name, nd.dentry, nd.last.len); |
676 | if (IS_ERR(dentry)) { | 696 | if (IS_ERR(dentry)) { |
677 | error = PTR_ERR(dentry); | 697 | error = PTR_ERR(dentry); |
678 | goto out_release; | 698 | goto out_release; |
@@ -733,7 +753,7 @@ rpc_unlink(char *path) | |||
733 | return error; | 753 | return error; |
734 | dir = nd.dentry->d_inode; | 754 | dir = nd.dentry->d_inode; |
735 | mutex_lock(&dir->i_mutex); | 755 | mutex_lock(&dir->i_mutex); |
736 | dentry = lookup_hash(&nd); | 756 | dentry = lookup_one_len(nd.last.name, nd.dentry, nd.last.len); |
737 | if (IS_ERR(dentry)) { | 757 | if (IS_ERR(dentry)) { |
738 | error = PTR_ERR(dentry); | 758 | error = PTR_ERR(dentry); |
739 | goto out_release; | 759 | goto out_release; |
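
The net/sunrpc/rpc_pipe.c rework replaces i_mutex with the inode spinlock around the upcall queues, and every purge path now splices the queue onto a private list head while holding the lock and calls destroy_msg() on the detached list afterwards, presumably because the destructors may sleep and so must not run under a spinlock. The generic "detach under the lock, destroy outside it" idiom, sketched with pthreads and a toy list:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct msg { struct msg *next; int id; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct msg *pipe_head;	/* queue protected by 'lock' */

static void queue_msg(int id)
{
	struct msg *m = malloc(sizeof(*m));

	m->id = id;
	pthread_mutex_lock(&lock);
	m->next = pipe_head;
	pipe_head = m;
	pthread_mutex_unlock(&lock);
}

/* Splice the whole queue onto a local head while holding the lock,
 * then run the (possibly slow or sleeping) destructor unlocked. */
static void purge_queue(void)
{
	struct msg *free_list;

	pthread_mutex_lock(&lock);
	free_list = pipe_head;
	pipe_head = NULL;
	pthread_mutex_unlock(&lock);

	while (free_list) {
		struct msg *m = free_list;

		free_list = m->next;
		printf("destroying msg %d\n", m->id);
		free(m);
	}
}

int main(void)
{
	queue_msg(1);
	queue_msg(2);
	purge_queue();
	return 0;
}
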
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index 7415406aa1ae..e838d042f7f5 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c | |||
@@ -515,16 +515,14 @@ struct rpc_task * rpc_wake_up_next(struct rpc_wait_queue *queue) | |||
515 | */ | 515 | */ |
516 | void rpc_wake_up(struct rpc_wait_queue *queue) | 516 | void rpc_wake_up(struct rpc_wait_queue *queue) |
517 | { | 517 | { |
518 | struct rpc_task *task; | 518 | struct rpc_task *task, *next; |
519 | |||
520 | struct list_head *head; | 519 | struct list_head *head; |
520 | |||
521 | spin_lock_bh(&queue->lock); | 521 | spin_lock_bh(&queue->lock); |
522 | head = &queue->tasks[queue->maxpriority]; | 522 | head = &queue->tasks[queue->maxpriority]; |
523 | for (;;) { | 523 | for (;;) { |
524 | while (!list_empty(head)) { | 524 | list_for_each_entry_safe(task, next, head, u.tk_wait.list) |
525 | task = list_entry(head->next, struct rpc_task, u.tk_wait.list); | ||
526 | __rpc_wake_up_task(task); | 525 | __rpc_wake_up_task(task); |
527 | } | ||
528 | if (head == &queue->tasks[0]) | 526 | if (head == &queue->tasks[0]) |
529 | break; | 527 | break; |
530 | head--; | 528 | head--; |
@@ -541,14 +539,13 @@ void rpc_wake_up(struct rpc_wait_queue *queue) | |||
541 | */ | 539 | */ |
542 | void rpc_wake_up_status(struct rpc_wait_queue *queue, int status) | 540 | void rpc_wake_up_status(struct rpc_wait_queue *queue, int status) |
543 | { | 541 | { |
542 | struct rpc_task *task, *next; | ||
544 | struct list_head *head; | 543 | struct list_head *head; |
545 | struct rpc_task *task; | ||
546 | 544 | ||
547 | spin_lock_bh(&queue->lock); | 545 | spin_lock_bh(&queue->lock); |
548 | head = &queue->tasks[queue->maxpriority]; | 546 | head = &queue->tasks[queue->maxpriority]; |
549 | for (;;) { | 547 | for (;;) { |
550 | while (!list_empty(head)) { | 548 | list_for_each_entry_safe(task, next, head, u.tk_wait.list) { |
551 | task = list_entry(head->next, struct rpc_task, u.tk_wait.list); | ||
552 | task->tk_status = status; | 549 | task->tk_status = status; |
553 | __rpc_wake_up_task(task); | 550 | __rpc_wake_up_task(task); |
554 | } | 551 | } |
@@ -908,10 +905,10 @@ void rpc_release_task(struct rpc_task *task) | |||
908 | 905 | ||
909 | /** | 906 | /** |
910 | * rpc_run_task - Allocate a new RPC task, then run rpc_execute against it | 907 | * rpc_run_task - Allocate a new RPC task, then run rpc_execute against it |
911 | * @clnt - pointer to RPC client | 908 | * @clnt: pointer to RPC client |
912 | * @flags - RPC flags | 909 | * @flags: RPC flags |
913 | * @ops - RPC call ops | 910 | * @ops: RPC call ops |
914 | * @data - user call data | 911 | * @data: user call data |
915 | */ | 912 | */ |
916 | struct rpc_task *rpc_run_task(struct rpc_clnt *clnt, int flags, | 913 | struct rpc_task *rpc_run_task(struct rpc_clnt *clnt, int flags, |
917 | const struct rpc_call_ops *ops, | 914 | const struct rpc_call_ops *ops, |
@@ -930,6 +927,7 @@ EXPORT_SYMBOL(rpc_run_task); | |||
930 | /** | 927 | /** |
931 | * rpc_find_parent - find the parent of a child task. | 928 | * rpc_find_parent - find the parent of a child task. |
932 | * @child: child task | 929 | * @child: child task |
930 | * @parent: parent task | ||
933 | * | 931 | * |
934 | * Checks that the parent task is still sleeping on the | 932 | * Checks that the parent task is still sleeping on the |
935 | * queue 'childq'. If so returns a pointer to the parent. | 933 | * queue 'childq'. If so returns a pointer to the parent. |
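
The net/sunrpc/sched.c hunks switch rpc_wake_up() and rpc_wake_up_status() to list_for_each_entry_safe(): __rpc_wake_up_task() unlinks the task from the queue, so the iterator must cache the next element before the current one disappears. The same rule applied to a minimal singly-linked queue (not the kernel list API):

#include <stdio.h>
#include <stdlib.h>

struct task { struct task *next; int id; };

static struct task *queue;

static void enqueue(int id)
{
	struct task *t = malloc(sizeof(*t));

	t->id = id;
	t->next = queue;
	queue = t;
}

/* Wake (and free) every queued task.  'next' is saved first -- the
 * _safe-iteration rule -- because waking detaches the current node. */
static void wake_up_all(void)
{
	struct task *t, *next;

	for (t = queue; t; t = next) {
		next = t->next;		/* must be read before the unlink */
		printf("waking task %d\n", t->id);
		free(t);		/* node is gone after this */
	}
	queue = NULL;
}

int main(void)
{
	enqueue(1);
	enqueue(2);
	enqueue(3);
	wake_up_all();
	return 0;
}
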
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 1b5989b1b670..c323cc6a28b0 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c | |||
@@ -547,7 +547,7 @@ static struct sock * unix_create1(struct socket *sock) | |||
547 | struct sock *sk = NULL; | 547 | struct sock *sk = NULL; |
548 | struct unix_sock *u; | 548 | struct unix_sock *u; |
549 | 549 | ||
550 | if (atomic_read(&unix_nr_socks) >= 2*files_stat.max_files) | 550 | if (atomic_read(&unix_nr_socks) >= 2*get_max_files()) |
551 | goto out; | 551 | goto out; |
552 | 552 | ||
553 | sk = sk_alloc(PF_UNIX, GFP_KERNEL, &unix_proto, 1); | 553 | sk = sk_alloc(PF_UNIX, GFP_KERNEL, &unix_proto, 1); |
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 077bbf9fb9b7..ae62054a9fc4 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c | |||
@@ -13,7 +13,6 @@ | |||
13 | * | 13 | * |
14 | */ | 14 | */ |
15 | 15 | ||
16 | #include <asm/bug.h> | ||
17 | #include <linux/config.h> | 16 | #include <linux/config.h> |
18 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
19 | #include <linux/kmod.h> | 18 | #include <linux/kmod.h> |
@@ -783,7 +782,7 @@ int xfrm_lookup(struct dst_entry **dst_p, struct flowi *fl, | |||
783 | int nx = 0; | 782 | int nx = 0; |
784 | int err; | 783 | int err; |
785 | u32 genid; | 784 | u32 genid; |
786 | u16 family = dst_orig->ops->family; | 785 | u16 family; |
787 | u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT); | 786 | u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT); |
788 | u32 sk_sid = security_sk_sid(sk, fl, dir); | 787 | u32 sk_sid = security_sk_sid(sk, fl, dir); |
789 | restart: | 788 | restart: |
@@ -797,13 +796,14 @@ restart: | |||
797 | if ((dst_orig->flags & DST_NOXFRM) || !xfrm_policy_list[XFRM_POLICY_OUT]) | 796 | if ((dst_orig->flags & DST_NOXFRM) || !xfrm_policy_list[XFRM_POLICY_OUT]) |
798 | return 0; | 797 | return 0; |
799 | 798 | ||
800 | policy = flow_cache_lookup(fl, sk_sid, family, dir, | 799 | policy = flow_cache_lookup(fl, sk_sid, dst_orig->ops->family, |
801 | xfrm_policy_lookup); | 800 | dir, xfrm_policy_lookup); |
802 | } | 801 | } |
803 | 802 | ||
804 | if (!policy) | 803 | if (!policy) |
805 | return 0; | 804 | return 0; |
806 | 805 | ||
806 | family = dst_orig->ops->family; | ||
807 | policy->curlft.use_time = (unsigned long)xtime.tv_sec; | 807 | policy->curlft.use_time = (unsigned long)xtime.tv_sec; |
808 | 808 | ||
809 | switch (policy->action) { | 809 | switch (policy->action) { |
@@ -886,11 +886,11 @@ restart: | |||
886 | * We can't enlist stable bundles either. | 886 | * We can't enlist stable bundles either. |
887 | */ | 887 | */ |
888 | write_unlock_bh(&policy->lock); | 888 | write_unlock_bh(&policy->lock); |
889 | |||
890 | xfrm_pol_put(policy); | ||
891 | if (dst) | 889 | if (dst) |
892 | dst_free(dst); | 890 | dst_free(dst); |
893 | goto restart; | 891 | |
892 | err = -EHOSTUNREACH; | ||
893 | goto error; | ||
894 | } | 894 | } |
895 | dst->next = policy->bundles; | 895 | dst->next = policy->bundles; |
896 | policy->bundles = dst; | 896 | policy->bundles = dst; |
@@ -996,13 +996,6 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, | |||
996 | struct sec_decap_state *xvec = &(skb->sp->x[i]); | 996 | struct sec_decap_state *xvec = &(skb->sp->x[i]); |
997 | if (!xfrm_selector_match(&xvec->xvec->sel, &fl, family)) | 997 | if (!xfrm_selector_match(&xvec->xvec->sel, &fl, family)) |
998 | return 0; | 998 | return 0; |
999 | |||
1000 | /* If there is a post_input processor, try running it */ | ||
1001 | if (xvec->xvec->type->post_input && | ||
1002 | (xvec->xvec->type->post_input)(xvec->xvec, | ||
1003 | &(xvec->decap), | ||
1004 | skb) != 0) | ||
1005 | return 0; | ||
1006 | } | 999 | } |
1007 | } | 1000 | } |
1008 | 1001 | ||
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index e12d0be5f976..c656cbaf35e8 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c | |||
@@ -220,14 +220,14 @@ static int __xfrm_state_delete(struct xfrm_state *x) | |||
220 | x->km.state = XFRM_STATE_DEAD; | 220 | x->km.state = XFRM_STATE_DEAD; |
221 | spin_lock(&xfrm_state_lock); | 221 | spin_lock(&xfrm_state_lock); |
222 | list_del(&x->bydst); | 222 | list_del(&x->bydst); |
223 | atomic_dec(&x->refcnt); | 223 | __xfrm_state_put(x); |
224 | if (x->id.spi) { | 224 | if (x->id.spi) { |
225 | list_del(&x->byspi); | 225 | list_del(&x->byspi); |
226 | atomic_dec(&x->refcnt); | 226 | __xfrm_state_put(x); |
227 | } | 227 | } |
228 | spin_unlock(&xfrm_state_lock); | 228 | spin_unlock(&xfrm_state_lock); |
229 | if (del_timer(&x->timer)) | 229 | if (del_timer(&x->timer)) |
230 | atomic_dec(&x->refcnt); | 230 | __xfrm_state_put(x); |
231 | 231 | ||
232 | /* The number two in this test is the reference | 232 | /* The number two in this test is the reference |
233 | * mentioned in the comment below plus the reference | 233 | * mentioned in the comment below plus the reference |
@@ -243,7 +243,7 @@ static int __xfrm_state_delete(struct xfrm_state *x) | |||
243 | * The xfrm_state_alloc call gives a reference, and that | 243 | * The xfrm_state_alloc call gives a reference, and that |
244 | * is what we are dropping here. | 244 | * is what we are dropping here. |
245 | */ | 245 | */ |
246 | atomic_dec(&x->refcnt); | 246 | __xfrm_state_put(x); |
247 | err = 0; | 247 | err = 0; |
248 | } | 248 | } |
249 | 249 | ||
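
The net/xfrm/xfrm_state.c hunks drop references through __xfrm_state_put() instead of open-coded atomic_dec(&x->refcnt), so every release goes through one helper where accounting or debug checks can live. A minimal sketch of funnelling refcount releases through a single put helper, using C11 atomics; the underflow warning is purely illustrative, not a claim about what __xfrm_state_put() does:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct state {
	atomic_int refcnt;
};

/* Single release helper: every path that drops a reference calls
 * this, so underflow checks and tracing live in exactly one place. */
static void state_put(struct state *x)
{
	int old = atomic_fetch_sub(&x->refcnt, 1);

	if (old <= 0)
		fprintf(stderr, "refcount underflow!\n");
	else if (old == 1)
		free(x);	/* last reference gone */
}

int main(void)
{
	struct state *x = malloc(sizeof(*x));

	atomic_init(&x->refcnt, 2);	/* e.g. hash-table ref + caller ref */
	state_put(x);			/* unhash */
	state_put(x);			/* final put frees the object */
	return 0;
}
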
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index ac87a09ba83e..7de17559249a 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c | |||
@@ -345,7 +345,7 @@ static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma) | |||
345 | 345 | ||
346 | if (err < 0) { | 346 | if (err < 0) { |
347 | x->km.state = XFRM_STATE_DEAD; | 347 | x->km.state = XFRM_STATE_DEAD; |
348 | xfrm_state_put(x); | 348 | __xfrm_state_put(x); |
349 | goto out; | 349 | goto out; |
350 | } | 350 | } |
351 | 351 | ||