author		Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-04-30 11:14:42 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-04-30 11:14:42 -0400
commit		152a6a9da1bd3ed5dcbbf6ff17c7ebde0eb9a754 (patch)
tree		cad354802870b7d4bc0402a6a6da44bd1f610bc6
parent		cd9bb7e7367c03400d6e918fd3502820fc3b9084 (diff)
parent		80787ebc2bbd8e675d8b9ff8cfa40f15134feebe (diff)
Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
* master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6: (21 commits)
  [IPV4] SNMP: Support OutMcastPkts and OutBcastPkts
  [IPV4] SNMP: Support InMcastPkts and InBcastPkts
  [IPV4] SNMP: Support InTruncatedPkts
  [IPV4] SNMP: Support InNoRoutes
  [SNMP]: Add definitions for {In,Out}BcastPkts
  [TCP] FRTO: RFC4138 allows Nagle override when new data must be sent
  [TCP] FRTO: Delay skb available check until it's mandatory
  [XFRM]: Restrict upper layer information by bundle.
  [TCP]: Catch skb with S+L bugs earlier
  [PATCH] INET : IPV4 UDP lookups converted to a 2 pass algo
  [L2TP]: Add the ability to autoload a pppox protocol module.
  [SKB]: Introduce skb_queue_walk_safe()
  [AF_IUCV/IUCV]: smp_call_function deadlock
  [IPV6]: Fix slab corruption running ip6sic
  [TCP]: Update references in two old comments
  [XFRM]: Export SPD info
  [IPV6]: Track device renames in snmp6.
  [SCTP]: Fix sctp_getsockopt_local_addrs_old() to use local storage.
  [NET]: Remove NETIF_F_INTERNAL_STATS, default to internal stats.
  [NETPOLL]: Remove CONFIG_NETPOLL_RX
  ...
-rw-r--r--	arch/s390/appldata/appldata_net_sum.c	3
-rw-r--r--	drivers/net/Kconfig			5
-rw-r--r--	drivers/net/bonding/bond_main.c		59
-rw-r--r--	drivers/net/pppox.c			8
-rw-r--r--	drivers/parisc/led.c			2
-rw-r--r--	include/linux/netdevice.h		9
-rw-r--r--	include/linux/skbuff.h			5
-rw-r--r--	include/linux/snmp.h			2
-rw-r--r--	include/linux/xfrm.h			35
-rw-r--r--	include/net/flow.h			6
-rw-r--r--	include/net/iucv/iucv.h			2
-rw-r--r--	include/net/tcp.h			9
-rw-r--r--	include/net/xfrm.h			23
-rw-r--r--	net/core/dev.c				45
-rw-r--r--	net/ipv4/ip_input.c			14
-rw-r--r--	net/ipv4/ip_output.c			6
-rw-r--r--	net/ipv4/tcp.c				14
-rw-r--r--	net/ipv4/tcp_input.c			30
-rw-r--r--	net/ipv4/tcp_output.c			8
-rw-r--r--	net/ipv4/udp.c				171
-rw-r--r--	net/ipv6/addrconf.c			6
-rw-r--r--	net/ipv6/proc.c				1
-rw-r--r--	net/ipv6/xfrm6_tunnel.c			2
-rw-r--r--	net/iucv/iucv.c				205
-rw-r--r--	net/sctp/socket.c			96
-rw-r--r--	net/xfrm/xfrm_policy.c			71
-rw-r--r--	net/xfrm/xfrm_user.c			77
27 files changed, 641 insertions(+), 273 deletions(-)
diff --git a/arch/s390/appldata/appldata_net_sum.c b/arch/s390/appldata/appldata_net_sum.c
index 516b3ac9a9b5..a43f3488fecf 100644
--- a/arch/s390/appldata/appldata_net_sum.c
+++ b/arch/s390/appldata/appldata_net_sum.c
@@ -109,9 +109,6 @@ static void appldata_get_net_sum_data(void *data)
 	read_lock(&dev_base_lock);
 	for (dev = dev_base; dev != NULL; dev = dev->next) {
 		stats = dev->get_stats(dev);
-		if (stats == NULL) {
-			continue;
-		}
 		rx_packets += stats->rx_packets;
 		tx_packets += stats->tx_packets;
 		rx_bytes   += stats->rx_bytes;
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index eb4b96c4d388..dcdad217df51 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2927,11 +2927,6 @@ endif #NETDEVICES
 config NETPOLL
 	def_bool NETCONSOLE
 
-config NETPOLL_RX
-	bool "Netpoll support for trapping incoming packets"
-	default n
-	depends on NETPOLL
-
 config NETPOLL_TRAP
 	bool "Netpoll traffic trapping"
 	default n
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index cea3783c92c5..724bce51f936 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1360,13 +1360,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 		goto err_undo_flags;
 	}
 
-	if (slave_dev->get_stats == NULL) {
-		printk(KERN_NOTICE DRV_NAME
-			": %s: the driver for slave device %s does not provide "
-			"get_stats function, network statistics will be "
-			"inaccurate.\n", bond_dev->name, slave_dev->name);
-	}
-
 	new_slave = kzalloc(sizeof(struct slave), GFP_KERNEL);
 	if (!new_slave) {
 		res = -ENOMEM;
@@ -3641,33 +3634,31 @@ static struct net_device_stats *bond_get_stats(struct net_device *bond_dev)
 
 	bond_for_each_slave(bond, slave, i) {
 		sstats = slave->dev->get_stats(slave->dev);
-		if (sstats) {
-			stats->rx_packets += sstats->rx_packets;
-			stats->rx_bytes += sstats->rx_bytes;
-			stats->rx_errors += sstats->rx_errors;
-			stats->rx_dropped += sstats->rx_dropped;
-
-			stats->tx_packets += sstats->tx_packets;
-			stats->tx_bytes += sstats->tx_bytes;
-			stats->tx_errors += sstats->tx_errors;
-			stats->tx_dropped += sstats->tx_dropped;
-
-			stats->multicast += sstats->multicast;
-			stats->collisions += sstats->collisions;
-
-			stats->rx_length_errors += sstats->rx_length_errors;
-			stats->rx_over_errors += sstats->rx_over_errors;
-			stats->rx_crc_errors += sstats->rx_crc_errors;
-			stats->rx_frame_errors += sstats->rx_frame_errors;
-			stats->rx_fifo_errors += sstats->rx_fifo_errors;
-			stats->rx_missed_errors += sstats->rx_missed_errors;
-
-			stats->tx_aborted_errors += sstats->tx_aborted_errors;
-			stats->tx_carrier_errors += sstats->tx_carrier_errors;
-			stats->tx_fifo_errors += sstats->tx_fifo_errors;
-			stats->tx_heartbeat_errors += sstats->tx_heartbeat_errors;
-			stats->tx_window_errors += sstats->tx_window_errors;
-		}
+		stats->rx_packets += sstats->rx_packets;
+		stats->rx_bytes += sstats->rx_bytes;
+		stats->rx_errors += sstats->rx_errors;
+		stats->rx_dropped += sstats->rx_dropped;
+
+		stats->tx_packets += sstats->tx_packets;
+		stats->tx_bytes += sstats->tx_bytes;
+		stats->tx_errors += sstats->tx_errors;
+		stats->tx_dropped += sstats->tx_dropped;
+
+		stats->multicast += sstats->multicast;
+		stats->collisions += sstats->collisions;
+
+		stats->rx_length_errors += sstats->rx_length_errors;
+		stats->rx_over_errors += sstats->rx_over_errors;
+		stats->rx_crc_errors += sstats->rx_crc_errors;
+		stats->rx_frame_errors += sstats->rx_frame_errors;
+		stats->rx_fifo_errors += sstats->rx_fifo_errors;
+		stats->rx_missed_errors += sstats->rx_missed_errors;
+
+		stats->tx_aborted_errors += sstats->tx_aborted_errors;
+		stats->tx_carrier_errors += sstats->tx_carrier_errors;
+		stats->tx_fifo_errors += sstats->tx_fifo_errors;
+		stats->tx_heartbeat_errors += sstats->tx_heartbeat_errors;
+		stats->tx_window_errors += sstats->tx_window_errors;
 	}
 
 	read_unlock_bh(&bond->lock);
diff --git a/drivers/net/pppox.c b/drivers/net/pppox.c
index 3f8115db4d54..f3e47d0c2b3c 100644
--- a/drivers/net/pppox.c
+++ b/drivers/net/pppox.c
@@ -31,6 +31,7 @@
 #include <linux/ppp_defs.h>
 #include <linux/if_ppp.h>
 #include <linux/ppp_channel.h>
+#include <linux/kmod.h>
 
 #include <net/sock.h>
 
@@ -114,6 +115,13 @@ static int pppox_create(struct socket *sock, int protocol)
 		goto out;
 
 	rc = -EPROTONOSUPPORT;
+#ifdef CONFIG_KMOD
+	if (!pppox_protos[protocol]) {
+		char buffer[32];
+		sprintf(buffer, "pppox-proto-%d", protocol);
+		request_module(buffer);
+	}
+#endif
 	if (!pppox_protos[protocol] ||
 	    !try_module_get(pppox_protos[protocol]->owner))
 		goto out;
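
The request_module() call above builds the alias "pppox-proto-<N>" from the
protocol number, so a protocol implementation becomes autoloadable simply by
exporting a matching module alias. A minimal sketch, assuming a PPPoE-style
module (PX_PROTO_OE comes from <linux/if_pppox.h>):

	/* Sketch: the alias that pppox_create()'s request_module() resolves.
	 * With this in place, socket(AF_PPPOX, SOCK_STREAM, PX_PROTO_OE)
	 * can pull the protocol module in on demand. */
	#include <linux/module.h>
	#include <linux/stringify.h>
	#include <linux/if_pppox.h>

	MODULE_ALIAS("pppox-proto-" __stringify(PX_PROTO_OE));
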
diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c
index 453e6829756c..3df82fe9ce8c 100644
--- a/drivers/parisc/led.c
+++ b/drivers/parisc/led.c
@@ -373,8 +373,6 @@ static __inline__ int led_get_net_activity(void)
 		if (LOOPBACK(in_dev->ifa_list->ifa_local))
 			continue;
 		stats = dev->get_stats(dev);
-		if (!stats)
-			continue;
 		rx_total += stats->rx_packets;
 		tx_total += stats->tx_packets;
 	}
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index e027a3750a77..ac0c92b1e002 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -325,7 +325,6 @@ struct net_device
 #define NETIF_F_VLAN_CHALLENGED	1024	/* Device cannot handle VLAN packets */
 #define NETIF_F_GSO		2048	/* Enable software GSO. */
 #define NETIF_F_LLTX		4096	/* LockLess TX */
-#define NETIF_F_INTERNAL_STATS	8192	/* Use stats structure in net_device */
 
 	/* Segmentation offload features */
 #define NETIF_F_GSO_SHIFT	16
@@ -654,8 +653,10 @@ static inline void netif_start_queue(struct net_device *dev)
 static inline void netif_wake_queue(struct net_device *dev)
 {
 #ifdef CONFIG_NETPOLL_TRAP
-	if (netpoll_trap())
+	if (netpoll_trap()) {
+		clear_bit(__LINK_STATE_XOFF, &dev->state);
 		return;
+	}
 #endif
 	if (test_and_clear_bit(__LINK_STATE_XOFF, &dev->state))
 		__netif_schedule(dev);
@@ -663,10 +664,6 @@ static inline void netif_wake_queue(struct net_device *dev)
 
 static inline void netif_stop_queue(struct net_device *dev)
 {
-#ifdef CONFIG_NETPOLL_TRAP
-	if (netpoll_trap())
-		return;
-#endif
 	set_bit(__LINK_STATE_XOFF, &dev->state);
 }
 
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 2694cb3ca763..253a2b9be9d6 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1471,6 +1471,11 @@ static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
 		prefetch(skb->next), (skb != (struct sk_buff *)(queue));	\
 		skb = skb->next)
 
+#define skb_queue_walk_safe(queue, skb, tmp)				\
+		for (skb = (queue)->next, tmp = skb->next;		\
+		     skb != (struct sk_buff *)(queue);			\
+		     skb = tmp, tmp = skb->next)
+
 #define skb_queue_reverse_walk(queue, skb) \
 		for (skb = (queue)->prev; \
 		     prefetch(skb->prev), (skb != (struct sk_buff *)(queue));	\
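
skb_queue_walk_safe() is the deletion-safe variant of skb_queue_walk(): tmp
is loaded before the loop body runs, so the current skb may be unlinked and
freed without breaking the traversal. A minimal usage sketch (the queue and
the must_drop() predicate are hypothetical):

	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&sk->sk_write_queue, skb, tmp) {
		if (must_drop(skb)) {	/* must_drop(): hypothetical helper */
			skb_unlink(skb, &sk->sk_write_queue);
			kfree_skb(skb);
		}
	}
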
diff --git a/include/linux/snmp.h b/include/linux/snmp.h
index 854aa6b543f1..802b3a38b041 100644
--- a/include/linux/snmp.h
+++ b/include/linux/snmp.h
@@ -40,6 +40,8 @@ enum
 	IPSTATS_MIB_FRAGCREATES,		/* FragCreates */
 	IPSTATS_MIB_INMCASTPKTS,		/* InMcastPkts */
 	IPSTATS_MIB_OUTMCASTPKTS,		/* OutMcastPkts */
+	IPSTATS_MIB_INBCASTPKTS,		/* InBcastPkts */
+	IPSTATS_MIB_OUTBCASTPKTS,		/* OutBcastPkts */
 	__IPSTATS_MIB_MAX
 };
 
diff --git a/include/linux/xfrm.h b/include/linux/xfrm.h
index 9c656a5cf842..a5d53e0fe152 100644
--- a/include/linux/xfrm.h
+++ b/include/linux/xfrm.h
@@ -185,6 +185,11 @@ enum {
 #define XFRM_MSG_NEWSADINFO XFRM_MSG_NEWSADINFO
 	XFRM_MSG_GETSADINFO,
 #define XFRM_MSG_GETSADINFO XFRM_MSG_GETSADINFO
+
+	XFRM_MSG_NEWSPDINFO,
+#define XFRM_MSG_NEWSPDINFO XFRM_MSG_NEWSPDINFO
+	XFRM_MSG_GETSPDINFO,
+#define XFRM_MSG_GETSPDINFO XFRM_MSG_GETSPDINFO
 	__XFRM_MSG_MAX
 };
 #define XFRM_MSG_MAX (__XFRM_MSG_MAX - 1)
@@ -290,6 +295,36 @@ enum xfrm_sadattr_type_t {
 #define XFRMA_SAD_MAX (__XFRMA_SAD_MAX - 1)
 };
 
+/* SPD Table filter flags  */
+enum xfrm_spd_ftype_t {
+	XFRM_SPD_UNSPEC,
+	XFRM_SPD_HMASK=1,
+	XFRM_SPD_HMAX=2,
+	XFRM_SPD_ICNT=4,
+	XFRM_SPD_OCNT=8,
+	XFRM_SPD_FCNT=16,
+	XFRM_SPD_ISCNT=32,
+	XFRM_SPD_OSCNT=64,
+	XFRM_SPD_FSCNT=128,
+	__XFRM_SPD_MAX
+
+#define XFRM_SPD_MAX (__XFRM_SPD_MAX - 1)
+};
+enum xfrm_spdattr_type_t {
+	XFRMA_SPD_UNSPEC,
+	XFRMA_SPDHMASK,
+	XFRMA_SPDHMAX,
+	XFRMA_SPDICNT,
+	XFRMA_SPDOCNT,
+	XFRMA_SPDFCNT,
+	XFRMA_SPDISCNT,
+	XFRMA_SPDOSCNT,
+	XFRMA_SPDFSCNT,
+	__XFRMA_SPD_MAX
+
+#define XFRMA_SPD_MAX (__XFRMA_SPD_MAX - 1)
+};
+
 struct xfrm_usersa_info {
 	struct xfrm_selector sel;
 	struct xfrm_id id;
diff --git a/include/net/flow.h b/include/net/flow.h
index ce4b10d8b412..f3cc1f812619 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -97,4 +97,10 @@ extern void *flow_cache_lookup(struct flowi *key, u16 family, u8 dir,
 extern void flow_cache_flush(void);
 extern atomic_t flow_cache_genid;
 
+static inline int flow_cache_uli_match(struct flowi *fl1, struct flowi *fl2)
+{
+	return (fl1->proto == fl2->proto &&
+		!memcmp(&fl1->uli_u, &fl2->uli_u, sizeof(fl1->uli_u)));
+}
+
 #endif
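
flow_cache_uli_match() compares only the upper-layer part of two flows: the
protocol plus the uli_u union (TCP/UDP ports, ICMP type/code). A hedged
sketch of the kind of check this enables, assuming a cached bundle that
remembers the flow it was built for in xdst->origin (the field added to
struct xfrm_dst further down):

	/* Sketch (assumption): refuse to reuse a cached sub-policy bundle
	 * when the current flow differs from the originating flow in its
	 * ports/ICMP information. */
	if (xdst->origin && !flow_cache_uli_match(xdst->origin, fl))
		return 0;
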
diff --git a/include/net/iucv/iucv.h b/include/net/iucv/iucv.h
index 746e7416261e..fd70adbb3566 100644
--- a/include/net/iucv/iucv.h
+++ b/include/net/iucv/iucv.h
@@ -16,7 +16,7 @@
  * completed a register, it can exploit the other functions.
  * For furthur reference on all IUCV functionality, refer to the
  * CP Programming Services book, also available on the web thru
- * www.ibm.com/s390/vm/pubs, manual # SC24-5760
+ * www.vm.ibm.com/pubs, manual # SC24-6084
  *
  * Definition of Return Codes
  * - All positive return codes including zero are reflected back
diff --git a/include/net/tcp.h b/include/net/tcp.h
index a385797f160a..ef8f9d4dae85 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -736,9 +736,7 @@ static inline __u32 tcp_current_ssthresh(const struct sock *sk)
 
 static inline void tcp_sync_left_out(struct tcp_sock *tp)
 {
-	if (tp->rx_opt.sack_ok &&
-	    (tp->sacked_out >= tp->packets_out - tp->lost_out))
-		tp->sacked_out = tp->packets_out - tp->lost_out;
+	BUG_ON(tp->sacked_out + tp->lost_out > tp->packets_out);
 	tp->left_out = tp->sacked_out + tp->lost_out;
 }
 
@@ -1201,9 +1199,14 @@ static inline struct sk_buff *tcp_send_head(struct sock *sk)
 
 static inline void tcp_advance_send_head(struct sock *sk, struct sk_buff *skb)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
+
 	sk->sk_send_head = skb->next;
 	if (sk->sk_send_head == (struct sk_buff *)&sk->sk_write_queue)
 		sk->sk_send_head = NULL;
+	/* Don't override Nagle indefinately with F-RTO */
+	if (tp->frto_counter == 2)
+		tp->frto_counter = 3;
 }
 
 static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unlinked)
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 8287081d77f2..66c2d3eec03c 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -423,6 +423,18 @@ struct xfrm_sadinfo
 	u32 sadhmcnt; /* max allowed hash bkts */
 	u32 sadcnt; /* current running count */
 };
+
+struct xfrm_spdinfo
+{
+	u32 incnt;
+	u32 outcnt;
+	u32 fwdcnt;
+	u32 inscnt;
+	u32 outscnt;
+	u32 fwdscnt;
+	u32 spdhcnt;
+	u32 spdhmcnt;
+};
 #ifdef CONFIG_AUDITSYSCALL
 extern void xfrm_audit_log(uid_t auid, u32 secid, int type, int result,
 			   struct xfrm_policy *xp, struct xfrm_state *x);
@@ -591,6 +603,10 @@ struct xfrm_dst
 		struct rt6_info		rt6;
 	} u;
 	struct dst_entry *route;
+#ifdef CONFIG_XFRM_SUB_POLICY
+	struct flowi *origin;
+	struct xfrm_selector *partner;
+#endif
 	u32 genid;
 	u32 route_mtu_cached;
 	u32 child_mtu_cached;
@@ -603,6 +619,12 @@ static inline void xfrm_dst_destroy(struct xfrm_dst *xdst)
 	dst_release(xdst->route);
 	if (likely(xdst->u.dst.xfrm))
 		xfrm_state_put(xdst->u.dst.xfrm);
+#ifdef CONFIG_XFRM_SUB_POLICY
+	kfree(xdst->origin);
+	xdst->origin = NULL;
+	kfree(xdst->partner);
+	xdst->partner = NULL;
+#endif
 }
 
 extern void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev);
@@ -946,6 +968,7 @@ extern struct xfrm_state *xfrm_find_acq_byseq(u32 seq);
 extern int xfrm_state_delete(struct xfrm_state *x);
 extern void xfrm_state_flush(u8 proto, struct xfrm_audit *audit_info);
 extern void xfrm_sad_getinfo(struct xfrm_sadinfo *si);
+extern void xfrm_spd_getinfo(struct xfrm_spdinfo *si);
 extern int xfrm_replay_check(struct xfrm_state *x, __be32 seq);
 extern void xfrm_replay_advance(struct xfrm_state *x, __be32 seq);
 extern void xfrm_replay_notify(struct xfrm_state *x, int event);
diff --git a/net/core/dev.c b/net/core/dev.c
index d5e42d13bd67..eb999003bbb7 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2101,26 +2101,23 @@ static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
 {
 	struct net_device_stats *stats = dev->get_stats(dev);
 
-	if (stats) {
-		seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
-			   "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
-			   dev->name, stats->rx_bytes, stats->rx_packets,
-			   stats->rx_errors,
-			   stats->rx_dropped + stats->rx_missed_errors,
-			   stats->rx_fifo_errors,
-			   stats->rx_length_errors + stats->rx_over_errors +
-			     stats->rx_crc_errors + stats->rx_frame_errors,
-			   stats->rx_compressed, stats->multicast,
-			   stats->tx_bytes, stats->tx_packets,
-			   stats->tx_errors, stats->tx_dropped,
-			   stats->tx_fifo_errors, stats->collisions,
-			   stats->tx_carrier_errors +
-			     stats->tx_aborted_errors +
-			     stats->tx_window_errors +
-			     stats->tx_heartbeat_errors,
-			   stats->tx_compressed);
-	} else
-		seq_printf(seq, "%6s: No statistics available.\n", dev->name);
+	seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
+		   "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
+		   dev->name, stats->rx_bytes, stats->rx_packets,
+		   stats->rx_errors,
+		   stats->rx_dropped + stats->rx_missed_errors,
+		   stats->rx_fifo_errors,
+		   stats->rx_length_errors + stats->rx_over_errors +
+		    stats->rx_crc_errors + stats->rx_frame_errors,
+		   stats->rx_compressed, stats->multicast,
+		   stats->tx_bytes, stats->tx_packets,
+		   stats->tx_errors, stats->tx_dropped,
+		   stats->tx_fifo_errors, stats->collisions,
+		   stats->tx_carrier_errors +
+		    stats->tx_aborted_errors +
+		    stats->tx_window_errors +
+		    stats->tx_heartbeat_errors,
+		   stats->tx_compressed);
 }
 
 /*
@@ -3257,11 +3254,9 @@ out:
 	mutex_unlock(&net_todo_run_mutex);
 }
 
-static struct net_device_stats *maybe_internal_stats(struct net_device *dev)
+static struct net_device_stats *internal_stats(struct net_device *dev)
 {
-	if (dev->features & NETIF_F_INTERNAL_STATS)
-		return &dev->stats;
-	return NULL;
+	return &dev->stats;
 }
 
 /**
@@ -3299,7 +3294,7 @@ struct net_device *alloc_netdev(int sizeof_priv, const char *name,
 	if (sizeof_priv)
 		dev->priv = netdev_priv(dev);
 
-	dev->get_stats = maybe_internal_stats;
+	dev->get_stats = internal_stats;
 	setup(dev);
 	strcpy(dev->name, name);
 	return dev;
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 324e7e0fdb2a..97069399d864 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -329,6 +329,7 @@ drop:
 static inline int ip_rcv_finish(struct sk_buff *skb)
 {
 	const struct iphdr *iph = ip_hdr(skb);
+	struct rtable *rt;
 
 	/*
 	 *	Initialise the virtual path cache for the packet. It describes
@@ -340,6 +341,8 @@ static inline int ip_rcv_finish(struct sk_buff *skb)
 		if (unlikely(err)) {
 			if (err == -EHOSTUNREACH)
 				IP_INC_STATS_BH(IPSTATS_MIB_INADDRERRORS);
+			else if (err == -ENETUNREACH)
+				IP_INC_STATS_BH(IPSTATS_MIB_INNOROUTES);
 			goto drop;
 		}
 	}
@@ -358,6 +361,12 @@ static inline int ip_rcv_finish(struct sk_buff *skb)
 	if (iph->ihl > 5 && ip_rcv_options(skb))
 		goto drop;
 
+	rt = (struct rtable*)skb->dst;
+	if (rt->rt_type == RTN_MULTICAST)
+		IP_INC_STATS_BH(IPSTATS_MIB_INMCASTPKTS);
+	else if (rt->rt_type == RTN_BROADCAST)
+		IP_INC_STATS_BH(IPSTATS_MIB_INBCASTPKTS);
+
 	return dst_input(skb);
 
 drop:
@@ -414,7 +423,10 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
 		goto inhdr_error;
 
 	len = ntohs(iph->tot_len);
-	if (skb->len < len || len < (iph->ihl*4))
+	if (skb->len < len) {
+		IP_INC_STATS_BH(IPSTATS_MIB_INTRUNCATEDPKTS);
+		goto drop;
+	} else if (len < (iph->ihl*4))
 		goto inhdr_error;
 
 	/* Our transport medium may have padded the buffer out. Now we know it
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 534650cad3a8..d6427d918512 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -160,9 +160,15 @@ EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);
 static inline int ip_finish_output2(struct sk_buff *skb)
 {
 	struct dst_entry *dst = skb->dst;
+	struct rtable *rt = (struct rtable *)dst;
 	struct net_device *dev = dst->dev;
 	int hh_len = LL_RESERVED_SPACE(dev);
 
+	if (rt->rt_type == RTN_MULTICAST)
+		IP_INC_STATS(IPSTATS_MIB_OUTMCASTPKTS);
+	else if (rt->rt_type == RTN_BROADCAST)
+		IP_INC_STATS(IPSTATS_MIB_OUTBCASTPKTS);
+
 	/* Be paranoid, rather than too clever. */
 	if (unlikely(skb_headroom(skb) < hh_len && dev->hard_header)) {
 		struct sk_buff *skb2;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 2cf9a898ce50..d6e488668171 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1573,14 +1573,12 @@ void tcp_close(struct sock *sk, long timeout)
 
 	sk_stream_mem_reclaim(sk);
 
-	/* As outlined in draft-ietf-tcpimpl-prob-03.txt, section
-	 * 3.10, we send a RST here because data was lost. To
-	 * witness the awful effects of the old behavior of always
-	 * doing a FIN, run an older 2.1.x kernel or 2.0.x, start
-	 * a bulk GET in an FTP client, suspend the process, wait
-	 * for the client to advertise a zero window, then kill -9
-	 * the FTP client, wheee...  Note: timeout is always zero
-	 * in such a case.
+	/* As outlined in RFC 2525, section 2.17, we send a RST here because
+	 * data was lost. To witness the awful effects of the old behavior of
+	 * always doing a FIN, run an older 2.1.x kernel or 2.0.x, start a bulk
+	 * GET in an FTP client, suspend the process, wait for the client to
+	 * advertise a zero window, then kill -9 the FTP client, wheee...
+	 * Note: timeout is always zero in such a case.
 	 */
 	if (data_was_unread) {
 		/* Unread data was tossed, zap the connection. */
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 051f0f815f17..7641b2761a14 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1265,20 +1265,15 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
 	return flag;
 }
 
-/* F-RTO can only be used if these conditions are satisfied:
- *  - there must be some unsent new data
- *  - the advertised window should allow sending it
- *  - TCP has never retransmitted anything other than head (SACK enhanced
- *    variant from Appendix B of RFC4138 is more robust here)
+/* F-RTO can only be used if TCP has never retransmitted anything other than
+ * head (SACK enhanced variant from Appendix B of RFC4138 is more robust here)
  */
 int tcp_use_frto(struct sock *sk)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb;
 
-	if (!sysctl_tcp_frto || !tcp_send_head(sk) ||
-	    after(TCP_SKB_CB(tcp_send_head(sk))->end_seq,
-		  tp->snd_una + tp->snd_wnd))
+	if (!sysctl_tcp_frto)
 		return 0;
 
 	if (IsSackFrto())
@@ -2642,7 +2637,9 @@ static void tcp_undo_spur_to_response(struct sock *sk, int flag)
  * algorithm is not part of the F-RTO detection algorithm
  * given in RFC4138 but can be selected separately).
  * Otherwise (basically on duplicate ACK), RTO was (likely) caused by a loss
- * and TCP falls back to conventional RTO recovery.
+ * and TCP falls back to conventional RTO recovery. F-RTO allows overriding
+ * of Nagle, this is done using frto_counter states 2 and 3, when a new data
+ * segment of any size sent during F-RTO, state 2 is upgraded to 3.
  *
  * Rationale: if the RTO was spurious, new ACKs should arrive from the
  * original window even after we transmit two new data segments.
@@ -2671,7 +2668,7 @@ static int tcp_process_frto(struct sock *sk, u32 prior_snd_una, int flag)
 	inet_csk(sk)->icsk_retransmits = 0;
 
 	if (!before(tp->snd_una, tp->frto_highmark)) {
-		tcp_enter_frto_loss(sk, tp->frto_counter + 1, flag);
+		tcp_enter_frto_loss(sk, (tp->frto_counter == 1 ? 2 : 3), flag);
 		return 1;
 	}
 
@@ -2697,7 +2694,7 @@ static int tcp_process_frto(struct sock *sk, u32 prior_snd_una, int flag)
 		return 1;
 	}
 
-	if ((tp->frto_counter == 2) &&
+	if ((tp->frto_counter >= 2) &&
 	    (!(flag&FLAG_FORWARD_PROGRESS) ||
 	     ((flag&FLAG_DATA_SACKED) && !(flag&FLAG_ONLY_ORIG_SACKED)))) {
 		/* RFC4138 shortcoming (see comment above) */
@@ -2710,10 +2707,19 @@ static int tcp_process_frto(struct sock *sk, u32 prior_snd_una, int flag)
 	}
 
 	if (tp->frto_counter == 1) {
+		/* Sending of the next skb must be allowed or no FRTO */
+		if (!tcp_send_head(sk) ||
+		    after(TCP_SKB_CB(tcp_send_head(sk))->end_seq,
+				tp->snd_una + tp->snd_wnd)) {
+			tcp_enter_frto_loss(sk, (tp->frto_counter == 1 ? 2 : 3),
+					    flag);
+			return 1;
+		}
+
 		tp->snd_cwnd = tcp_packets_in_flight(tp) + 2;
 		tp->frto_counter = 2;
 		return 1;
-	} else /* frto_counter == 2 */ {
+	} else {
 		switch (sysctl_tcp_frto_response) {
 		case 2:
 			tcp_undo_spur_to_response(sk, flag);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index e70a6840cb64..0faacf9c419d 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1035,8 +1035,10 @@ static inline int tcp_nagle_test(struct tcp_sock *tp, struct sk_buff *skb,
 	if (nonagle & TCP_NAGLE_PUSH)
 		return 1;
 
-	/* Don't use the nagle rule for urgent data (or for the final FIN). */
-	if (tp->urg_mode ||
+	/* Don't use the nagle rule for urgent data (or for the final FIN).
+	 * Nagle can be ignored during F-RTO too (see RFC4138).
+	 */
+	if (tp->urg_mode || (tp->frto_counter == 2) ||
 	    (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN))
 		return 1;
 
@@ -2035,7 +2037,7 @@ void tcp_send_fin(struct sock *sk)
 /* We get here when a process closes a file descriptor (either due to
  * an explicit close() or as a byproduct of exit()'ing) and there
  * was unread data in the receive queue.  This behavior is recommended
- * by draft-ietf-tcpimpl-prob-03.txt section 3.10.  -DaveM
+ * by RFC 2525, section 2.17.  -DaveM
  */
 void tcp_send_active_reset(struct sock *sk, gfp_t priority)
 {
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index cec0f2cc49b7..144970704c2c 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -114,14 +114,33 @@ DEFINE_RWLOCK(udp_hash_lock);
 
 static int udp_port_rover;
 
-static inline int __udp_lib_lport_inuse(__u16 num, struct hlist_head udptable[])
+/*
+ * Note about this hash function :
+ * Typical use is probably daddr = 0, only dport is going to vary hash
+ */
+static inline unsigned int hash_port_and_addr(__u16 port, __be32 addr)
+{
+	addr ^= addr >> 16;
+	addr ^= addr >> 8;
+	return port ^ addr;
+}
+
+static inline int __udp_lib_port_inuse(unsigned int hash, int port,
+				       __be32 daddr, struct hlist_head udptable[])
 {
 	struct sock *sk;
 	struct hlist_node *node;
+	struct inet_sock *inet;
 
-	sk_for_each(sk, node, &udptable[num & (UDP_HTABLE_SIZE - 1)])
-		if (sk->sk_hash == num)
+	sk_for_each(sk, node, &udptable[hash & (UDP_HTABLE_SIZE - 1)]) {
+		if (sk->sk_hash != hash)
+			continue;
+		inet = inet_sk(sk);
+		if (inet->num != port)
+			continue;
+		if (inet->rcv_saddr == daddr)
 			return 1;
+	}
 	return 0;
 }
 
@@ -142,6 +161,7 @@ int __udp_lib_get_port(struct sock *sk, unsigned short snum,
 	struct hlist_node *node;
 	struct hlist_head *head;
 	struct sock *sk2;
+	unsigned int hash;
 	int    error = 1;
 
 	write_lock_bh(&udp_hash_lock);
@@ -156,7 +176,9 @@ int __udp_lib_get_port(struct sock *sk, unsigned short snum,
 		for (i = 0; i < UDP_HTABLE_SIZE; i++, result++) {
 			int size;
 
-			head = &udptable[result & (UDP_HTABLE_SIZE - 1)];
+			hash = hash_port_and_addr(result,
+						  inet_sk(sk)->rcv_saddr);
+			head = &udptable[hash & (UDP_HTABLE_SIZE - 1)];
 			if (hlist_empty(head)) {
 				if (result > sysctl_local_port_range[1])
 					result = sysctl_local_port_range[0] +
@@ -181,7 +203,10 @@ int __udp_lib_get_port(struct sock *sk, unsigned short snum,
 			result = sysctl_local_port_range[0]
 				+ ((result - sysctl_local_port_range[0]) &
 				   (UDP_HTABLE_SIZE - 1));
-			if (! __udp_lib_lport_inuse(result, udptable))
+			hash = hash_port_and_addr(result,
+						  inet_sk(sk)->rcv_saddr);
+			if (! __udp_lib_port_inuse(hash, result,
+						   inet_sk(sk)->rcv_saddr, udptable))
 				break;
 		}
 		if (i >= (1 << 16) / UDP_HTABLE_SIZE)
@@ -189,11 +214,13 @@ int __udp_lib_get_port(struct sock *sk, unsigned short snum,
 gotit:
 		*port_rover = snum = result;
 	} else {
-		head = &udptable[snum & (UDP_HTABLE_SIZE - 1)];
+		hash = hash_port_and_addr(snum, inet_sk(sk)->rcv_saddr);
+		head = &udptable[hash & (UDP_HTABLE_SIZE - 1)];
 
 		sk_for_each(sk2, node, head)
-			if (sk2->sk_hash == snum &&
+			if (sk2->sk_hash == hash &&
 			    sk2 != sk &&
+			    inet_sk(sk2)->num == snum &&
 			    (!sk2->sk_reuse || !sk->sk_reuse) &&
 			    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if
 			     || sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
@@ -201,9 +228,9 @@ gotit:
 				goto fail;
 	}
 	inet_sk(sk)->num = snum;
-	sk->sk_hash = snum;
+	sk->sk_hash = hash;
 	if (sk_unhashed(sk)) {
-		head = &udptable[snum & (UDP_HTABLE_SIZE - 1)];
+		head = &udptable[hash & (UDP_HTABLE_SIZE - 1)];
 		sk_add_node(sk, head);
 		sock_prot_inc_use(sk->sk_prot);
 	}
@@ -242,63 +269,78 @@ static struct sock *__udp4_lib_lookup(__be32 saddr, __be16 sport,
 {
 	struct sock *sk, *result = NULL;
 	struct hlist_node *node;
-	unsigned short hnum = ntohs(dport);
-	int badness = -1;
+	unsigned int hash, hashwild;
+	int score, best = -1;
+
+	hash = hash_port_and_addr(ntohs(dport), daddr);
+	hashwild = hash_port_and_addr(ntohs(dport), 0);
 
 	read_lock(&udp_hash_lock);
-	sk_for_each(sk, node, &udptable[hnum & (UDP_HTABLE_SIZE - 1)]) {
+
+lookup:
+
+	sk_for_each(sk, node, &udptable[hash & (UDP_HTABLE_SIZE - 1)]) {
 		struct inet_sock *inet = inet_sk(sk);
 
-		if (sk->sk_hash == hnum && !ipv6_only_sock(sk)) {
-			int score = (sk->sk_family == PF_INET ? 1 : 0);
-			if (inet->rcv_saddr) {
-				if (inet->rcv_saddr != daddr)
-					continue;
-				score+=2;
-			}
-			if (inet->daddr) {
-				if (inet->daddr != saddr)
-					continue;
-				score+=2;
-			}
-			if (inet->dport) {
-				if (inet->dport != sport)
-					continue;
-				score+=2;
-			}
-			if (sk->sk_bound_dev_if) {
-				if (sk->sk_bound_dev_if != dif)
-					continue;
-				score+=2;
-			}
-			if (score == 9) {
-				result = sk;
-				break;
-			} else if (score > badness) {
-				result = sk;
-				badness = score;
-			}
+		if (sk->sk_hash != hash || ipv6_only_sock(sk) ||
+		    inet->num != dport)
+			continue;
+
+		score = (sk->sk_family == PF_INET ? 1 : 0);
+		if (inet->rcv_saddr) {
+			if (inet->rcv_saddr != daddr)
+				continue;
+			score+=2;
+		}
+		if (inet->daddr) {
+			if (inet->daddr != saddr)
+				continue;
+			score+=2;
+		}
+		if (inet->dport) {
+			if (inet->dport != sport)
+				continue;
+			score+=2;
+		}
+		if (sk->sk_bound_dev_if) {
+			if (sk->sk_bound_dev_if != dif)
+				continue;
+			score+=2;
+		}
+		if (score == 9) {
+			result = sk;
+			goto found;
+		} else if (score > best) {
+			result = sk;
+			best = score;
 		}
 	}
+
+	if (hash != hashwild) {
+		hash = hashwild;
+		goto lookup;
+	}
+found:
 	if (result)
 		sock_hold(result);
 	read_unlock(&udp_hash_lock);
 	return result;
 }
 
-static inline struct sock *udp_v4_mcast_next(struct sock *sk,
-					     __be16 loc_port, __be32 loc_addr,
-					     __be16 rmt_port, __be32 rmt_addr,
-					     int dif)
+static inline struct sock *udp_v4_mcast_next(
+			struct sock *sk,
+			unsigned int hnum, __be16 loc_port, __be32 loc_addr,
+			__be16 rmt_port, __be32 rmt_addr,
+			int dif)
 {
 	struct hlist_node *node;
 	struct sock *s = sk;
-	unsigned short hnum = ntohs(loc_port);
 
 	sk_for_each_from(s, node) {
 		struct inet_sock *inet = inet_sk(s);
 
 		if (s->sk_hash != hnum ||
+		    inet->num != loc_port ||
 		    (inet->daddr && inet->daddr != rmt_addr) ||
 		    (inet->dport != rmt_port && inet->dport) ||
 		    (inet->rcv_saddr && inet->rcv_saddr != loc_addr) ||
@@ -1129,29 +1171,44 @@ static int __udp4_lib_mcast_deliver(struct sk_buff *skb,
 				    __be32 saddr, __be32 daddr,
 				    struct hlist_head udptable[])
 {
-	struct sock *sk;
+	struct sock *sk, *skw, *sknext;
 	int dif;
+	unsigned int hash = hash_port_and_addr(ntohs(uh->dest), daddr);
+	unsigned int hashwild = hash_port_and_addr(ntohs(uh->dest), 0);
 
-	read_lock(&udp_hash_lock);
-	sk = sk_head(&udptable[ntohs(uh->dest) & (UDP_HTABLE_SIZE - 1)]);
 	dif = skb->dev->ifindex;
-	sk = udp_v4_mcast_next(sk, uh->dest, daddr, uh->source, saddr, dif);
-	if (sk) {
-		struct sock *sknext = NULL;
-
+
+	read_lock(&udp_hash_lock);
+
+	sk = sk_head(&udptable[hash & (UDP_HTABLE_SIZE - 1)]);
+	skw = sk_head(&udptable[hashwild & (UDP_HTABLE_SIZE - 1)]);
+
+	sk = udp_v4_mcast_next(sk, hash, uh->dest, daddr, uh->source, saddr, dif);
+	if (!sk) {
+		hash = hashwild;
+		sk = udp_v4_mcast_next(skw, hash, uh->dest, daddr, uh->source,
+			saddr, dif);
+	}
+	if (sk) {
 		do {
 			struct sk_buff *skb1 = skb;
-
-			sknext = udp_v4_mcast_next(sk_next(sk), uh->dest, daddr,
-						   uh->source, saddr, dif);
+			sknext = udp_v4_mcast_next(sk_next(sk), hash, uh->dest,
+				daddr, uh->source, saddr, dif);
+			if (!sknext && hash != hashwild) {
+				hash = hashwild;
+				sknext = udp_v4_mcast_next(skw, hash, uh->dest,
+					daddr, uh->source, saddr, dif);
+			}
 			if (sknext)
 				skb1 = skb_clone(skb, GFP_ATOMIC);
 
 			if (skb1) {
 				int ret = udp_queue_rcv_skb(sk, skb1);
 				if (ret > 0)
-					/* we should probably re-process instead
-					 * of dropping packets here. */
+					/*
+					 * we should probably re-process
+					 * instead of dropping packets here.
+					 */
 					kfree_skb(skb1);
 			}
 			sk = sknext;
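
The common thread in these udp.c hunks: a socket bound to a specific local
address now hashes into a different bucket than one bound to INADDR_ANY, so
each lookup probes at most two buckets, the exact hash and the wildcard
hash. A small illustrative computation (address and port are arbitrary):

	/* Sketch: the two hashes probed for a datagram sent to 192.0.2.1:53.
	 * A socket bound to 192.0.2.1 lands in the first bucket, a socket
	 * bound to 0.0.0.0 in the second, hence the two-pass lookup. */
	unsigned int hash     = hash_port_and_addr(53, htonl(0xc0000201));
	unsigned int hashwild = hash_port_and_addr(53, 0);
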
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index e04e49373505..3452433cbc96 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -2359,8 +2359,9 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
 			break;
 
 	case NETDEV_CHANGENAME:
-#ifdef CONFIG_SYSCTL
 		if (idev) {
+			snmp6_unregister_dev(idev);
+#ifdef CONFIG_SYSCTL
 			addrconf_sysctl_unregister(&idev->cnf);
 			neigh_sysctl_unregister(idev->nd_parms);
 			neigh_sysctl_register(dev, idev->nd_parms,
@@ -2368,8 +2369,9 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
 					      &ndisc_ifinfo_sysctl_change,
 					      NULL);
 			addrconf_sysctl_register(idev, &idev->cnf);
-		}
 #endif
+			snmp6_register_dev(idev);
+		}
 		break;
 	}
 
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index acb306a5dd56..920dc9cf6a84 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -223,6 +223,7 @@ int snmp6_unregister_dev(struct inet6_dev *idev)
 		return -EINVAL;
 	remove_proc_entry(idev->stats.proc_dir_entry->name,
 			  proc_net_devsnmp6);
+	idev->stats.proc_dir_entry = NULL;
 	return 0;
 }
 
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index 538499a89975..5502cc948dfb 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -261,7 +261,7 @@ static int xfrm6_tunnel_rcv(struct sk_buff *skb)
 	__be32 spi;
 
 	spi = xfrm6_tunnel_spi_lookup((xfrm_address_t *)&iph->saddr);
-	return xfrm6_rcv_spi(skb, spi);
+	return xfrm6_rcv_spi(skb, spi) > 0 ? : 0;
 }
 
 static int xfrm6_tunnel_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index 60f293842a39..903bdb6eaaa1 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -90,20 +90,43 @@ struct iucv_irq_data {
 	u32 res2[8];
 };
 
-struct iucv_work {
+struct iucv_irq_list {
 	struct list_head list;
 	struct iucv_irq_data data;
 };
 
-static LIST_HEAD(iucv_work_queue);
-static DEFINE_SPINLOCK(iucv_work_lock);
-
 static struct iucv_irq_data *iucv_irq_data;
 static cpumask_t iucv_buffer_cpumask = CPU_MASK_NONE;
 static cpumask_t iucv_irq_cpumask = CPU_MASK_NONE;
 
-static void iucv_tasklet_handler(unsigned long);
-static DECLARE_TASKLET(iucv_tasklet, iucv_tasklet_handler,0);
+/*
+ * Queue of interrupt buffers lock for delivery via the tasklet
+ * (fast but can't call smp_call_function).
+ */
+static LIST_HEAD(iucv_task_queue);
+
+/*
+ * The tasklet for fast delivery of iucv interrupts.
+ */
+static void iucv_tasklet_fn(unsigned long);
+static DECLARE_TASKLET(iucv_tasklet, iucv_tasklet_fn,0);
+
+/*
+ * Queue of interrupt buffers for delivery via a work queue
+ * (slower but can call smp_call_function).
+ */
+static LIST_HEAD(iucv_work_queue);
+
+/*
+ * The work element to deliver path pending interrupts.
+ */
+static void iucv_work_fn(struct work_struct *work);
+static DECLARE_WORK(iucv_work, iucv_work_fn);
+
+/*
+ * Spinlock protecting task and work queue.
+ */
+static DEFINE_SPINLOCK(iucv_queue_lock);
 
 enum iucv_command_codes {
 	IUCV_QUERY = 0,
 static DEFINE_SPINLOCK(iucv_table_lock);
 
 /*
- * iucv_tasklet_cpu: contains the number of the cpu executing the tasklet.
- * Needed for iucv_path_sever called from tasklet.
+ * iucv_active_cpu: contains the number of the cpu executing the tasklet
+ * or the work handler. Needed for iucv_path_sever called from tasklet.
  */
-static int iucv_tasklet_cpu = -1;
+static int iucv_active_cpu = -1;
 
 /*
  * Mutex and wait queue for iucv_register/iucv_unregister.
@@ -449,17 +472,19 @@ static void iucv_setmask_mp(void)
 {
 	int cpu;
 
+	preempt_disable();
 	for_each_online_cpu(cpu)
 		/* Enable all cpus with a declared buffer. */
 		if (cpu_isset(cpu, iucv_buffer_cpumask) &&
 		    !cpu_isset(cpu, iucv_irq_cpumask))
 			smp_call_function_on(iucv_allow_cpu, NULL, 0, 1, cpu);
+	preempt_enable();
 }
 
 /**
  * iucv_setmask_up
  *
- * Allow iucv interrupts on a single cpus.
+ * Allow iucv interrupts on a single cpu.
  */
 static void iucv_setmask_up(void)
 {
@@ -493,8 +518,10 @@ static int iucv_enable(void)
 		goto out;
 	/* Declare per cpu buffers. */
 	rc = -EIO;
+	preempt_disable();
 	for_each_online_cpu(cpu)
 		smp_call_function_on(iucv_declare_cpu, NULL, 0, 1, cpu);
+	preempt_enable();
 	if (cpus_empty(iucv_buffer_cpumask))
 		/* No cpu could declare an iucv buffer. */
 		goto out_path;
@@ -584,48 +611,49 @@ static int iucv_sever_pathid(u16 pathid, u8 userdata[16])
 	return iucv_call_b2f0(IUCV_SEVER, parm);
 }
 
+#ifdef CONFIG_SMP
 /**
- * __iucv_cleanup_pathid
+ * __iucv_cleanup_queue
  * @dummy: unused dummy argument
  *
  * Nop function called via smp_call_function to force work items from
  * pending external iucv interrupts to the work queue.
  */
-static void __iucv_cleanup_pathid(void *dummy)
+static void __iucv_cleanup_queue(void *dummy)
 {
 }
+#endif
 
 /**
- * iucv_cleanup_pathid
- * @pathid: 16 bit pathid
+ * iucv_cleanup_queue
  *
  * Function called after a path has been severed to find all remaining
  * work items for the now stale pathid. The caller needs to hold the
  * iucv_table_lock.
  */
-static void iucv_cleanup_pathid(u16 pathid)
+static void iucv_cleanup_queue(void)
 {
-	struct iucv_work *p, *n;
+	struct iucv_irq_list *p, *n;
 
 	/*
-	 * Path is severed, the pathid can be reused immediatly on
-	 * a iucv connect or a connection pending interrupt.
-	 * iucv_path_connect and connection pending interrupt will
-	 * wait until the iucv_table_lock is released before the
-	 * recycled pathid enters the system.
-	 * Force remaining interrupts to the work queue, then
-	 * scan the work queue for items of this path.
+	 * When a path is severed, the pathid can be reused immediatly
+	 * on a iucv connect or a connection pending interrupt. Remove
+	 * all entries from the task queue that refer to a stale pathid
+	 * (iucv_path_table[ix] == NULL). Only then do the iucv connect
+	 * or deliver the connection pending interrupt. To get all the
+	 * pending interrupts force them to the work queue by calling
+	 * an empty function on all cpus.
 	 */
-	smp_call_function(__iucv_cleanup_pathid, NULL, 0, 1);
-	spin_lock_irq(&iucv_work_lock);
-	list_for_each_entry_safe(p, n, &iucv_work_queue, list) {
-		/* Remove work items for pathid except connection pending */
-		if (p->data.ippathid == pathid && p->data.iptype != 0x01) {
+	smp_call_function(__iucv_cleanup_queue, NULL, 0, 1);
+	spin_lock_irq(&iucv_queue_lock);
+	list_for_each_entry_safe(p, n, &iucv_task_queue, list) {
+		/* Remove stale work items from the task queue. */
+		if (iucv_path_table[p->data.ippathid] == NULL) {
 			list_del(&p->list);
 			kfree(p);
 		}
 	}
-	spin_unlock_irq(&iucv_work_lock);
+	spin_unlock_irq(&iucv_queue_lock);
 }
 
 /**
@@ -684,7 +712,6 @@ void iucv_unregister(struct iucv_handler *handler, int smp)
 		iucv_sever_pathid(p->pathid, NULL);
 		iucv_path_table[p->pathid] = NULL;
 		list_del(&p->list);
-		iucv_cleanup_pathid(p->pathid);
 		iucv_path_free(p);
 	}
 	spin_unlock_bh(&iucv_table_lock);
@@ -757,9 +784,9 @@ int iucv_path_connect(struct iucv_path *path, struct iucv_handler *handler,
 	union iucv_param *parm;
 	int rc;
 
-	preempt_disable();
-	if (iucv_tasklet_cpu != smp_processor_id())
-		spin_lock_bh(&iucv_table_lock);
+	BUG_ON(in_atomic());
+	spin_lock_bh(&iucv_table_lock);
+	iucv_cleanup_queue();
 	parm = percpu_ptr(iucv_param, smp_processor_id());
 	memset(parm, 0, sizeof(union iucv_param));
 	parm->ctrl.ipmsglim = path->msglim;
@@ -794,9 +821,7 @@ int iucv_path_connect(struct iucv_path *path, struct iucv_handler *handler,
 			rc = -EIO;
 		}
 	}
-	if (iucv_tasklet_cpu != smp_processor_id())
-		spin_unlock_bh(&iucv_table_lock);
-	preempt_enable();
+	spin_unlock_bh(&iucv_table_lock);
 	return rc;
 }
 
802 827
@@ -867,15 +892,14 @@ int iucv_path_sever(struct iucv_path *path, u8 userdata[16])
867 892
868 893
869 preempt_disable(); 894 preempt_disable();
870 if (iucv_tasklet_cpu != smp_processor_id()) 895 if (iucv_active_cpu != smp_processor_id())
871 spin_lock_bh(&iucv_table_lock); 896 spin_lock_bh(&iucv_table_lock);
872 rc = iucv_sever_pathid(path->pathid, userdata); 897 rc = iucv_sever_pathid(path->pathid, userdata);
873 if (!rc) { 898 if (!rc) {
874 iucv_path_table[path->pathid] = NULL; 899 iucv_path_table[path->pathid] = NULL;
875 list_del_init(&path->list); 900 list_del_init(&path->list);
876 iucv_cleanup_pathid(path->pathid);
877 } 901 }
878 if (iucv_tasklet_cpu != smp_processor_id()) 902 if (iucv_active_cpu != smp_processor_id())
879 spin_unlock_bh(&iucv_table_lock); 903 spin_unlock_bh(&iucv_table_lock);
880 preempt_enable(); 904 preempt_enable();
881 return rc; 905 return rc;
@@ -1244,8 +1268,7 @@ static void iucv_path_complete(struct iucv_irq_data *data)
 	struct iucv_path_complete *ipc = (void *) data;
 	struct iucv_path *path = iucv_path_table[ipc->ippathid];
 
-	BUG_ON(!path || !path->handler);
-	if (path->handler->path_complete)
+	if (path && path->handler && path->handler->path_complete)
 		path->handler->path_complete(path, ipc->ipuser);
 }
 
@@ -1273,14 +1296,14 @@ static void iucv_path_severed(struct iucv_irq_data *data)
 	struct iucv_path_severed *ips = (void *) data;
 	struct iucv_path *path = iucv_path_table[ips->ippathid];
 
-	BUG_ON(!path || !path->handler);
+	if (!path || !path->handler)	/* Already severed */
+		return;
 	if (path->handler->path_severed)
 		path->handler->path_severed(path, ips->ipuser);
 	else {
 		iucv_sever_pathid(path->pathid, NULL);
 		iucv_path_table[path->pathid] = NULL;
 		list_del_init(&path->list);
-		iucv_cleanup_pathid(path->pathid);
 		iucv_path_free(path);
 	}
 }
@@ -1309,8 +1332,7 @@ static void iucv_path_quiesced(struct iucv_irq_data *data)
 	struct iucv_path_quiesced *ipq = (void *) data;
 	struct iucv_path *path = iucv_path_table[ipq->ippathid];
 
-	BUG_ON(!path || !path->handler);
-	if (path->handler->path_quiesced)
+	if (path && path->handler && path->handler->path_quiesced)
 		path->handler->path_quiesced(path, ipq->ipuser);
 }
 
@@ -1338,8 +1360,7 @@ static void iucv_path_resumed(struct iucv_irq_data *data)
1338 struct iucv_path_resumed *ipr = (void *) data; 1360 struct iucv_path_resumed *ipr = (void *) data;
1339 struct iucv_path *path = iucv_path_table[ipr->ippathid]; 1361 struct iucv_path *path = iucv_path_table[ipr->ippathid];
1340 1362
1341 BUG_ON(!path || !path->handler); 1363 if (path && path->handler && path->handler->path_resumed)
1342 if (path->handler->path_resumed)
1343 path->handler->path_resumed(path, ipr->ipuser); 1364 path->handler->path_resumed(path, ipr->ipuser);
1344} 1365}
1345 1366
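
The four hunks above replace BUG_ON(!path || !path->handler) with a tolerant check: an interrupt that races with path teardown is now dropped rather than crashing the machine. A compilable sketch of the resulting dispatch shape, with all names invented for illustration:

#include <stdio.h>

struct path_ops {
        void (*path_complete)(int pathid);
};

struct path {
        int pathid;
        const struct path_ops *ops;
};

#define MAX_PATHS 8
static struct path *path_table[MAX_PATHS];

static void deliver_complete(int pathid)
{
        struct path *p = (pathid >= 0 && pathid < MAX_PATHS)
                                ? path_table[pathid] : NULL;

        /* Tolerate a stale event: path gone, or callback not provided. */
        if (p && p->ops && p->ops->path_complete)
                p->ops->path_complete(p->pathid);
}

static void on_complete(int pathid)
{
        printf("path %d complete\n", pathid);
}

static const struct path_ops ops = { .path_complete = on_complete };

int main(void)
{
        struct path p = { .pathid = 3, .ops = &ops };

        path_table[3] = &p;
        deliver_complete(3);            /* dispatched to the handler */
        deliver_complete(5);            /* stale event, silently dropped */
        return 0;
}
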
@@ -1371,8 +1392,7 @@ static void iucv_message_complete(struct iucv_irq_data *data)
1371 struct iucv_path *path = iucv_path_table[imc->ippathid]; 1392 struct iucv_path *path = iucv_path_table[imc->ippathid];
1372 struct iucv_message msg; 1393 struct iucv_message msg;
1373 1394
1374 BUG_ON(!path || !path->handler); 1395 if (path && path->handler && path->handler->message_complete) {
1375 if (path->handler->message_complete) {
1376 msg.flags = imc->ipflags1; 1396 msg.flags = imc->ipflags1;
1377 msg.id = imc->ipmsgid; 1397 msg.id = imc->ipmsgid;
1378 msg.audit = imc->ipaudit; 1398 msg.audit = imc->ipaudit;
@@ -1417,8 +1437,7 @@ static void iucv_message_pending(struct iucv_irq_data *data)
1417 struct iucv_path *path = iucv_path_table[imp->ippathid]; 1437 struct iucv_path *path = iucv_path_table[imp->ippathid];
1418 struct iucv_message msg; 1438 struct iucv_message msg;
1419 1439
1420 BUG_ON(!path || !path->handler); 1440 if (path && path->handler && path->handler->message_pending) {
1421 if (path->handler->message_pending) {
1422 msg.flags = imp->ipflags1; 1441 msg.flags = imp->ipflags1;
1423 msg.id = imp->ipmsgid; 1442 msg.id = imp->ipmsgid;
1424 msg.class = imp->iptrgcls; 1443 msg.class = imp->iptrgcls;
@@ -1433,17 +1452,16 @@ static void iucv_message_pending(struct iucv_irq_data *data)
1433} 1452}
1434 1453
1435/** 1454/**
1436 * iucv_tasklet_handler: 1455 * iucv_tasklet_fn:
1437 * 1456 *
1438 * This tasklet loops over the queue of irq buffers created by 1457 * This tasklet loops over the queue of irq buffers created by
1439 * iucv_external_interrupt, calls the appropriate action handler 1458 * iucv_external_interrupt, calls the appropriate action handler
1440 * and then frees the buffer. 1459 * and then frees the buffer.
1441 */ 1460 */
1442static void iucv_tasklet_handler(unsigned long ignored) 1461static void iucv_tasklet_fn(unsigned long ignored)
1443{ 1462{
1444 typedef void iucv_irq_fn(struct iucv_irq_data *); 1463 typedef void iucv_irq_fn(struct iucv_irq_data *);
1445 static iucv_irq_fn *irq_fn[] = { 1464 static iucv_irq_fn *irq_fn[] = {
1446 [0x01] = iucv_path_pending,
1447 [0x02] = iucv_path_complete, 1465 [0x02] = iucv_path_complete,
1448 [0x03] = iucv_path_severed, 1466 [0x03] = iucv_path_severed,
1449 [0x04] = iucv_path_quiesced, 1467 [0x04] = iucv_path_quiesced,
@@ -1453,38 +1471,70 @@ static void iucv_tasklet_handler(unsigned long ignored)
1453 [0x08] = iucv_message_pending, 1471 [0x08] = iucv_message_pending,
1454 [0x09] = iucv_message_pending, 1472 [0x09] = iucv_message_pending,
1455 }; 1473 };
1456 struct iucv_work *p; 1474 struct list_head task_queue = LIST_HEAD_INIT(task_queue);
1475 struct iucv_irq_list *p, *n;
1457 1476
1458 /* Serialize tasklet, iucv_path_sever and iucv_path_connect. */ 1477 /* Serialize tasklet, iucv_path_sever and iucv_path_connect. */
1459 spin_lock(&iucv_table_lock); 1478 spin_lock(&iucv_table_lock);
1460 iucv_tasklet_cpu = smp_processor_id(); 1479 iucv_active_cpu = smp_processor_id();
1461 1480
1462 spin_lock_irq(&iucv_work_lock); 1481 spin_lock_irq(&iucv_queue_lock);
1463 while (!list_empty(&iucv_work_queue)) { 1482 list_splice_init(&iucv_task_queue, &task_queue);
1464 p = list_entry(iucv_work_queue.next, struct iucv_work, list); 1483 spin_unlock_irq(&iucv_queue_lock);
1484
1485 list_for_each_entry_safe(p, n, &task_queue, list) {
1465 list_del_init(&p->list); 1486 list_del_init(&p->list);
1466 spin_unlock_irq(&iucv_work_lock);
1467 irq_fn[p->data.iptype](&p->data); 1487 irq_fn[p->data.iptype](&p->data);
1468 kfree(p); 1488 kfree(p);
1469 spin_lock_irq(&iucv_work_lock);
1470 } 1489 }
1471 spin_unlock_irq(&iucv_work_lock);
1472 1490
1473 iucv_tasklet_cpu = -1; 1491 iucv_active_cpu = -1;
1474 spin_unlock(&iucv_table_lock); 1492 spin_unlock(&iucv_table_lock);
1475} 1493}
1476 1494
1477/** 1495/**
1496 * iucv_work_fn:
1497 *
 1498 * This work function loops over the queue of path-pending irq blocks
1499 * created by iucv_external_interrupt, calls the appropriate action
1500 * handler and then frees the buffer.
1501 */
1502static void iucv_work_fn(struct work_struct *work)
1503{
1504 typedef void iucv_irq_fn(struct iucv_irq_data *);
1505 struct list_head work_queue = LIST_HEAD_INIT(work_queue);
1506 struct iucv_irq_list *p, *n;
1507
1508 /* Serialize tasklet, iucv_path_sever and iucv_path_connect. */
1509 spin_lock_bh(&iucv_table_lock);
1510 iucv_active_cpu = smp_processor_id();
1511
1512 spin_lock_irq(&iucv_queue_lock);
1513 list_splice_init(&iucv_work_queue, &work_queue);
1514 spin_unlock_irq(&iucv_queue_lock);
1515
1516 iucv_cleanup_queue();
1517 list_for_each_entry_safe(p, n, &work_queue, list) {
1518 list_del_init(&p->list);
1519 iucv_path_pending(&p->data);
1520 kfree(p);
1521 }
1522
1523 iucv_active_cpu = -1;
1524 spin_unlock_bh(&iucv_table_lock);
1525}
1526
1527/**
1478 * iucv_external_interrupt 1528 * iucv_external_interrupt
1479 * @code: irq code 1529 * @code: irq code
1480 * 1530 *
1481 * Handles external interrupts coming in from CP. 1531 * Handles external interrupts coming in from CP.
 1482 * Places the interrupt buffer on a queue and schedules iucv_tasklet_handler(). 1532 * Places the interrupt buffer on a queue and schedules iucv_tasklet_fn() or iucv_work_fn().
1483 */ 1533 */
1484static void iucv_external_interrupt(u16 code) 1534static void iucv_external_interrupt(u16 code)
1485{ 1535{
1486 struct iucv_irq_data *p; 1536 struct iucv_irq_data *p;
1487 struct iucv_work *work; 1537 struct iucv_irq_list *work;
1488 1538
1489 p = percpu_ptr(iucv_irq_data, smp_processor_id()); 1539 p = percpu_ptr(iucv_irq_data, smp_processor_id());
1490 if (p->ippathid >= iucv_max_pathid) { 1540 if (p->ippathid >= iucv_max_pathid) {
@@ -1498,16 +1548,23 @@ static void iucv_external_interrupt(u16 code)
1498 printk(KERN_ERR "iucv_do_int: unknown iucv interrupt\n"); 1548 printk(KERN_ERR "iucv_do_int: unknown iucv interrupt\n");
1499 return; 1549 return;
1500 } 1550 }
1501 work = kmalloc(sizeof(struct iucv_work), GFP_ATOMIC); 1551 work = kmalloc(sizeof(struct iucv_irq_list), GFP_ATOMIC);
1502 if (!work) { 1552 if (!work) {
1503 printk(KERN_WARNING "iucv_external_interrupt: out of memory\n"); 1553 printk(KERN_WARNING "iucv_external_interrupt: out of memory\n");
1504 return; 1554 return;
1505 } 1555 }
1506 memcpy(&work->data, p, sizeof(work->data)); 1556 memcpy(&work->data, p, sizeof(work->data));
1507 spin_lock(&iucv_work_lock); 1557 spin_lock(&iucv_queue_lock);
1508 list_add_tail(&work->list, &iucv_work_queue); 1558 if (p->iptype == 0x01) {
1509 spin_unlock(&iucv_work_lock); 1559 /* Path pending interrupt. */
1510 tasklet_schedule(&iucv_tasklet); 1560 list_add_tail(&work->list, &iucv_work_queue);
1561 schedule_work(&iucv_work);
1562 } else {
1563 /* The other interrupts. */
1564 list_add_tail(&work->list, &iucv_task_queue);
1565 tasklet_schedule(&iucv_tasklet);
1566 }
1567 spin_unlock(&iucv_queue_lock);
1511} 1568}
1512 1569
1513/** 1570/**
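
Two ideas carry this rewrite: each consumer splices the whole shared queue onto a private list under the lock and then runs the handlers lock-free, and path-pending interrupts (type 0x01), whose handlers may need to sleep, are routed to a workqueue while everything else stays in the tasklet. A minimal userspace sketch of both, with a mutex and singly linked lists standing in for the kernel primitives; all names are illustrative.

#include <pthread.h>
#include <stdlib.h>

struct event {
        struct event *next;
        int type;                       /* 0x01 = path pending */
};

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static struct event *task_queue;        /* fast events, tasklet side */
static struct event *work_queue;        /* sleepable events, work side */

static void handle_event(struct event *ev)
{
        (void)ev;                       /* irq_fn[ev->type](...) in iucv */
}

/* Steal the whole list in O(1) under the lock, run handlers outside it,
 * mirroring the list_splice_init() pattern in iucv_tasklet_fn(). */
static void tasklet_fn(void)
{
        struct event *local, *ev;

        pthread_mutex_lock(&queue_lock);
        local = task_queue;
        task_queue = NULL;
        pthread_mutex_unlock(&queue_lock);

        while ((ev = local)) {
                local = ev->next;
                handle_event(ev);
                free(ev);
        }
}

static void external_interrupt(struct event *ev)
{
        struct event **q = ev->type == 0x01 ? &work_queue : &task_queue;

        pthread_mutex_lock(&queue_lock);
        ev->next = *q;                  /* LIFO for brevity; iucv keeps FIFO */
        *q = ev;
        pthread_mutex_unlock(&queue_lock);
        /* ... then kick the matching consumer (tasklet vs. work) ... */
}

int main(void)
{
        struct event *ev = calloc(1, sizeof(*ev));

        if (ev) {
                ev->type = 0x02;
                external_interrupt(ev);
        }
        tasklet_fn();
        return 0;
}
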
@@ -1577,12 +1634,14 @@ out:
1577 */ 1634 */
1578static void iucv_exit(void) 1635static void iucv_exit(void)
1579{ 1636{
1580 struct iucv_work *p, *n; 1637 struct iucv_irq_list *p, *n;
1581 1638
1582 spin_lock_irq(&iucv_work_lock); 1639 spin_lock_irq(&iucv_queue_lock);
1640 list_for_each_entry_safe(p, n, &iucv_task_queue, list)
1641 kfree(p);
1583 list_for_each_entry_safe(p, n, &iucv_work_queue, list) 1642 list_for_each_entry_safe(p, n, &iucv_work_queue, list)
1584 kfree(p); 1643 kfree(p);
1585 spin_unlock_irq(&iucv_work_lock); 1644 spin_unlock_irq(&iucv_queue_lock);
1586 unregister_hotcpu_notifier(&iucv_cpu_notifier); 1645 unregister_hotcpu_notifier(&iucv_cpu_notifier);
1587 percpu_free(iucv_param); 1646 percpu_free(iucv_param);
1588 percpu_free(iucv_irq_data); 1647 percpu_free(iucv_irq_data);
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 11938fb20395..2fc0a92caa78 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -3987,7 +3987,7 @@ static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
3987 memcpy(&temp, &from->ipaddr, sizeof(temp)); 3987 memcpy(&temp, &from->ipaddr, sizeof(temp));
3988 sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp); 3988 sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp);
3989 addrlen = sctp_get_af_specific(sk->sk_family)->sockaddr_len; 3989 addrlen = sctp_get_af_specific(sk->sk_family)->sockaddr_len;
3990 if(space_left < addrlen) 3990 if (space_left < addrlen)
3991 return -ENOMEM; 3991 return -ENOMEM;
3992 if (copy_to_user(to, &temp, addrlen)) 3992 if (copy_to_user(to, &temp, addrlen))
3993 return -EFAULT; 3993 return -EFAULT;
@@ -4076,8 +4076,9 @@ done:
4076/* Helper function that copies local addresses to user and returns the number 4076/* Helper function that copies local addresses to user and returns the number
4077 * of addresses copied. 4077 * of addresses copied.
4078 */ 4078 */
4079static int sctp_copy_laddrs_to_user_old(struct sock *sk, __u16 port, int max_addrs, 4079static int sctp_copy_laddrs_old(struct sock *sk, __u16 port,
4080 void __user *to) 4080 int max_addrs, void *to,
4081 int *bytes_copied)
4081{ 4082{
4082 struct list_head *pos, *next; 4083 struct list_head *pos, *next;
4083 struct sctp_sockaddr_entry *addr; 4084 struct sctp_sockaddr_entry *addr;
@@ -4094,10 +4095,10 @@ static int sctp_copy_laddrs_to_user_old(struct sock *sk, __u16 port, int max_add
4094 sctp_get_pf_specific(sk->sk_family)->addr_v4map(sctp_sk(sk), 4095 sctp_get_pf_specific(sk->sk_family)->addr_v4map(sctp_sk(sk),
4095 &temp); 4096 &temp);
4096 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len; 4097 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
4097 if (copy_to_user(to, &temp, addrlen)) 4098 memcpy(to, &temp, addrlen);
4098 return -EFAULT;
4099 4099
4100 to += addrlen; 4100 to += addrlen;
4101 *bytes_copied += addrlen;
4101 cnt ++; 4102 cnt ++;
4102 if (cnt >= max_addrs) break; 4103 if (cnt >= max_addrs) break;
4103 } 4104 }
@@ -4105,8 +4106,8 @@ static int sctp_copy_laddrs_to_user_old(struct sock *sk, __u16 port, int max_add
4105 return cnt; 4106 return cnt;
4106} 4107}
4107 4108
4108static int sctp_copy_laddrs_to_user(struct sock *sk, __u16 port, 4109static int sctp_copy_laddrs(struct sock *sk, __u16 port, void *to,
4109 void __user **to, size_t space_left) 4110 size_t space_left, int *bytes_copied)
4110{ 4111{
4111 struct list_head *pos, *next; 4112 struct list_head *pos, *next;
4112 struct sctp_sockaddr_entry *addr; 4113 struct sctp_sockaddr_entry *addr;
@@ -4123,14 +4124,14 @@ static int sctp_copy_laddrs_to_user(struct sock *sk, __u16 port,
4123 sctp_get_pf_specific(sk->sk_family)->addr_v4map(sctp_sk(sk), 4124 sctp_get_pf_specific(sk->sk_family)->addr_v4map(sctp_sk(sk),
4124 &temp); 4125 &temp);
4125 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len; 4126 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
4126 if(space_left<addrlen) 4127 if (space_left < addrlen)
4127 return -ENOMEM; 4128 return -ENOMEM;
4128 if (copy_to_user(*to, &temp, addrlen)) 4129 memcpy(to, &temp, addrlen);
4129 return -EFAULT;
4130 4130
4131 *to += addrlen; 4131 to += addrlen;
4132 cnt ++; 4132 cnt ++;
4133 space_left -= addrlen; 4133 space_left -= addrlen;
 4134 *bytes_copied += addrlen;
4134 } 4135 }
4135 4136
4136 return cnt; 4137 return cnt;
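
Both helpers now share one calling convention: pack variable-length records into a caller-supplied kernel buffer and report the space consumed through an int *bytes_copied out-parameter (note the dereference when accumulating, matching the fix above). A self-contained sketch of that convention; the record layout and names are invented.

#include <string.h>

struct record {
        unsigned char len;
        unsigned char data[32];
};

static int pack_records(const struct record *src, int n,
                        void *to, size_t space_left, int *bytes_copied)
{
        int i;

        for (i = 0; i < n; i++) {
                size_t len = src[i].len;

                if (space_left < len)
                        return -1;      /* caller maps this to -ENOMEM */
                memcpy(to, src[i].data, len);
                to = (char *)to + len;
                space_left -= len;
                *bytes_copied += len;   /* accumulate through the pointer */
        }
        return n;                       /* number of records packed */
}

int main(void)
{
        struct record recs[2] = { { 4, "abcd" }, { 2, "xy" } };
        char buf[16];
        int copied = 0;

        return pack_records(recs, 2, buf, sizeof(buf), &copied) < 0;
}
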
@@ -4154,6 +4155,8 @@ static int sctp_getsockopt_local_addrs_old(struct sock *sk, int len,
4154 int addrlen; 4155 int addrlen;
4155 rwlock_t *addr_lock; 4156 rwlock_t *addr_lock;
4156 int err = 0; 4157 int err = 0;
4158 void *addrs;
4159 int bytes_copied = 0;
4157 4160
4158 if (len != sizeof(struct sctp_getaddrs_old)) 4161 if (len != sizeof(struct sctp_getaddrs_old))
4159 return -EINVAL; 4162 return -EINVAL;
@@ -4181,6 +4184,15 @@ static int sctp_getsockopt_local_addrs_old(struct sock *sk, int len,
4181 4184
4182 to = getaddrs.addrs; 4185 to = getaddrs.addrs;
4183 4186
4187 /* Allocate space for a local instance of packed array to hold all
 4188 * the data. We store addresses here first and then write them
4189 * to the user in one shot.
4190 */
4191 addrs = kmalloc(sizeof(union sctp_addr) * getaddrs.addr_num,
4192 GFP_KERNEL);
4193 if (!addrs)
4194 return -ENOMEM;
4195
4184 sctp_read_lock(addr_lock); 4196 sctp_read_lock(addr_lock);
4185 4197
4186 /* If the endpoint is bound to 0.0.0.0 or ::0, get the valid 4198 /* If the endpoint is bound to 0.0.0.0 or ::0, get the valid
@@ -4190,13 +4202,9 @@ static int sctp_getsockopt_local_addrs_old(struct sock *sk, int len,
4190 addr = list_entry(bp->address_list.next, 4202 addr = list_entry(bp->address_list.next,
4191 struct sctp_sockaddr_entry, list); 4203 struct sctp_sockaddr_entry, list);
4192 if (sctp_is_any(&addr->a)) { 4204 if (sctp_is_any(&addr->a)) {
4193 cnt = sctp_copy_laddrs_to_user_old(sk, bp->port, 4205 cnt = sctp_copy_laddrs_old(sk, bp->port,
4194 getaddrs.addr_num, 4206 getaddrs.addr_num,
4195 to); 4207 addrs, &bytes_copied);
4196 if (cnt < 0) {
4197 err = cnt;
4198 goto unlock;
4199 }
4200 goto copy_getaddrs; 4208 goto copy_getaddrs;
4201 } 4209 }
4202 } 4210 }
@@ -4206,22 +4214,29 @@ static int sctp_getsockopt_local_addrs_old(struct sock *sk, int len,
4206 memcpy(&temp, &addr->a, sizeof(temp)); 4214 memcpy(&temp, &addr->a, sizeof(temp));
4207 sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp); 4215 sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp);
4208 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len; 4216 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
 4209 if (copy_to_user(to, &temp, addrlen)) { 4217 memcpy(addrs + bytes_copied, &temp, addrlen);
4210 err = -EFAULT;
4211 goto unlock;
4212 }
4213 to += addrlen; 4218 to += addrlen;
4219 bytes_copied += addrlen;
4214 cnt ++; 4220 cnt ++;
4215 if (cnt >= getaddrs.addr_num) break; 4221 if (cnt >= getaddrs.addr_num) break;
4216 } 4222 }
4217 4223
4218copy_getaddrs: 4224copy_getaddrs:
4225 sctp_read_unlock(addr_lock);
4226
4227 /* copy the entire address list into the user provided space */
 4228 if (copy_to_user(getaddrs.addrs, addrs, bytes_copied)) {
4229 err = -EFAULT;
4230 goto error;
4231 }
4232
4233 /* copy the leading structure back to user */
4219 getaddrs.addr_num = cnt; 4234 getaddrs.addr_num = cnt;
4220 if (copy_to_user(optval, &getaddrs, sizeof(struct sctp_getaddrs_old))) 4235 if (copy_to_user(optval, &getaddrs, sizeof(struct sctp_getaddrs_old)))
4221 err = -EFAULT; 4236 err = -EFAULT;
4222 4237
4223unlock: 4238error:
4224 sctp_read_unlock(addr_lock); 4239 kfree(addrs);
4225 return err; 4240 return err;
4226} 4241}
4227 4242
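
The point of this rewrite: copy_to_user() can fault and sleep, which is not allowed under sctp's address read lock, so the addresses are staged into a kernel buffer while the lock is held and pushed to userspace in one shot after it is dropped. A userspace sketch of the same discipline, with a pthread rwlock and write(2) standing in for sctp_read_lock() and copy_to_user():

#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

static pthread_rwlock_t addr_lock = PTHREAD_RWLOCK_INITIALIZER;
static char shared_addrs[256];
static size_t shared_len;

static int dump_addrs(int fd)
{
        char *buf = malloc(sizeof(shared_addrs));
        size_t len;

        if (!buf)
                return -1;

        pthread_rwlock_rdlock(&addr_lock);
        len = shared_len;
        memcpy(buf, shared_addrs, len); /* stage while protected */
        pthread_rwlock_unlock(&addr_lock);

        /* the potentially blocking transfer runs with no lock held */
        if (write(fd, buf, len) != (ssize_t)len) {
                free(buf);
                return -1;
        }
        free(buf);
        return 0;
}

int main(void)
{
        return dump_addrs(STDOUT_FILENO);
}
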
@@ -4241,7 +4256,8 @@ static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
4241 rwlock_t *addr_lock; 4256 rwlock_t *addr_lock;
4242 int err = 0; 4257 int err = 0;
4243 size_t space_left; 4258 size_t space_left;
4244 int bytes_copied; 4259 int bytes_copied = 0;
4260 void *addrs;
4245 4261
4246 if (len <= sizeof(struct sctp_getaddrs)) 4262 if (len <= sizeof(struct sctp_getaddrs))
4247 return -EINVAL; 4263 return -EINVAL;
@@ -4269,6 +4285,9 @@ static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
4269 to = optval + offsetof(struct sctp_getaddrs,addrs); 4285 to = optval + offsetof(struct sctp_getaddrs,addrs);
4270 space_left = len - sizeof(struct sctp_getaddrs) - 4286 space_left = len - sizeof(struct sctp_getaddrs) -
4271 offsetof(struct sctp_getaddrs,addrs); 4287 offsetof(struct sctp_getaddrs,addrs);
4288 addrs = kmalloc(space_left, GFP_KERNEL);
4289 if (!addrs)
4290 return -ENOMEM;
4272 4291
4273 sctp_read_lock(addr_lock); 4292 sctp_read_lock(addr_lock);
4274 4293
@@ -4279,11 +4298,11 @@ static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
4279 addr = list_entry(bp->address_list.next, 4298 addr = list_entry(bp->address_list.next,
4280 struct sctp_sockaddr_entry, list); 4299 struct sctp_sockaddr_entry, list);
4281 if (sctp_is_any(&addr->a)) { 4300 if (sctp_is_any(&addr->a)) {
4282 cnt = sctp_copy_laddrs_to_user(sk, bp->port, 4301 cnt = sctp_copy_laddrs(sk, bp->port, addrs,
4283 &to, space_left); 4302 space_left, &bytes_copied);
4284 if (cnt < 0) { 4303 if (cnt < 0) {
4285 err = cnt; 4304 err = cnt;
4286 goto unlock; 4305 goto error;
4287 } 4306 }
4288 goto copy_getaddrs; 4307 goto copy_getaddrs;
4289 } 4308 }
@@ -4294,26 +4313,31 @@ static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
4294 memcpy(&temp, &addr->a, sizeof(temp)); 4313 memcpy(&temp, &addr->a, sizeof(temp));
4295 sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp); 4314 sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp);
4296 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len; 4315 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
4297 if(space_left < addrlen) 4316 if (space_left < addrlen) {
4298 return -ENOMEM; /*fixme: right error?*/ 4317 err = -ENOMEM; /*fixme: right error?*/
4299 if (copy_to_user(to, &temp, addrlen)) { 4318 goto error;
4300 err = -EFAULT;
4301 goto unlock;
4302 } 4319 }
 4320 memcpy(addrs + bytes_copied, &temp, addrlen);
4303 to += addrlen; 4321 to += addrlen;
4322 bytes_copied += addrlen;
4304 cnt ++; 4323 cnt ++;
4305 space_left -= addrlen; 4324 space_left -= addrlen;
4306 } 4325 }
4307 4326
4308copy_getaddrs: 4327copy_getaddrs:
4328 sctp_read_unlock(addr_lock);
4329
 4330 if (copy_to_user(optval + offsetof(struct sctp_getaddrs, addrs), addrs, bytes_copied)) {
4331 err = -EFAULT;
4332 goto error;
4333 }
4309 if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num)) 4334 if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num))
4310 return -EFAULT; 4335 return -EFAULT;
4311 bytes_copied = ((char __user *)to) - optval;
4312 if (put_user(bytes_copied, optlen)) 4336 if (put_user(bytes_copied, optlen))
4313 return -EFAULT; 4337 return -EFAULT;
4314 4338
4315unlock: 4339error:
4316 sctp_read_unlock(addr_lock); 4340 kfree(addrs);
4317 return err; 4341 return err;
4318} 4342}
4319 4343
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 762926009c04..263e34e45265 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -579,8 +579,22 @@ static inline int xfrm_byidx_should_resize(int total)
579 return 0; 579 return 0;
580} 580}
581 581
582static DEFINE_MUTEX(hash_resize_mutex); 582void xfrm_spd_getinfo(struct xfrm_spdinfo *si)
583{
584 read_lock_bh(&xfrm_policy_lock);
585 si->incnt = xfrm_policy_count[XFRM_POLICY_IN];
586 si->outcnt = xfrm_policy_count[XFRM_POLICY_OUT];
587 si->fwdcnt = xfrm_policy_count[XFRM_POLICY_FWD];
588 si->inscnt = xfrm_policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
589 si->outscnt = xfrm_policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
590 si->fwdscnt = xfrm_policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
591 si->spdhcnt = xfrm_idx_hmask;
592 si->spdhmcnt = xfrm_policy_hashmax;
593 read_unlock_bh(&xfrm_policy_lock);
594}
595EXPORT_SYMBOL(xfrm_spd_getinfo);
583 596
597static DEFINE_MUTEX(hash_resize_mutex);
584static void xfrm_hash_resize(struct work_struct *__unused) 598static void xfrm_hash_resize(struct work_struct *__unused)
585{ 599{
586 int dir, total; 600 int dir, total;
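
xfrm_spd_getinfo() follows the usual counter-snapshot shape: take the policy lock briefly, copy every counter into a caller-owned struct, and release the lock, so userspace sees one internally consistent view. A compilable sketch of that shape with illustrative names:

#include <pthread.h>

enum { DIR_IN, DIR_OUT, DIR_FWD, DIR_MAX };

struct spd_info {
        unsigned int in, out, fwd;
};

static pthread_rwlock_t policy_lock = PTHREAD_RWLOCK_INITIALIZER;
static unsigned int policy_count[DIR_MAX];

static void spd_getinfo(struct spd_info *si)
{
        pthread_rwlock_rdlock(&policy_lock);
        si->in  = policy_count[DIR_IN];
        si->out = policy_count[DIR_OUT];
        si->fwd = policy_count[DIR_FWD];
        pthread_rwlock_unlock(&policy_lock);
}

int main(void)
{
        struct spd_info si;

        spd_getinfo(&si);
        return (int)si.in;
}
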
@@ -1330,6 +1344,40 @@ xfrm_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int nx,
1330 return err; 1344 return err;
1331} 1345}
1332 1346
1347static inline int
1348xfrm_dst_alloc_copy(void **target, void *src, int size)
1349{
1350 if (!*target) {
1351 *target = kmalloc(size, GFP_ATOMIC);
1352 if (!*target)
1353 return -ENOMEM;
1354 }
1355 memcpy(*target, src, size);
1356 return 0;
1357}
1358
1359static inline int
1360xfrm_dst_update_parent(struct dst_entry *dst, struct xfrm_selector *sel)
1361{
1362#ifdef CONFIG_XFRM_SUB_POLICY
1363 struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
1364 return xfrm_dst_alloc_copy((void **)&(xdst->partner),
1365 sel, sizeof(*sel));
1366#else
1367 return 0;
1368#endif
1369}
1370
1371static inline int
1372xfrm_dst_update_origin(struct dst_entry *dst, struct flowi *fl)
1373{
1374#ifdef CONFIG_XFRM_SUB_POLICY
1375 struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
1376 return xfrm_dst_alloc_copy((void **)&(xdst->origin), fl, sizeof(*fl));
1377#else
1378 return 0;
1379#endif
1380}
1333 1381
1334static int stale_bundle(struct dst_entry *dst); 1382static int stale_bundle(struct dst_entry *dst);
1335 1383
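
xfrm_dst_alloc_copy() is a lazy-allocation helper: the first call allocates the slot, every later call just overwrites it, so a reused bundle pays for kmalloc only once. A standalone sketch, with malloc in place of kmalloc and -1 in place of -ENOMEM:

#include <stdlib.h>
#include <string.h>

static int alloc_copy(void **target, const void *src, size_t size)
{
        if (!*target) {
                *target = malloc(size);
                if (!*target)
                        return -1;
        }
        memcpy(*target, src, size);
        return 0;
}

int main(void)
{
        void *slot = NULL;
        int v1 = 1, v2 = 2;

        if (alloc_copy(&slot, &v1, sizeof(v1)))         /* allocates */
                return 1;
        if (alloc_copy(&slot, &v2, sizeof(v2)))         /* reuses slot */
                return 1;
        free(slot);
        return 0;
}
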
@@ -1518,6 +1566,18 @@ restart:
1518 err = -EHOSTUNREACH; 1566 err = -EHOSTUNREACH;
1519 goto error; 1567 goto error;
1520 } 1568 }
1569
1570 if (npols > 1)
1571 err = xfrm_dst_update_parent(dst, &pols[1]->selector);
1572 else
1573 err = xfrm_dst_update_origin(dst, fl);
1574 if (unlikely(err)) {
1575 write_unlock_bh(&policy->lock);
1576 if (dst)
1577 dst_free(dst);
1578 goto error;
1579 }
1580
1521 dst->next = policy->bundles; 1581 dst->next = policy->bundles;
1522 policy->bundles = dst; 1582 policy->bundles = dst;
1523 dst_hold(dst); 1583 dst_hold(dst);
@@ -1933,6 +1993,15 @@ int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *first,
1933 if (!dst_check(dst->path, ((struct xfrm_dst *)dst)->path_cookie) || 1993 if (!dst_check(dst->path, ((struct xfrm_dst *)dst)->path_cookie) ||
1934 (dst->dev && !netif_running(dst->dev))) 1994 (dst->dev && !netif_running(dst->dev)))
1935 return 0; 1995 return 0;
1996#ifdef CONFIG_XFRM_SUB_POLICY
1997 if (fl) {
1998 if (first->origin && !flow_cache_uli_match(first->origin, fl))
1999 return 0;
2000 if (first->partner &&
2001 !xfrm_selector_match(first->partner, fl, family))
2002 return 0;
2003 }
2004#endif
1936 2005
1937 last = NULL; 2006 last = NULL;
1938 2007
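
This is the enforcement half of the bundle change: a cached bundle remembers the flow (or sub-policy selector) it was built for, and xfrm_bundle_ok() refuses to reuse it for a flow that no longer matches. A toy sketch of the guard, with a three-field flow key standing in for struct flowi:

#include <stdbool.h>
#include <stddef.h>

struct flow_key {
        unsigned short sport, dport;
        unsigned int proto;
};

struct bundle {
        const struct flow_key *origin;  /* flow the bundle was built for */
};

static bool flow_match(const struct flow_key *a, const struct flow_key *b)
{
        return a->proto == b->proto &&
               a->sport == b->sport && a->dport == b->dport;
}

static bool bundle_ok(const struct bundle *b, const struct flow_key *fl)
{
        if (fl && b->origin && !flow_match(b->origin, fl))
                return false;           /* stale for this flow: rebuild */
        return true;
}

int main(void)
{
        struct flow_key origin = { 80, 443, 6 }, other = { 81, 443, 6 };
        struct bundle b = { .origin = &origin };

        return bundle_ok(&b, &other);   /* 0: this bundle must be rebuilt */
}
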
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 69110fed64b6..4210d91624cd 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -672,6 +672,81 @@ static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb,
672 return skb; 672 return skb;
673} 673}
674 674
675static int build_spdinfo(struct sk_buff *skb, u32 pid, u32 seq, u32 flags)
676{
677 struct xfrm_spdinfo si;
678 struct nlmsghdr *nlh;
679 u32 *f;
680
681 nlh = nlmsg_put(skb, pid, seq, XFRM_MSG_NEWSPDINFO, sizeof(u32), 0);
682 if (nlh == NULL) /* shouldn't really happen ... */
683 return -EMSGSIZE;
684
685 f = nlmsg_data(nlh);
686 *f = flags;
687 xfrm_spd_getinfo(&si);
688
689 if (flags & XFRM_SPD_HMASK)
690 NLA_PUT_U32(skb, XFRMA_SPDHMASK, si.spdhcnt);
691 if (flags & XFRM_SPD_HMAX)
692 NLA_PUT_U32(skb, XFRMA_SPDHMAX, si.spdhmcnt);
693 if (flags & XFRM_SPD_ICNT)
694 NLA_PUT_U32(skb, XFRMA_SPDICNT, si.incnt);
695 if (flags & XFRM_SPD_OCNT)
696 NLA_PUT_U32(skb, XFRMA_SPDOCNT, si.outcnt);
697 if (flags & XFRM_SPD_FCNT)
698 NLA_PUT_U32(skb, XFRMA_SPDFCNT, si.fwdcnt);
699 if (flags & XFRM_SPD_ISCNT)
700 NLA_PUT_U32(skb, XFRMA_SPDISCNT, si.inscnt);
701 if (flags & XFRM_SPD_OSCNT)
702 NLA_PUT_U32(skb, XFRMA_SPDOSCNT, si.outscnt);
703 if (flags & XFRM_SPD_FSCNT)
704 NLA_PUT_U32(skb, XFRMA_SPDFSCNT, si.fwdscnt);
705
706 return nlmsg_end(skb, nlh);
707
708nla_put_failure:
709 nlmsg_cancel(skb, nlh);
710 return -EMSGSIZE;
711}
712
713static int xfrm_get_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
714 struct rtattr **xfrma)
715{
716 struct sk_buff *r_skb;
717 u32 *flags = NLMSG_DATA(nlh);
718 u32 spid = NETLINK_CB(skb).pid;
719 u32 seq = nlh->nlmsg_seq;
720 int len = NLMSG_LENGTH(sizeof(u32));
721
722
723 if (*flags & XFRM_SPD_HMASK)
724 len += RTA_SPACE(sizeof(u32));
725 if (*flags & XFRM_SPD_HMAX)
726 len += RTA_SPACE(sizeof(u32));
727 if (*flags & XFRM_SPD_ICNT)
728 len += RTA_SPACE(sizeof(u32));
729 if (*flags & XFRM_SPD_OCNT)
730 len += RTA_SPACE(sizeof(u32));
731 if (*flags & XFRM_SPD_FCNT)
732 len += RTA_SPACE(sizeof(u32));
733 if (*flags & XFRM_SPD_ISCNT)
734 len += RTA_SPACE(sizeof(u32));
735 if (*flags & XFRM_SPD_OSCNT)
736 len += RTA_SPACE(sizeof(u32));
737 if (*flags & XFRM_SPD_FSCNT)
738 len += RTA_SPACE(sizeof(u32));
739
740 r_skb = alloc_skb(len, GFP_ATOMIC);
741 if (r_skb == NULL)
742 return -ENOMEM;
743
744 if (build_spdinfo(r_skb, spid, seq, *flags) < 0)
745 BUG();
746
747 return nlmsg_unicast(xfrm_nl, r_skb, spid);
748}
749
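
xfrm_get_spdinfo() sizes its reply from the request flags (room for one u32 attribute per requested bit), allocates exactly that much, and build_spdinfo() fills it in. The sketch below reproduces the arithmetic over a flat buffer instead of an skb; the flag names and the three attributes shown are illustrative, not the kernel's.

#include <stdint.h>
#include <stdlib.h>

#define SPD_HMASK (1u << 0)
#define SPD_HMAX  (1u << 1)
#define SPD_ICNT  (1u << 2)             /* ...one bit per attribute... */

static size_t reply_len(uint32_t flags)
{
        size_t len = sizeof(uint32_t);  /* the echoed flags word */
        uint32_t f;

        for (f = flags; f; f &= f - 1)  /* one u32 per set bit */
                len += sizeof(uint32_t);
        return len;
}

static void *build_reply(uint32_t flags, uint32_t hmask, uint32_t hmax,
                         uint32_t icnt, size_t *out_len)
{
        void *buf = malloc(reply_len(flags));
        uint32_t *p = buf;

        if (!buf)
                return NULL;
        *p++ = flags;
        if (flags & SPD_HMASK)
                *p++ = hmask;
        if (flags & SPD_HMAX)
                *p++ = hmax;
        if (flags & SPD_ICNT)
                *p++ = icnt;
        *out_len = (size_t)((char *)p - (char *)buf);
        return buf;
}

int main(void)
{
        size_t len;
        void *r = build_reply(SPD_HMASK | SPD_ICNT, 0xff, 0, 42, &len);

        free(r);
        return r == NULL;
}
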
675static int build_sadinfo(struct sk_buff *skb, u32 pid, u32 seq, u32 flags) 750static int build_sadinfo(struct sk_buff *skb, u32 pid, u32 seq, u32 flags)
676{ 751{
677 struct xfrm_sadinfo si; 752 struct xfrm_sadinfo si;
@@ -1879,6 +1954,7 @@ static const int xfrm_msg_min[XFRM_NR_MSGTYPES] = {
1879 [XFRM_MSG_REPORT - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_report), 1954 [XFRM_MSG_REPORT - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_report),
1880 [XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id), 1955 [XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
1881 [XFRM_MSG_GETSADINFO - XFRM_MSG_BASE] = NLMSG_LENGTH(sizeof(u32)), 1956 [XFRM_MSG_GETSADINFO - XFRM_MSG_BASE] = NLMSG_LENGTH(sizeof(u32)),
1957 [XFRM_MSG_GETSPDINFO - XFRM_MSG_BASE] = NLMSG_LENGTH(sizeof(u32)),
1882}; 1958};
1883 1959
1884#undef XMSGSIZE 1960#undef XMSGSIZE
@@ -1907,6 +1983,7 @@ static struct xfrm_link {
1907 [XFRM_MSG_GETAE - XFRM_MSG_BASE] = { .doit = xfrm_get_ae }, 1983 [XFRM_MSG_GETAE - XFRM_MSG_BASE] = { .doit = xfrm_get_ae },
1908 [XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = { .doit = xfrm_do_migrate }, 1984 [XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = { .doit = xfrm_do_migrate },
1909 [XFRM_MSG_GETSADINFO - XFRM_MSG_BASE] = { .doit = xfrm_get_sadinfo }, 1985 [XFRM_MSG_GETSADINFO - XFRM_MSG_BASE] = { .doit = xfrm_get_sadinfo },
1986 [XFRM_MSG_GETSPDINFO - XFRM_MSG_BASE] = { .doit = xfrm_get_spdinfo },
1910}; 1987};
1911 1988
1912static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) 1989static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
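
The last two hunks are pure registration: the new GETSPDINFO type gets an entry in xfrm_msg_min[] (minimum payload: one u32 of flags) and in the xfrm_link handler table, both indexed by (type - XFRM_MSG_BASE). A sketch of that table-driven receive path; the numeric values below are stand-ins, not the real XFRM message codes.

#include <stddef.h>

enum {
        MSG_BASE       = 0x10,          /* illustrative values only */
        MSG_GETSADINFO = 0x23,
        MSG_GETSPDINFO = 0x25,
        MSG_MAX        = 0x26,
};
#define NR_MSGTYPES (MSG_MAX - MSG_BASE)

typedef int (*msg_handler)(const void *payload, size_t len);

static int get_sadinfo(const void *p, size_t l) { (void)p; (void)l; return 0; }
static int get_spdinfo(const void *p, size_t l) { (void)p; (void)l; return 0; }

static const size_t msg_min[NR_MSGTYPES] = {
        [MSG_GETSADINFO - MSG_BASE] = sizeof(unsigned int),
        [MSG_GETSPDINFO - MSG_BASE] = sizeof(unsigned int),
};

static const msg_handler handlers[NR_MSGTYPES] = {
        [MSG_GETSADINFO - MSG_BASE] = get_sadinfo,
        [MSG_GETSPDINFO - MSG_BASE] = get_spdinfo,
};

static int rcv_msg(int type, const void *payload, size_t len)
{
        int idx = type - MSG_BASE;

        if (idx < 0 || idx >= NR_MSGTYPES || !handlers[idx])
                return -1;              /* kernel: -EOPNOTSUPP */
        if (len < msg_min[idx])
                return -2;              /* kernel: -EINVAL */
        return handlers[idx](payload, len);
}

int main(void)
{
        unsigned int flags = 0;

        return rcv_msg(MSG_GETSPDINFO, &flags, sizeof(flags));
}
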