Diffstat (limited to 'net')
-rw-r--r--  net/802/fddi.c | 2
-rw-r--r--  net/802/hippi.c | 2
-rw-r--r--  net/802/tr.c | 26
-rw-r--r--  net/appletalk/dev.c | 22
-rw-r--r--  net/bridge/br_device.c | 15
-rw-r--r--  net/bridge/br_if.c | 23
-rw-r--r--  net/bridge/br_input.c | 8
-rw-r--r--  net/bridge/br_notify.c | 9
-rw-r--r--  net/bridge/br_private.h | 1
-rw-r--r--  net/bridge/br_stp_bpdu.c | 3
-rw-r--r--  net/core/dev.c | 15
-rw-r--r--  net/core/ethtool.c | 22
-rw-r--r--  net/core/net-sysfs.c | 20
-rw-r--r--  net/core/sock.c | 14
-rw-r--r--  net/decnet/dn_dev.c | 2
-rw-r--r--  net/ethernet/eth.c | 2
-rw-r--r--  net/ipv4/af_inet.c | 3
-rw-r--r--  net/ipv4/devinet.c | 36
-rw-r--r--  net/ipv4/esp4.c | 2
-rw-r--r--  net/ipv4/icmp.c | 9
-rw-r--r--  net/ipv4/ip_input.c | 2
-rw-r--r--  net/ipv4/ip_output.c | 10
-rw-r--r--  net/ipv4/ipvs/Makefile | 2
-rw-r--r--  net/ipv4/ipvs/ip_vs_proto.c | 3
-rw-r--r--  net/ipv4/ipvs/ip_vs_proto_icmp.c | 182
-rw-r--r--  net/ipv4/ipvs/ip_vs_xmit.c | 1
-rw-r--r--  net/ipv4/multipath_drr.c | 20
-rw-r--r--  net/ipv4/multipath_random.c | 2
-rw-r--r--  net/ipv4/multipath_rr.c | 22
-rw-r--r--  net/ipv4/multipath_wrandom.c | 8
-rw-r--r--  net/ipv4/netfilter/ip_conntrack_core.c | 28
-rw-r--r--  net/ipv4/netfilter/ip_conntrack_standalone.c | 1
-rw-r--r--  net/ipv4/netfilter/ip_queue.c | 10
-rw-r--r--  net/ipv4/protocol.c | 2
-rw-r--r--  net/ipv4/raw.c | 2
-rw-r--r--  net/ipv4/route.c | 2
-rw-r--r--  net/ipv4/sysctl_net_ipv4.c | 9
-rw-r--r--  net/ipv4/tcp.c | 4
-rw-r--r--  net/ipv4/tcp_input.c | 13
-rw-r--r--  net/ipv4/tcp_minisocks.c | 2
-rw-r--r--  net/ipv4/tcp_output.c | 2
-rw-r--r--  net/ipv4/tcp_timer.c | 2
-rw-r--r--  net/ipv4/udp.c | 14
-rw-r--r--  net/ipv6/addrconf.c | 1
-rw-r--r--  net/ipv6/icmp.c | 14
-rw-r--r--  net/ipv6/ip6_flowlabel.c | 10
-rw-r--r--  net/ipv6/ip6_output.c | 14
-rw-r--r--  net/ipv6/ip6_tunnel.c | 1
-rw-r--r--  net/ipv6/ipv6_syms.c | 1
-rw-r--r--  net/ipv6/xfrm6_output.c | 1
-rw-r--r--  net/ipv6/xfrm6_policy.c | 4
-rw-r--r--  net/irda/irda_device.c | 2
-rw-r--r--  net/netlink/af_netlink.c | 16
-rw-r--r--  net/packet/af_packet.c | 2
-rw-r--r--  net/sched/Kconfig | 2
-rw-r--r--  net/sched/act_api.c | 2
-rw-r--r--  net/sched/cls_basic.c | 3
-rw-r--r--  net/sched/em_meta.c | 295
-rw-r--r--  net/sched/sch_dsmark.c | 16
-rw-r--r--  net/sched/sch_netem.c | 209
-rw-r--r--  net/sctp/input.c | 49
-rw-r--r--  net/sctp/ipv6.c | 36
-rw-r--r--  net/sctp/proc.c | 194
-rw-r--r--  net/sctp/protocol.c | 7
-rw-r--r--  net/sctp/socket.c | 12
-rw-r--r--  net/socket.c | 2
-rw-r--r--  net/unix/af_unix.c | 28
-rw-r--r--  net/xfrm/xfrm_algo.c | 2
-rw-r--r--  net/xfrm/xfrm_policy.c | 4
-rw-r--r--  net/xfrm/xfrm_user.c | 15
70 files changed, 917 insertions(+), 604 deletions(-)
diff --git a/net/802/fddi.c b/net/802/fddi.c
index f9a31a9f70f1..ebcf4830d6f1 100644
--- a/net/802/fddi.c
+++ b/net/802/fddi.c
@@ -10,7 +10,7 @@
  * Authors:	Lawrence V. Stefani, <stefani@lkg.dec.com>
  *
  *		fddi.c is based on previous eth.c and tr.c work by
- *		Ross Biro, <bir7@leland.Stanford.Edu>
+ *		Ross Biro
  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  *		Mark Evans, <evansmp@uhura.aston.ac.uk>
  *		Florian La Roche, <rzsfl@rz.uni-sb.de>
diff --git a/net/802/hippi.c b/net/802/hippi.c
index 4eb135c0afbb..051e8af56a77 100644
--- a/net/802/hippi.c
+++ b/net/802/hippi.c
@@ -7,7 +7,7 @@
  *
  * Version:	@(#)hippi.c	1.0.0	05/29/97
  *
- * Authors:	Ross Biro, <bir7@leland.Stanford.Edu>
+ * Authors:	Ross Biro
  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  *		Mark Evans, <evansmp@uhura.aston.ac.uk>
  *		Florian La Roche, <rzsfl@rz.uni-sb.de>
diff --git a/net/802/tr.c b/net/802/tr.c
index 85293ccf7efc..a755e880f4ba 100644
--- a/net/802/tr.c
+++ b/net/802/tr.c
@@ -47,12 +47,12 @@ static void rif_check_expire(unsigned long dummy);
  * Each RIF entry we learn is kept this way
  */
 
-struct rif_cache_s {
+struct rif_cache {
 	unsigned char addr[TR_ALEN];
 	int iface;
-	__u16 rcf;
-	__u16 rseg[8];
-	struct rif_cache_s *next;
+	__be16 rcf;
+	__be16 rseg[8];
+	struct rif_cache *next;
 	unsigned long last_used;
 	unsigned char local_ring;
 };
@@ -64,7 +64,7 @@ struct rif_cache_s {
  * up a lot.
  */
 
-static struct rif_cache_s *rif_table[RIF_TABLE_SIZE];
+static struct rif_cache *rif_table[RIF_TABLE_SIZE];
 
 static DEFINE_SPINLOCK(rif_lock);
 
@@ -249,7 +249,7 @@ void tr_source_route(struct sk_buff *skb,struct trh_hdr *trh,struct net_device *
 {
 	int slack;
 	unsigned int hash;
-	struct rif_cache_s *entry;
+	struct rif_cache *entry;
 	unsigned char *olddata;
 	static const unsigned char mcast_func_addr[]
 		= {0xC0,0x00,0x00,0x04,0x00,0x00};
@@ -337,7 +337,7 @@ printk("source routing for %02X:%02X:%02X:%02X:%02X:%02X\n",trh->daddr[0],
 static void tr_add_rif_info(struct trh_hdr *trh, struct net_device *dev)
 {
 	unsigned int hash, rii_p = 0;
-	struct rif_cache_s *entry;
+	struct rif_cache *entry;
 
 
 	spin_lock_bh(&rif_lock);
@@ -373,7 +373,7 @@ printk("adding rif_entry: addr:%02X:%02X:%02X:%02X:%02X:%02X rcf:%04X\n",
 	 * FIXME: We ought to keep some kind of cache size
 	 * limiting and adjust the timers to suit.
 	 */
-	entry=kmalloc(sizeof(struct rif_cache_s),GFP_ATOMIC);
+	entry=kmalloc(sizeof(struct rif_cache),GFP_ATOMIC);
 
 	if(!entry)
 	{
@@ -435,7 +435,7 @@ static void rif_check_expire(unsigned long dummy)
 	spin_lock_bh(&rif_lock);
 
 	for(i =0; i < RIF_TABLE_SIZE; i++) {
-		struct rif_cache_s *entry, **pentry;
+		struct rif_cache *entry, **pentry;
 
 		pentry = rif_table+i;
 		while((entry=*pentry) != NULL) {
@@ -467,10 +467,10 @@ static void rif_check_expire(unsigned long dummy)
 
 #ifdef CONFIG_PROC_FS
 
-static struct rif_cache_s *rif_get_idx(loff_t pos)
+static struct rif_cache *rif_get_idx(loff_t pos)
 {
 	int i;
-	struct rif_cache_s *entry;
+	struct rif_cache *entry;
 	loff_t off = 0;
 
 	for(i = 0; i < RIF_TABLE_SIZE; i++)
@@ -493,7 +493,7 @@ static void *rif_seq_start(struct seq_file *seq, loff_t *pos)
 static void *rif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
 	int i;
-	struct rif_cache_s *ent = v;
+	struct rif_cache *ent = v;
 
 	++*pos;
 
@@ -522,7 +522,7 @@ static void rif_seq_stop(struct seq_file *seq, void *v)
 static int rif_seq_show(struct seq_file *seq, void *v)
 {
 	int j, rcf_len, segment, brdgnmb;
-	struct rif_cache_s *entry = v;
+	struct rif_cache *entry = v;
 
 	if (v == SEQ_START_TOKEN)
 		seq_puts(seq,
diff --git a/net/appletalk/dev.c b/net/appletalk/dev.c
index 76598445d84b..1237e208e246 100644
--- a/net/appletalk/dev.c
+++ b/net/appletalk/dev.c
@@ -19,7 +19,7 @@ static int ltalk_mac_addr(struct net_device *dev, void *addr)
 	return -EINVAL;
 }
 
-void ltalk_setup(struct net_device *dev)
+static void ltalk_setup(struct net_device *dev)
 {
 	/* Fill in the fields of the device structure with localtalk-generic values. */
 
@@ -40,4 +40,22 @@ void ltalk_setup(struct net_device *dev)
 
 	dev->flags		= IFF_BROADCAST|IFF_MULTICAST|IFF_NOARP;
 }
-EXPORT_SYMBOL(ltalk_setup);
+
+/**
+ * alloc_ltalkdev - Allocates and sets up an localtalk device
+ * @sizeof_priv: Size of additional driver-private structure to be allocated
+ *	for this localtalk device
+ *
+ * Fill in the fields of the device structure with localtalk-generic
+ * values. Basically does everything except registering the device.
+ *
+ * Constructs a new net device, complete with a private data area of
+ * size @sizeof_priv. A 32-byte (not bit) alignment is enforced for
+ * this private data area.
+ */
+
+struct net_device *alloc_ltalkdev(int sizeof_priv)
+{
+	return alloc_netdev(sizeof_priv, "lt%d", ltalk_setup);
+}
+EXPORT_SYMBOL(alloc_ltalkdev);
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index d9b72fde433c..f564ee99782d 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -21,10 +21,7 @@
 
 static struct net_device_stats *br_dev_get_stats(struct net_device *dev)
 {
-	struct net_bridge *br;
-
-	br = dev->priv;
-
+	struct net_bridge *br = netdev_priv(dev);
 	return &br->statistics;
 }
 
@@ -54,9 +51,11 @@ int br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
 
 static int br_dev_open(struct net_device *dev)
 {
-	netif_start_queue(dev);
+	struct net_bridge *br = netdev_priv(dev);
 
-	br_stp_enable_bridge(dev->priv);
+	br_features_recompute(br);
+	netif_start_queue(dev);
+	br_stp_enable_bridge(br);
 
 	return 0;
 }
@@ -67,7 +66,7 @@ static void br_dev_set_multicast_list(struct net_device *dev)
 
 static int br_dev_stop(struct net_device *dev)
 {
-	br_stp_disable_bridge(dev->priv);
+	br_stp_disable_bridge(netdev_priv(dev));
 
 	netif_stop_queue(dev);
 
@@ -76,7 +75,7 @@ static int br_dev_stop(struct net_device *dev)
 
 static int br_change_mtu(struct net_device *dev, int new_mtu)
 {
-	if ((new_mtu < 68) || new_mtu > br_min_mtu(dev->priv))
+	if (new_mtu < 68 || new_mtu > br_min_mtu(netdev_priv(dev)))
 		return -EINVAL;
 
 	dev->mtu = new_mtu;
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 69872bf3b87e..91bb895375f4 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -314,6 +314,28 @@ int br_min_mtu(const struct net_bridge *br)
 	return mtu;
 }
 
+/*
+ * Recomputes features using slave's features
+ */
+void br_features_recompute(struct net_bridge *br)
+{
+	struct net_bridge_port *p;
+	unsigned long features, checksum;
+
+	features = NETIF_F_SG | NETIF_F_FRAGLIST
+		| NETIF_F_HIGHDMA | NETIF_F_TSO;
+	checksum = NETIF_F_IP_CSUM;	/* least commmon subset */
+
+	list_for_each_entry(p, &br->port_list, list) {
+		if (!(p->dev->features
+		      & (NETIF_F_IP_CSUM|NETIF_F_NO_CSUM|NETIF_F_HW_CSUM)))
+			checksum = 0;
+		features &= p->dev->features;
+	}
+
+	br->dev->features = features | checksum | NETIF_F_LLTX;
+}
+
 /* called with RTNL */
 int br_add_if(struct net_bridge *br, struct net_device *dev)
 {
@@ -368,6 +390,7 @@ int br_del_if(struct net_bridge *br, struct net_device *dev)
 
 	spin_lock_bh(&br->lock);
 	br_stp_recalculate_bridge_id(br);
+	br_features_recompute(br);
 	spin_unlock_bh(&br->lock);
 
 	return 0;
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 2b1cce46cab4..8f5f2e730992 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -26,7 +26,7 @@ static int br_pass_frame_up_finish(struct sk_buff *skb)
 #ifdef CONFIG_NETFILTER_DEBUG
 	skb->nf_debug = 0;
 #endif
-	netif_rx(skb);
+	netif_receive_skb(skb);
 
 	return 0;
 }
@@ -54,6 +54,9 @@ int br_handle_frame_finish(struct sk_buff *skb)
 	struct net_bridge_fdb_entry *dst;
 	int passedup = 0;
 
+	/* insert into forwarding database after filtering to avoid spoofing */
+	br_fdb_update(p->br, p, eth_hdr(skb)->h_source);
+
 	if (br->dev->flags & IFF_PROMISC) {
 		struct sk_buff *skb2;
 
@@ -108,8 +111,7 @@ int br_handle_frame(struct net_bridge_port *p, struct sk_buff **pskb)
 	if (!is_valid_ether_addr(eth_hdr(skb)->h_source))
 		goto err;
 
-	if (p->state == BR_STATE_LEARNING ||
-	    p->state == BR_STATE_FORWARDING)
+	if (p->state == BR_STATE_LEARNING)
 		br_fdb_update(p->br, p, eth_hdr(skb)->h_source);
 
 	if (p->br->stp_enabled &&
diff --git a/net/bridge/br_notify.c b/net/bridge/br_notify.c
index f8fb49e34764..917311c6828b 100644
--- a/net/bridge/br_notify.c
+++ b/net/bridge/br_notify.c
@@ -65,6 +65,15 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v
 		}
 		break;
 
+	case NETDEV_FEAT_CHANGE:
+		if (br->dev->flags & IFF_UP)
+			br_features_recompute(br);
+
+		/* could do recursive feature change notification
+		 * but who would care??
+		 */
+		break;
+
 	case NETDEV_DOWN:
 		if (br->dev->flags & IFF_UP)
 			br_stp_disable_port(p);
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 54d63f1372a0..bdf95a74d8cd 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -174,6 +174,7 @@ extern int br_add_if(struct net_bridge *br,
 extern int br_del_if(struct net_bridge *br,
 		     struct net_device *dev);
 extern int br_min_mtu(const struct net_bridge *br);
+extern void br_features_recompute(struct net_bridge *br);
 
 /* br_input.c */
 extern int br_handle_frame_finish(struct sk_buff *skb);
diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c
index b91a875aca01..d071f1c9ad0b 100644
--- a/net/bridge/br_stp_bpdu.c
+++ b/net/bridge/br_stp_bpdu.c
@@ -140,6 +140,9 @@ int br_stp_handle_bpdu(struct sk_buff *skb)
 	struct net_bridge *br = p->br;
 	unsigned char *buf;
 
+	/* insert into forwarding database after filtering to avoid spoofing */
+	br_fdb_update(p->br, p, eth_hdr(skb)->h_source);
+
 	/* need at least the 802 and STP headers */
 	if (!pskb_may_pull(skb, sizeof(header)+1) ||
 	    memcmp(skb->data, header, sizeof(header)))
diff --git a/net/core/dev.c b/net/core/dev.c
index f5f005846fe1..ab935778ce81 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -7,7 +7,7 @@
  *	2 of the License, or (at your option) any later version.
  *
  *	Derived from the non IP parts of dev.c 1.0.19
- * Authors:	Ross Biro, <bir7@leland.Stanford.Edu>
+ * Authors:	Ross Biro
  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  *		Mark Evans, <evansmp@uhura.aston.ac.uk>
  *
@@ -761,6 +761,18 @@ int dev_change_name(struct net_device *dev, char *newname)
 }
 
 /**
+ *	netdev_features_change - device changes fatures
+ *	@dev: device to cause notification
+ *
+ *	Called to indicate a device has changed features.
+ */
+void netdev_features_change(struct net_device *dev)
+{
+	notifier_call_chain(&netdev_chain, NETDEV_FEAT_CHANGE, dev);
+}
+EXPORT_SYMBOL(netdev_features_change);
+
+/**
  *	netdev_state_change - device changes state
  *	@dev: device to cause notification
  *
@@ -1732,6 +1744,7 @@ static int process_backlog(struct net_device *backlog_dev, int *budget)
 	struct softnet_data *queue = &__get_cpu_var(softnet_data);
 	unsigned long start_time = jiffies;
 
+	backlog_dev->weight = weight_p;
 	for (;;) {
 		struct sk_buff *skb;
 		struct net_device *dev;
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index f05fde97c43d..a3eeb88e1c81 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -29,7 +29,7 @@ u32 ethtool_op_get_link(struct net_device *dev)
 
 u32 ethtool_op_get_tx_csum(struct net_device *dev)
 {
-	return (dev->features & NETIF_F_IP_CSUM) != 0;
+	return (dev->features & (NETIF_F_IP_CSUM | NETIF_F_HW_CSUM)) != 0;
 }
 
 int ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
@@ -42,6 +42,15 @@ int ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
 	return 0;
 }
 
+int ethtool_op_set_tx_hw_csum(struct net_device *dev, u32 data)
+{
+	if (data)
+		dev->features |= NETIF_F_HW_CSUM;
+	else
+		dev->features &= ~NETIF_F_HW_CSUM;
+
+	return 0;
+}
 u32 ethtool_op_get_sg(struct net_device *dev)
 {
 	return (dev->features & NETIF_F_SG) != 0;
@@ -347,7 +356,7 @@ static int ethtool_set_coalesce(struct net_device *dev, void __user *useraddr)
 {
 	struct ethtool_coalesce coalesce;
 
-	if (!dev->ethtool_ops->get_coalesce)
+	if (!dev->ethtool_ops->set_coalesce)
 		return -EOPNOTSUPP;
 
 	if (copy_from_user(&coalesce, useraddr, sizeof(coalesce)))
@@ -682,6 +691,7 @@ int dev_ethtool(struct ifreq *ifr)
 	void __user *useraddr = ifr->ifr_data;
 	u32 ethcmd;
 	int rc;
+	unsigned long old_features;
 
 	/*
 	 * XXX: This can be pushed down into the ethtool_* handlers that
@@ -703,6 +713,8 @@ int dev_ethtool(struct ifreq *ifr)
 		if ((rc = dev->ethtool_ops->begin(dev)) < 0)
 			return rc;
 
+	old_features = dev->features;
+
 	switch (ethcmd) {
 	case ETHTOOL_GSET:
 		rc = ethtool_get_settings(dev, useraddr);
@@ -712,7 +724,6 @@ int dev_ethtool(struct ifreq *ifr)
 		break;
 	case ETHTOOL_GDRVINFO:
 		rc = ethtool_get_drvinfo(dev, useraddr);
-
 		break;
 	case ETHTOOL_GREGS:
 		rc = ethtool_get_regs(dev, useraddr);
@@ -801,6 +812,10 @@ int dev_ethtool(struct ifreq *ifr)
 
 	if(dev->ethtool_ops->complete)
 		dev->ethtool_ops->complete(dev);
+
+	if (old_features != dev->features)
+		netdev_features_change(dev);
+
 	return rc;
 
  ioctl:
@@ -817,3 +832,4 @@ EXPORT_SYMBOL(ethtool_op_get_tx_csum);
 EXPORT_SYMBOL(ethtool_op_set_sg);
 EXPORT_SYMBOL(ethtool_op_set_tso);
 EXPORT_SYMBOL(ethtool_op_set_tx_csum);
+EXPORT_SYMBOL(ethtool_op_set_tx_hw_csum);
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 060f703659e8..e2137f3e489d 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -21,6 +21,7 @@
 #define to_net_dev(class) container_of(class, struct net_device, class_dev)
 
 static const char fmt_hex[] = "%#x\n";
+static const char fmt_long_hex[] = "%#lx\n";
 static const char fmt_dec[] = "%d\n";
 static const char fmt_ulong[] = "%lu\n";
 
@@ -91,7 +92,7 @@ static CLASS_DEVICE_ATTR(field, S_IRUGO, show_##field, NULL) \
 NETDEVICE_ATTR(addr_len, fmt_dec);
 NETDEVICE_ATTR(iflink, fmt_dec);
 NETDEVICE_ATTR(ifindex, fmt_dec);
-NETDEVICE_ATTR(features, fmt_hex);
+NETDEVICE_ATTR(features, fmt_long_hex);
 NETDEVICE_ATTR(type, fmt_dec);
 
 /* use same locking rules as GIFHWADDR ioctl's */
@@ -184,6 +185,22 @@ static ssize_t store_tx_queue_len(struct class_device *dev, const char *buf, siz
 static CLASS_DEVICE_ATTR(tx_queue_len, S_IRUGO | S_IWUSR, show_tx_queue_len,
 			 store_tx_queue_len);
 
+NETDEVICE_SHOW(weight, fmt_dec);
+
+static int change_weight(struct net_device *net, unsigned long new_weight)
+{
+	net->weight = new_weight;
+	return 0;
+}
+
+static ssize_t store_weight(struct class_device *dev, const char *buf, size_t len)
+{
+	return netdev_store(dev, buf, len, change_weight);
+}
+
+static CLASS_DEVICE_ATTR(weight, S_IRUGO | S_IWUSR, show_weight,
+			 store_weight);
+
 
 static struct class_device_attribute *net_class_attributes[] = {
 	&class_device_attr_ifindex,
@@ -193,6 +210,7 @@ static struct class_device_attribute *net_class_attributes[] = {
 	&class_device_attr_features,
 	&class_device_attr_mtu,
 	&class_device_attr_flags,
+	&class_device_attr_weight,
 	&class_device_attr_type,
 	&class_device_attr_address,
 	&class_device_attr_broadcast,
diff --git a/net/core/sock.c b/net/core/sock.c
index 98171ddd7e7d..96e00b08698f 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -9,7 +9,7 @@
  *
  * Version:	$Id: sock.c,v 1.117 2002/02/01 22:01:03 davem Exp $
  *
- * Authors:	Ross Biro, <bir7@leland.Stanford.Edu>
+ * Authors:	Ross Biro
  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  *		Florian La Roche, <flla@stud.uni-sb.de>
  *		Alan Cox, <A.Cox@swansea.ac.uk>
@@ -635,7 +635,11 @@ struct sock *sk_alloc(int family, int priority, struct proto *prot, int zero_it)
 	if (zero_it) {
 		memset(sk, 0, prot->obj_size);
 		sk->sk_family = family;
-		sk->sk_prot = prot;
+		/*
+		 * See comment in struct sock definition to understand
+		 * why we need sk_prot_creator -acme
+		 */
+		sk->sk_prot = sk->sk_prot_creator = prot;
 		sock_lock_init(sk);
 	}
 
@@ -654,7 +658,7 @@ struct sock *sk_alloc(int family, int priority, struct proto *prot, int zero_it)
 void sk_free(struct sock *sk)
 {
 	struct sk_filter *filter;
-	struct module *owner = sk->sk_prot->owner;
+	struct module *owner = sk->sk_prot_creator->owner;
 
 	if (sk->sk_destruct)
 		sk->sk_destruct(sk);
@@ -672,8 +676,8 @@ void sk_free(struct sock *sk)
 		       __FUNCTION__, atomic_read(&sk->sk_omem_alloc));
 
 	security_sk_free(sk);
-	if (sk->sk_prot->slab != NULL)
-		kmem_cache_free(sk->sk_prot->slab, sk);
+	if (sk->sk_prot_creator->slab != NULL)
+		kmem_cache_free(sk->sk_prot_creator->slab, sk);
 	else
 		kfree(sk);
 	module_put(owner);
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index e6e23eb14428..ee7bf46eb78a 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -1426,7 +1426,7 @@ static struct rtnetlink_link dnet_rtnetlink_table[RTM_NR_MSGTYPES] =
 	[RTM_GETRULE  - RTM_BASE] = { .dumpit = dn_fib_dump_rules,	},
 #else
 	[RTM_GETROUTE - RTM_BASE] = { .doit   = dn_cache_getroute,
-				      .dumpit = dn_cache_dump,
+				      .dumpit = dn_cache_dump, },
 #endif
 
 };
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 16c4234cbe12..6617ea47d365 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -7,7 +7,7 @@
  *
  * Version:	@(#)eth.c	1.0.7	05/25/93
  *
- * Authors:	Ross Biro, <bir7@leland.Stanford.Edu>
+ * Authors:	Ross Biro
  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  *		Mark Evans, <evansmp@uhura.aston.ac.uk>
  *		Florian La Roche, <rzsfl@rz.uni-sb.de>
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index cdad47642ae7..03942f133944 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -7,7 +7,7 @@
  *
  * Version:	$Id: af_inet.c,v 1.137 2002/02/01 22:01:03 davem Exp $
  *
- * Authors:	Ross Biro, <bir7@leland.Stanford.Edu>
+ * Authors:	Ross Biro
  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  *		Florian La Roche, <flla@stud.uni-sb.de>
  *		Alan Cox, <A.Cox@swansea.ac.uk>
@@ -1181,6 +1181,7 @@ EXPORT_SYMBOL(inet_stream_connect);
 EXPORT_SYMBOL(inet_stream_ops);
 EXPORT_SYMBOL(inet_unregister_protosw);
 EXPORT_SYMBOL(net_statistics);
+EXPORT_SYMBOL(sysctl_ip_nonlocal_bind);
 
 #ifdef INET_REFCNT_DEBUG
 EXPORT_SYMBOL(inet_sock_nr);
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index abbc6d5c183e..478a30179a52 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -9,7 +9,7 @@
  *	2 of the License, or (at your option) any later version.
  *
  *	Derived from the IP parts of dev.c 1.0.19
- * Authors:	Ross Biro, <bir7@leland.Stanford.Edu>
+ * Authors:	Ross Biro
  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  *		Mark Evans, <evansmp@uhura.aston.ac.uk>
  *
@@ -233,11 +233,14 @@ int inet_addr_onlink(struct in_device *in_dev, u32 a, u32 b)
 static void inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
 			 int destroy)
 {
+	struct in_ifaddr *promote = NULL;
 	struct in_ifaddr *ifa1 = *ifap;
 
 	ASSERT_RTNL();
 
-	/* 1. Deleting primary ifaddr forces deletion all secondaries */
+	/* 1. Deleting primary ifaddr forces deletion all secondaries
+	 * unless alias promotion is set
+	 **/
 
 	if (!(ifa1->ifa_flags & IFA_F_SECONDARY)) {
 		struct in_ifaddr *ifa;
@@ -251,11 +254,16 @@ static void inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
 				continue;
 			}
 
-			*ifap1 = ifa->ifa_next;
+			if (!IN_DEV_PROMOTE_SECONDARIES(in_dev)) {
+				*ifap1 = ifa->ifa_next;
 
-			rtmsg_ifa(RTM_DELADDR, ifa);
-			notifier_call_chain(&inetaddr_chain, NETDEV_DOWN, ifa);
-			inet_free_ifa(ifa);
+				rtmsg_ifa(RTM_DELADDR, ifa);
+				notifier_call_chain(&inetaddr_chain, NETDEV_DOWN, ifa);
+				inet_free_ifa(ifa);
+			} else {
+				promote = ifa;
+				break;
+			}
 		}
 	}
 
@@ -281,6 +289,13 @@ static void inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
 		if (!in_dev->ifa_list)
 			inetdev_destroy(in_dev);
 	}
+
+	if (promote && IN_DEV_PROMOTE_SECONDARIES(in_dev)) {
+		/* not sure if we should send a delete notify first? */
+		promote->ifa_flags &= ~IFA_F_SECONDARY;
+		rtmsg_ifa(RTM_NEWADDR, promote);
+		notifier_call_chain(&inetaddr_chain, NETDEV_UP, promote);
+	}
 }
 
 static int inet_insert_ifa(struct in_ifaddr *ifa)
@@ -1384,6 +1399,15 @@ static struct devinet_sysctl_table {
 			.proc_handler	= &ipv4_doint_and_flush,
 			.strategy	= &ipv4_doint_and_flush_strategy,
 		},
+		{
+			.ctl_name	= NET_IPV4_CONF_PROMOTE_SECONDARIES,
+			.procname	= "promote_secondaries",
+			.data		= &ipv4_devconf.promote_secondaries,
+			.maxlen		= sizeof(int),
+			.mode		= 0644,
+			.proc_handler	= &ipv4_doint_and_flush,
+			.strategy	= &ipv4_doint_and_flush_strategy,
+		},
 	},
 	.devinet_dev = {
 		{
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 053a883247ba..eae84cc39d3f 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -478,7 +478,7 @@ static int __init esp4_init(void)
 {
 	struct xfrm_decap_state decap;
 
-	if (sizeof(struct esp_decap_data) <
+	if (sizeof(struct esp_decap_data) >
 	    sizeof(decap.decap_data)) {
 		extern void decap_data_too_small(void);
 
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 85bf0d3e294b..cb759484979d 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -207,6 +207,7 @@ int sysctl_icmp_ignore_bogus_error_responses;
 
 int sysctl_icmp_ratelimit = 1 * HZ;
 int sysctl_icmp_ratemask = 0x1818;
+int sysctl_icmp_errors_use_inbound_ifaddr;
 
 /*
  *	ICMP control array. This specifies what to do with each ICMP.
@@ -511,8 +512,12 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, u32 info)
 	 */
 
 	saddr = iph->daddr;
-	if (!(rt->rt_flags & RTCF_LOCAL))
-		saddr = 0;
+	if (!(rt->rt_flags & RTCF_LOCAL)) {
+		if (sysctl_icmp_errors_use_inbound_ifaddr)
+			saddr = inet_select_addr(skb_in->dev, 0, RT_SCOPE_LINK);
+		else
+			saddr = 0;
+	}
 
 	tos = icmp_pointers[type].error ? ((iph->tos & IPTOS_TOS_MASK) |
 					   IPTOS_PREC_INTERNETCONTROL) :
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index a0d0833034be..4e47a2658c7c 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -7,7 +7,7 @@
  *
  * Version:	$Id: ip_input.c,v 1.55 2002/01/12 07:39:45 davem Exp $
  *
- * Authors:	Ross Biro, <bir7@leland.Stanford.Edu>
+ * Authors:	Ross Biro
  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  *		Donald Becker, <becker@super.org>
  *		Alan Cox, <Alan.Cox@linux.org>
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 24fe3e00b42b..760dc8238d65 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -7,7 +7,7 @@
  *
  * Version:	$Id: ip_output.c,v 1.100 2002/02/01 22:01:03 davem Exp $
  *
- * Authors:	Ross Biro, <bir7@leland.Stanford.Edu>
+ * Authors:	Ross Biro
  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  *		Donald Becker, <becker@super.org>
  *		Alan Cox, <Alan.Cox@linux.org>
@@ -490,6 +490,14 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*))
 		/* Partially cloned skb? */
 		if (skb_shared(frag))
 			goto slow_path;
+
+		BUG_ON(frag->sk);
+		if (skb->sk) {
+			sock_hold(skb->sk);
+			frag->sk = skb->sk;
+			frag->destructor = sock_wfree;
+			skb->truesize -= frag->truesize;
+		}
 	}
 
 	/* Everything is OK. Generate! */
diff --git a/net/ipv4/ipvs/Makefile b/net/ipv4/ipvs/Makefile
index a788461a40c9..30e85de9ffff 100644
--- a/net/ipv4/ipvs/Makefile
+++ b/net/ipv4/ipvs/Makefile
@@ -11,7 +11,7 @@ ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_AH) += ip_vs_proto_ah.o
 
 ip_vs-objs :=	ip_vs_conn.o ip_vs_core.o ip_vs_ctl.o ip_vs_sched.o \
 		ip_vs_xmit.o ip_vs_app.o ip_vs_sync.o \
-		ip_vs_est.o ip_vs_proto.o ip_vs_proto_icmp.o \
+		ip_vs_est.o ip_vs_proto.o \
 		$(ip_vs_proto-objs-y)
 
 
diff --git a/net/ipv4/ipvs/ip_vs_proto.c b/net/ipv4/ipvs/ip_vs_proto.c
index 253c46252bd5..867d4e9c6594 100644
--- a/net/ipv4/ipvs/ip_vs_proto.c
+++ b/net/ipv4/ipvs/ip_vs_proto.c
@@ -216,9 +216,6 @@ int ip_vs_protocol_init(void)
 #ifdef CONFIG_IP_VS_PROTO_UDP
 	REGISTER_PROTOCOL(&ip_vs_protocol_udp);
 #endif
-#ifdef CONFIG_IP_VS_PROTO_ICMP
-	REGISTER_PROTOCOL(&ip_vs_protocol_icmp);
-#endif
 #ifdef CONFIG_IP_VS_PROTO_AH
 	REGISTER_PROTOCOL(&ip_vs_protocol_ah);
 #endif
diff --git a/net/ipv4/ipvs/ip_vs_proto_icmp.c b/net/ipv4/ipvs/ip_vs_proto_icmp.c
deleted file mode 100644
index 191e94aa1c1f..000000000000
--- a/net/ipv4/ipvs/ip_vs_proto_icmp.c
+++ /dev/null
@@ -1,182 +0,0 @@
-/*
- * ip_vs_proto_icmp.c:	ICMP load balancing support for IP Virtual Server
- *
- * Authors:	Julian Anastasov <ja@ssi.bg>, March 2002
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation;
- *
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/icmp.h>
-#include <linux/netfilter.h>
-#include <linux/netfilter_ipv4.h>
-
-#include <net/ip_vs.h>
-
-
-static int icmp_timeouts[1] = { 1*60*HZ };
-
-static char * icmp_state_name_table[1] = { "ICMP" };
-
-static struct ip_vs_conn *
-icmp_conn_in_get(const struct sk_buff *skb,
-		 struct ip_vs_protocol *pp,
-		 const struct iphdr *iph,
-		 unsigned int proto_off,
-		 int inverse)
-{
-#if 0
-	struct ip_vs_conn *cp;
-
-	if (likely(!inverse)) {
-		cp = ip_vs_conn_in_get(iph->protocol,
-			iph->saddr, 0,
-			iph->daddr, 0);
-	} else {
-		cp = ip_vs_conn_in_get(iph->protocol,
-			iph->daddr, 0,
-			iph->saddr, 0);
-	}
-
-	return cp;
-
-#else
-	return NULL;
-#endif
-}
-
-static struct ip_vs_conn *
-icmp_conn_out_get(const struct sk_buff *skb,
-		  struct ip_vs_protocol *pp,
-		  const struct iphdr *iph,
-		  unsigned int proto_off,
-		  int inverse)
-{
-#if 0
-	struct ip_vs_conn *cp;
-
-	if (likely(!inverse)) {
-		cp = ip_vs_conn_out_get(iph->protocol,
-			iph->saddr, 0,
-			iph->daddr, 0);
-	} else {
-		cp = ip_vs_conn_out_get(IPPROTO_UDP,
-			iph->daddr, 0,
-			iph->saddr, 0);
-	}
-
-	return cp;
-#else
-	return NULL;
-#endif
-}
-
-static int
-icmp_conn_schedule(struct sk_buff *skb, struct ip_vs_protocol *pp,
-		   int *verdict, struct ip_vs_conn **cpp)
-{
-	*verdict = NF_ACCEPT;
-	return 0;
-}
-
-static int
-icmp_csum_check(struct sk_buff *skb, struct ip_vs_protocol *pp)
-{
-	if (!(skb->nh.iph->frag_off & __constant_htons(IP_OFFSET))) {
-		if (skb->ip_summed != CHECKSUM_UNNECESSARY) {
-			if (ip_vs_checksum_complete(skb, skb->nh.iph->ihl * 4)) {
-				IP_VS_DBG_RL_PKT(0, pp, skb, 0, "Failed checksum for");
-				return 0;
-			}
-		}
-	}
-	return 1;
-}
-
-static void
-icmp_debug_packet(struct ip_vs_protocol *pp,
-		  const struct sk_buff *skb,
-		  int offset,
-		  const char *msg)
-{
-	char buf[256];
-	struct iphdr _iph, *ih;
-
-	ih = skb_header_pointer(skb, offset, sizeof(_iph), &_iph);
-	if (ih == NULL)
-		sprintf(buf, "%s TRUNCATED", pp->name);
-	else if (ih->frag_off & __constant_htons(IP_OFFSET))
-		sprintf(buf, "%s %u.%u.%u.%u->%u.%u.%u.%u frag",
-			pp->name, NIPQUAD(ih->saddr),
-			NIPQUAD(ih->daddr));
-	else {
-		struct icmphdr _icmph, *ic;
-
-		ic = skb_header_pointer(skb, offset + ih->ihl*4,
-					sizeof(_icmph), &_icmph);
-		if (ic == NULL)
-			sprintf(buf, "%s TRUNCATED to %u bytes\n",
-				pp->name, skb->len - offset);
-		else
-			sprintf(buf, "%s %u.%u.%u.%u->%u.%u.%u.%u T:%d C:%d",
-				pp->name, NIPQUAD(ih->saddr),
-				NIPQUAD(ih->daddr),
-				ic->type, ic->code);
-	}
-	printk(KERN_DEBUG "IPVS: %s: %s\n", msg, buf);
-}
-
-static int
-icmp_state_transition(struct ip_vs_conn *cp, int direction,
-		      const struct sk_buff *skb,
-		      struct ip_vs_protocol *pp)
-{
-	cp->timeout = pp->timeout_table[IP_VS_ICMP_S_NORMAL];
-	return 1;
-}
-
-static int
-icmp_set_state_timeout(struct ip_vs_protocol *pp, char *sname, int to)
-{
-	int num;
-	char **names;
-
-	num = IP_VS_ICMP_S_LAST;
-	names = icmp_state_name_table;
-	return ip_vs_set_state_timeout(pp->timeout_table, num, names, sname, to);
-}
-
-
-static void icmp_init(struct ip_vs_protocol *pp)
-{
-	pp->timeout_table = icmp_timeouts;
-}
-
-static void icmp_exit(struct ip_vs_protocol *pp)
-{
-}
-
-struct ip_vs_protocol ip_vs_protocol_icmp = {
-	.name =			"ICMP",
-	.protocol =		IPPROTO_ICMP,
-	.dont_defrag =		0,
-	.init =			icmp_init,
-	.exit =			icmp_exit,
-	.conn_schedule =	icmp_conn_schedule,
-	.conn_in_get =		icmp_conn_in_get,
-	.conn_out_get =		icmp_conn_out_get,
-	.snat_handler =		NULL,
-	.dnat_handler =		NULL,
-	.csum_check =		icmp_csum_check,
-	.state_transition =	icmp_state_transition,
-	.register_app =		NULL,
-	.unregister_app =	NULL,
-	.app_conn_bind =	NULL,
-	.debug_packet =		icmp_debug_packet,
-	.timeout_change =	NULL,
-	.set_state_timeout =	icmp_set_state_timeout,
-};
diff --git a/net/ipv4/ipvs/ip_vs_xmit.c b/net/ipv4/ipvs/ip_vs_xmit.c
index faa6176bbeb1..de21da00057f 100644
--- a/net/ipv4/ipvs/ip_vs_xmit.c
+++ b/net/ipv4/ipvs/ip_vs_xmit.c
@@ -508,7 +508,6 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 		rc = NF_ACCEPT;
 		/* do not touch skb anymore */
 		atomic_inc(&cp->in_pkts);
-		__ip_vs_conn_put(cp);
 		goto out;
 	}
 
diff --git a/net/ipv4/multipath_drr.c b/net/ipv4/multipath_drr.c
index 9349686131fc..c9cf8726051d 100644
--- a/net/ipv4/multipath_drr.c
+++ b/net/ipv4/multipath_drr.c
@@ -31,6 +31,7 @@
 #include <linux/igmp.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
+#include <linux/module.h>
 #include <linux/mroute.h>
 #include <linux/init.h>
 #include <net/ip.h>
@@ -57,7 +58,6 @@ struct multipath_device {
 
 static struct multipath_device state[MULTIPATH_MAX_DEVICECANDIDATES];
 static DEFINE_SPINLOCK(state_lock);
-static struct rtable *last_selection = NULL;
 
 static int inline __multipath_findslot(void)
 {
@@ -111,11 +111,6 @@ struct notifier_block drr_dev_notifier = {
 	.notifier_call	= drr_dev_event,
 };
 
-static void drr_remove(struct rtable *rt)
-{
-	if (last_selection == rt)
-		last_selection = NULL;
-}
 
 static void drr_safe_inc(atomic_t *usecount)
 {
@@ -144,14 +139,6 @@ static void drr_select_route(const struct flowi *flp,
 	int devidx = -1;
 	int cur_min_devidx = -1;
 
-	/* if necessary and possible utilize the old alternative */
-	if ((flp->flags & FLOWI_FLAG_MULTIPATHOLDROUTE) != 0 &&
-	    last_selection != NULL) {
-		result = last_selection;
-		*rp = result;
-		return;
-	}
-
 	/* 1. make sure all alt. nexthops have the same GC related data */
 	/* 2. determine the new candidate to be returned */
 	result = NULL;
@@ -229,12 +216,10 @@ static void drr_select_route(const struct flowi *flp,
 	}
 
 	*rp = result;
-	last_selection = result;
 }
 
 static struct ip_mp_alg_ops drr_ops = {
 	.mp_alg_select_route	=	drr_select_route,
-	.mp_alg_remove		=	drr_remove,
 };
 
 static int __init drr_init(void)
@@ -244,7 +229,7 @@ static int __init drr_init(void)
 	if (err)
 		return err;
 
-	err = multipath_alg_register(&drr_ops, IP_MP_ALG_RR);
+	err = multipath_alg_register(&drr_ops, IP_MP_ALG_DRR);
 	if (err)
 		goto fail;
 
@@ -263,3 +248,4 @@ static void __exit drr_exit(void)
 
 module_init(drr_init);
 module_exit(drr_exit);
+MODULE_LICENSE("GPL");
diff --git a/net/ipv4/multipath_random.c b/net/ipv4/multipath_random.c
index 805a16e47de5..5249dbe7c559 100644
--- a/net/ipv4/multipath_random.c
+++ b/net/ipv4/multipath_random.c
@@ -31,6 +31,7 @@
 #include <linux/igmp.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
+#include <linux/module.h>
 #include <linux/mroute.h>
 #include <linux/init.h>
 #include <net/ip.h>
@@ -126,3 +127,4 @@ static void __exit random_exit(void)
 
 module_init(random_init);
 module_exit(random_exit);
+MODULE_LICENSE("GPL");
diff --git a/net/ipv4/multipath_rr.c b/net/ipv4/multipath_rr.c
index 554a82568160..b6cd2870478f 100644
--- a/net/ipv4/multipath_rr.c
+++ b/net/ipv4/multipath_rr.c
@@ -31,6 +31,7 @@
 #include <linux/igmp.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
+#include <linux/module.h>
 #include <linux/mroute.h>
 #include <linux/init.h>
 #include <net/ip.h>
@@ -47,29 +48,12 @@
 #include <net/checksum.h>
 #include <net/ip_mp_alg.h>
 
-#define MULTIPATH_MAX_CANDIDATES 40
-
-static struct rtable* last_used = NULL;
-
-static void rr_remove(struct rtable *rt)
-{
-	if (last_used == rt)
-		last_used = NULL;
-}
-
 static void rr_select_route(const struct flowi *flp,
 			    struct rtable *first, struct rtable **rp)
 {
 	struct rtable *nh, *result, *min_use_cand = NULL;
 	int min_use = -1;
 
-	/* if necessary and possible utilize the old alternative */
-	if ((flp->flags & FLOWI_FLAG_MULTIPATHOLDROUTE) != 0 &&
-	    last_used != NULL) {
-		result = last_used;
-		goto out;
-	}
-
 	/* 1. make sure all alt. nexthops have the same GC related data
 	 * 2. determine the new candidate to be returned
 	 */
@@ -90,15 +74,12 @@ static void rr_select_route(const struct flowi *flp,
 	if (!result)
 		result = first;
 
-out:
-	last_used = result;
 	result->u.dst.__use++;
 	*rp = result;
 }
 
 static struct ip_mp_alg_ops rr_ops = {
 	.mp_alg_select_route	=	rr_select_route,
-	.mp_alg_remove		=	rr_remove,
 };
 
 static int __init rr_init(void)
@@ -113,3 +94,4 @@ static void __exit rr_exit(void)
 
 module_init(rr_init);
 module_exit(rr_exit);
+MODULE_LICENSE("GPL");
diff --git a/net/ipv4/multipath_wrandom.c b/net/ipv4/multipath_wrandom.c
index 10b23e1bece6..bd7d75b6abe0 100644
--- a/net/ipv4/multipath_wrandom.c
+++ b/net/ipv4/multipath_wrandom.c
@@ -31,6 +31,7 @@
 #include <linux/igmp.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
+#include <linux/module.h>
 #include <linux/mroute.h>
 #include <linux/init.h>
 #include <net/ip.h>
@@ -172,7 +173,7 @@ static void wrandom_select_route(const struct flowi *flp,
 		    multipath_comparekeys(&rt->fl, flp)) {
 			struct multipath_candidate* mpc =
 				(struct multipath_candidate*)
-				kmalloc(size_mpc, GFP_KERNEL);
+				kmalloc(size_mpc, GFP_ATOMIC);
 
 			if (!mpc)
 				return;
@@ -244,7 +245,7 @@ static void wrandom_set_nhinfo(__u32 network,
 	if (!target_route) {
 		const size_t size_rt = sizeof(struct multipath_route);
 		target_route = (struct multipath_route *)
-			kmalloc(size_rt, GFP_KERNEL);
+			kmalloc(size_rt, GFP_ATOMIC);
 
 		target_route->gw = nh->nh_gw;
 		target_route->oif = nh->nh_oif;
@@ -265,7 +266,7 @@ static void wrandom_set_nhinfo(__u32 network,
 	if (!target_dest) {
 		const size_t size_dst = sizeof(struct multipath_dest);
 		target_dest = (struct multipath_dest*)
-			kmalloc(size_dst, GFP_KERNEL);
+			kmalloc(size_dst, GFP_ATOMIC);
 
 		target_dest->nh_info = nh;
 		target_dest->network = network;
@@ -342,3 +343,4 @@ static void __exit wrandom_exit(void)
 
 module_init(wrandom_init);
 module_exit(wrandom_exit);
+MODULE_LICENSE("GPL");
diff --git a/net/ipv4/netfilter/ip_conntrack_core.c b/net/ipv4/netfilter/ip_conntrack_core.c
index 28d9425d5c39..09e824622977 100644
--- a/net/ipv4/netfilter/ip_conntrack_core.c
+++ b/net/ipv4/netfilter/ip_conntrack_core.c
@@ -940,37 +940,25 @@ void ip_ct_refresh_acct(struct ip_conntrack *ct,
 struct sk_buff *
 ip_ct_gather_frags(struct sk_buff *skb, u_int32_t user)
 {
-	struct sock *sk = skb->sk;
 #ifdef CONFIG_NETFILTER_DEBUG
 	unsigned int olddebug = skb->nf_debug;
 #endif
 
-	if (sk) {
-		sock_hold(sk);
-		skb_orphan(skb);
-	}
+	skb_orphan(skb);
 
 	local_bh_disable();
 	skb = ip_defrag(skb, user);
 	local_bh_enable();
 
-	if (!skb) {
-		if (sk)
-			sock_put(sk);
-		return skb;
-	}
-
-	if (sk) {
-		skb_set_owner_w(skb, sk);
-		sock_put(sk);
-	}
-
-	ip_send_check(skb->nh.iph);
-	skb->nfcache |= NFC_ALTERED;
+	if (skb) {
+		ip_send_check(skb->nh.iph);
+		skb->nfcache |= NFC_ALTERED;
 #ifdef CONFIG_NETFILTER_DEBUG
 	/* Packet path as if nothing had happened. */
 	skb->nf_debug = olddebug;
 #endif
+	}
+
 	return skb;
 }
 
diff --git a/net/ipv4/netfilter/ip_conntrack_standalone.c b/net/ipv4/netfilter/ip_conntrack_standalone.c
index 46ca45f74d85..bc59f7b39805 100644
--- a/net/ipv4/netfilter/ip_conntrack_standalone.c
+++ b/net/ipv4/netfilter/ip_conntrack_standalone.c
@@ -256,6 +256,7 @@ static void *exp_seq_next(struct seq_file *s, void *v, loff_t *pos)
 {
 	struct list_head *e = v;
 
+	++*pos;
 	e = e->next;
 
 	if (e == &ip_conntrack_expect_list)
diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
index e5746b674413..eda1fba431a4 100644
--- a/net/ipv4/netfilter/ip_queue.c
+++ b/net/ipv4/netfilter/ip_queue.c
@@ -3,6 +3,7 @@
  * communicating with userspace via netlink.
  *
  * (C) 2000-2002 James Morris <jmorris@intercode.com.au>
+ * (C) 2003-2005 Netfilter Core Team <coreteam@netfilter.org>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -17,6 +18,7 @@
  * 2005-01-10: Added /proc counter for dropped packets; fixed so
  *             packets aren't delivered to user space if they're going
  *             to be dropped.
+ * 2005-05-26: local_bh_{disable,enable} around nf_reinject (Harald Welte)
  *
  */
 #include <linux/module.h>
@@ -71,7 +73,15 @@ static DECLARE_MUTEX(ipqnl_sem);
 static void
 ipq_issue_verdict(struct ipq_queue_entry *entry, int verdict)
 {
+	/* TCP input path (and probably other bits) assume to be called
+	 * from softirq context, not from syscall, like ipq_issue_verdict is
+	 * called.  TCP input path deadlocks with locks taken from timer
+	 * softirq, e.g.  We therefore emulate this by local_bh_disable() */
+
+	local_bh_disable();
 	nf_reinject(entry->skb, entry->info, verdict);
+	local_bh_enable();
+
 	kfree(entry);
 }
 
diff --git a/net/ipv4/protocol.c b/net/ipv4/protocol.c
index 90a587cacaa4..0db405a869f2 100644
--- a/net/ipv4/protocol.c
+++ b/net/ipv4/protocol.c
@@ -7,7 +7,7 @@
  *
  * Version:	$Id: protocol.c,v 1.14 2001/05/18 02:25:49 davem Exp $
  *
- * Authors:	Ross Biro, <bir7@leland.Stanford.Edu>
+ * Authors:	Ross Biro
  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  *
  *	Fixes:
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 93624a32eb9a..5b1ec586bae6 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -7,7 +7,7 @@
  *
  * Version:	$Id: raw.c,v 1.64 2002/02/01 22:01:04 davem Exp $
  *
- * Authors:	Ross Biro, <bir7@leland.Stanford.Edu>
+ * Authors:	Ross Biro
  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  *
  *	Fixes:
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 199311746932..a682d28e247b 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -7,7 +7,7 @@
  *
  * Version:	$Id: route.c,v 1.103 2002/01/12 07:44:09 davem Exp $
  *
- * Authors:	Ross Biro, <bir7@leland.Stanford.Edu>
+ * Authors:	Ross Biro
  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  *		Alan Cox, <gw4pts@gw4pts.ampr.org>
  *		Linus Torvalds, <Linus.Torvalds@helsinki.fi>
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 3aafb298c1c1..23068bddbf0b 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -23,6 +23,7 @@ extern int sysctl_ip_nonlocal_bind;
 extern int sysctl_icmp_echo_ignore_all;
 extern int sysctl_icmp_echo_ignore_broadcasts;
 extern int sysctl_icmp_ignore_bogus_error_responses;
+extern int sysctl_icmp_errors_use_inbound_ifaddr;
 
 /* From ip_fragment.c */
 extern int sysctl_ipfrag_low_thresh;
@@ -396,6 +397,14 @@ ctl_table ipv4_table[] = {
 		.proc_handler	= &proc_dointvec
 	},
 	{
+		.ctl_name	= NET_IPV4_ICMP_ERRORS_USE_INBOUND_IFADDR,
+		.procname	= "icmp_errors_use_inbound_ifaddr",
+		.data		= &sysctl_icmp_errors_use_inbound_ifaddr,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec
+	},
+	{
 		.ctl_name	= NET_IPV4_ROUTE,
 		.procname	= "route",
 		.maxlen		= 0,
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 5cff56af7855..0d9a4fd5f1a4 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -7,7 +7,7 @@
7 * 7 *
8 * Version: $Id: tcp.c,v 1.216 2002/02/01 22:01:04 davem Exp $ 8 * Version: $Id: tcp.c,v 1.216 2002/02/01 22:01:04 davem Exp $
9 * 9 *
10 * Authors: Ross Biro, <bir7@leland.Stanford.Edu> 10 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk> 12 * Mark Evans, <evansmp@uhura.aston.ac.uk>
13 * Corey Minyard <wf-rch!minyard@relay.EU.net> 13 * Corey Minyard <wf-rch!minyard@relay.EU.net>
@@ -2338,7 +2338,7 @@ void __init tcp_init(void)
2338 (tcp_bhash_size * sizeof(struct tcp_bind_hashbucket)); 2338 (tcp_bhash_size * sizeof(struct tcp_bind_hashbucket));
2339 order++) 2339 order++)
2340 ; 2340 ;
2341 if (order > 4) { 2341 if (order >= 4) {
2342 sysctl_local_port_range[0] = 32768; 2342 sysctl_local_port_range[0] = 32768;
2343 sysctl_local_port_range[1] = 61000; 2343 sysctl_local_port_range[1] = 61000;
2344 sysctl_tcp_max_tw_buckets = 180000; 2344 sysctl_tcp_max_tw_buckets = 180000;
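The one-character change above matters only at the boundary: the page order is computed by the loop just before it (growing order until the bind-hash buckets fit), and the large-system defaults visible in the hunk (local ports 32768-61000, 180000 time-wait buckets) now also apply when the result is exactly order 4 rather than only above it. A small sketch of that arithmetic, assuming 4 KiB pages and made-up table sizes (not the real sizeof(struct tcp_bind_hashbucket) figures):

/* Find the page order needed for a table of `bytes` bytes and show which
 * side of the order-4 threshold it falls on under '>' versus '>='. */
#include <stdio.h>

#define PAGE_SIZE 4096UL

static int get_order(unsigned long bytes)
{
	int order = 0;

	while ((PAGE_SIZE << order) < bytes)
		order++;
	return order;
}

int main(void)
{
	unsigned long sizes[] = { 16 * 1024, 64 * 1024, 256 * 1024 };

	for (int i = 0; i < 3; i++) {
		int order = get_order(sizes[i]);

		printf("%7lu bytes -> order %d: '> 4' says %s, '>= 4' says %s\n",
		       sizes[i], order,
		       order > 4 ? "large" : "small",
		       order >= 4 ? "large" : "small");
	}
	return 0;
}

A 64 KiB table is exactly order 4 with 4 KiB pages, so it is the case whose classification flips with this patch.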
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 6984042c0927..5bad504630a3 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -7,7 +7,7 @@
7 * 7 *
8 * Version: $Id: tcp_input.c,v 1.243 2002/02/01 22:01:04 davem Exp $ 8 * Version: $Id: tcp_input.c,v 1.243 2002/02/01 22:01:04 davem Exp $
9 * 9 *
10 * Authors: Ross Biro, <bir7@leland.Stanford.Edu> 10 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk> 12 * Mark Evans, <evansmp@uhura.aston.ac.uk>
13 * Corey Minyard <wf-rch!minyard@relay.EU.net> 13 * Corey Minyard <wf-rch!minyard@relay.EU.net>
@@ -4355,16 +4355,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
4355 goto no_ack; 4355 goto no_ack;
4356 } 4356 }
4357 4357
4358 if (eaten) { 4358 __tcp_ack_snd_check(sk, 0);
4359 if (tcp_in_quickack_mode(tp)) {
4360 tcp_send_ack(sk);
4361 } else {
4362 tcp_send_delayed_ack(sk);
4363 }
4364 } else {
4365 __tcp_ack_snd_check(sk, 0);
4366 }
4367
4368no_ack: 4359no_ack:
4369 if (eaten) 4360 if (eaten)
4370 __kfree_skb(skb); 4361 __kfree_skb(skb);
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index fd70509f0d53..eea1a17a9ac2 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -7,7 +7,7 @@
7 * 7 *
8 * Version: $Id: tcp_minisocks.c,v 1.15 2002/02/01 22:01:04 davem Exp $ 8 * Version: $Id: tcp_minisocks.c,v 1.15 2002/02/01 22:01:04 davem Exp $
9 * 9 *
10 * Authors: Ross Biro, <bir7@leland.Stanford.Edu> 10 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk> 12 * Mark Evans, <evansmp@uhura.aston.ac.uk>
13 * Corey Minyard <wf-rch!minyard@relay.EU.net> 13 * Corey Minyard <wf-rch!minyard@relay.EU.net>
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index a12df6979ffd..fa24e7ae1f40 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -7,7 +7,7 @@
7 * 7 *
8 * Version: $Id: tcp_output.c,v 1.146 2002/02/01 22:01:04 davem Exp $ 8 * Version: $Id: tcp_output.c,v 1.146 2002/02/01 22:01:04 davem Exp $
9 * 9 *
10 * Authors: Ross Biro, <bir7@leland.Stanford.Edu> 10 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk> 12 * Mark Evans, <evansmp@uhura.aston.ac.uk>
13 * Corey Minyard <wf-rch!minyard@relay.EU.net> 13 * Corey Minyard <wf-rch!minyard@relay.EU.net>
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 85b279f1e935..799ebe061e2c 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -7,7 +7,7 @@
7 * 7 *
8 * Version: $Id: tcp_timer.c,v 1.88 2002/02/01 22:01:04 davem Exp $ 8 * Version: $Id: tcp_timer.c,v 1.88 2002/02/01 22:01:04 davem Exp $
9 * 9 *
10 * Authors: Ross Biro, <bir7@leland.Stanford.Edu> 10 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk> 12 * Mark Evans, <evansmp@uhura.aston.ac.uk>
13 * Corey Minyard <wf-rch!minyard@relay.EU.net> 13 * Corey Minyard <wf-rch!minyard@relay.EU.net>
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 8a213238f287..7c24e64b443f 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -7,7 +7,7 @@
7 * 7 *
8 * Version: $Id: udp.c,v 1.102 2002/02/01 22:01:04 davem Exp $ 8 * Version: $Id: udp.c,v 1.102 2002/02/01 22:01:04 davem Exp $
9 * 9 *
10 * Authors: Ross Biro, <bir7@leland.Stanford.Edu> 10 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Arnt Gulbrandsen, <agulbra@nvg.unit.no> 12 * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
13 * Alan Cox, <Alan.Cox@linux.org> 13 * Alan Cox, <Alan.Cox@linux.org>
@@ -738,7 +738,7 @@ int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
738 unsigned long amount; 738 unsigned long amount;
739 739
740 amount = 0; 740 amount = 0;
741 spin_lock_irq(&sk->sk_receive_queue.lock); 741 spin_lock_bh(&sk->sk_receive_queue.lock);
742 skb = skb_peek(&sk->sk_receive_queue); 742 skb = skb_peek(&sk->sk_receive_queue);
743 if (skb != NULL) { 743 if (skb != NULL) {
744 /* 744 /*
@@ -748,7 +748,7 @@ int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
748 */ 748 */
749 amount = skb->len - sizeof(struct udphdr); 749 amount = skb->len - sizeof(struct udphdr);
750 } 750 }
751 spin_unlock_irq(&sk->sk_receive_queue.lock); 751 spin_unlock_bh(&sk->sk_receive_queue.lock);
752 return put_user(amount, (int __user *)arg); 752 return put_user(amount, (int __user *)arg);
753 } 753 }
754 754
@@ -848,12 +848,12 @@ csum_copy_err:
848 /* Clear queue. */ 848 /* Clear queue. */
849 if (flags&MSG_PEEK) { 849 if (flags&MSG_PEEK) {
850 int clear = 0; 850 int clear = 0;
851 spin_lock_irq(&sk->sk_receive_queue.lock); 851 spin_lock_bh(&sk->sk_receive_queue.lock);
852 if (skb == skb_peek(&sk->sk_receive_queue)) { 852 if (skb == skb_peek(&sk->sk_receive_queue)) {
853 __skb_unlink(skb, &sk->sk_receive_queue); 853 __skb_unlink(skb, &sk->sk_receive_queue);
854 clear = 1; 854 clear = 1;
855 } 855 }
856 spin_unlock_irq(&sk->sk_receive_queue.lock); 856 spin_unlock_bh(&sk->sk_receive_queue.lock);
857 if (clear) 857 if (clear)
858 kfree_skb(skb); 858 kfree_skb(skb);
859 } 859 }
@@ -1334,7 +1334,7 @@ unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait)
1334 struct sk_buff_head *rcvq = &sk->sk_receive_queue; 1334 struct sk_buff_head *rcvq = &sk->sk_receive_queue;
1335 struct sk_buff *skb; 1335 struct sk_buff *skb;
1336 1336
1337 spin_lock_irq(&rcvq->lock); 1337 spin_lock_bh(&rcvq->lock);
1338 while ((skb = skb_peek(rcvq)) != NULL) { 1338 while ((skb = skb_peek(rcvq)) != NULL) {
1339 if (udp_checksum_complete(skb)) { 1339 if (udp_checksum_complete(skb)) {
1340 UDP_INC_STATS_BH(UDP_MIB_INERRORS); 1340 UDP_INC_STATS_BH(UDP_MIB_INERRORS);
@@ -1345,7 +1345,7 @@ unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait)
1345 break; 1345 break;
1346 } 1346 }
1347 } 1347 }
1348 spin_unlock_irq(&rcvq->lock); 1348 spin_unlock_bh(&rcvq->lock);
1349 1349
1350 /* nothing to see, move along */ 1350 /* nothing to see, move along */
1351 if (skb == NULL) 1351 if (skb == NULL)
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 7744a2592693..2720899d516c 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -372,6 +372,7 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev)
372 ndev->regen_timer.data = (unsigned long) ndev; 372 ndev->regen_timer.data = (unsigned long) ndev;
373 if ((dev->flags&IFF_LOOPBACK) || 373 if ((dev->flags&IFF_LOOPBACK) ||
374 dev->type == ARPHRD_TUNNEL || 374 dev->type == ARPHRD_TUNNEL ||
375 dev->type == ARPHRD_NONE ||
375 dev->type == ARPHRD_SIT) { 376 dev->type == ARPHRD_SIT) {
376 printk(KERN_INFO 377 printk(KERN_INFO
377 "Disabled Privacy Extensions on device %p(%s)\n", 378 "Disabled Privacy Extensions on device %p(%s)\n",
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 8e0f569b883e..ff3ec9822e36 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -277,8 +277,8 @@ void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info,
277{ 277{
278 struct inet6_dev *idev = NULL; 278 struct inet6_dev *idev = NULL;
279 struct ipv6hdr *hdr = skb->nh.ipv6h; 279 struct ipv6hdr *hdr = skb->nh.ipv6h;
280 struct sock *sk = icmpv6_socket->sk; 280 struct sock *sk;
281 struct ipv6_pinfo *np = inet6_sk(sk); 281 struct ipv6_pinfo *np;
282 struct in6_addr *saddr = NULL; 282 struct in6_addr *saddr = NULL;
283 struct dst_entry *dst; 283 struct dst_entry *dst;
284 struct icmp6hdr tmp_hdr; 284 struct icmp6hdr tmp_hdr;
@@ -358,6 +358,9 @@ void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info,
358 if (icmpv6_xmit_lock()) 358 if (icmpv6_xmit_lock())
359 return; 359 return;
360 360
361 sk = icmpv6_socket->sk;
362 np = inet6_sk(sk);
363
361 if (!icmpv6_xrlim_allow(sk, type, &fl)) 364 if (!icmpv6_xrlim_allow(sk, type, &fl))
362 goto out; 365 goto out;
363 366
@@ -423,9 +426,9 @@ out:
423 426
424static void icmpv6_echo_reply(struct sk_buff *skb) 427static void icmpv6_echo_reply(struct sk_buff *skb)
425{ 428{
426 struct sock *sk = icmpv6_socket->sk; 429 struct sock *sk;
427 struct inet6_dev *idev; 430 struct inet6_dev *idev;
428 struct ipv6_pinfo *np = inet6_sk(sk); 431 struct ipv6_pinfo *np;
429 struct in6_addr *saddr = NULL; 432 struct in6_addr *saddr = NULL;
430 struct icmp6hdr *icmph = (struct icmp6hdr *) skb->h.raw; 433 struct icmp6hdr *icmph = (struct icmp6hdr *) skb->h.raw;
431 struct icmp6hdr tmp_hdr; 434 struct icmp6hdr tmp_hdr;
@@ -454,6 +457,9 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
454 if (icmpv6_xmit_lock()) 457 if (icmpv6_xmit_lock())
455 return; 458 return;
456 459
460 sk = icmpv6_socket->sk;
461 np = inet6_sk(sk);
462
457 if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst)) 463 if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst))
458 fl.oif = np->mcast_oif; 464 fl.oif = np->mcast_oif;
459 465
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index a93f6dc51979..0e5f7499debb 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -535,10 +535,12 @@ release:
535 if (err) 535 if (err)
536 goto done; 536 goto done;
537 537
538 /* Do not check for fault */ 538 if (!freq.flr_label) {
539 if (!freq.flr_label) 539 if (copy_to_user(&((struct in6_flowlabel_req __user *) optval)->flr_label,
540 copy_to_user(&((struct in6_flowlabel_req __user *) optval)->flr_label, 540 &fl->label, sizeof(fl->label))) {
541 &fl->label, sizeof(fl->label)); 541 /* Intentionally ignore fault. */
542 }
543 }
542 544
543 sfl1->fl = fl; 545 sfl1->fl = fl;
544 sfl1->next = np->ipv6_fl_list; 546 sfl1->next = np->ipv6_fl_list;
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 0f0711417c9d..b78a53586804 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -552,13 +552,17 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
552 skb_headroom(frag) < hlen) 552 skb_headroom(frag) < hlen)
553 goto slow_path; 553 goto slow_path;
554 554
555 /* Correct socket ownership. */
556 if (frag->sk == NULL)
557 goto slow_path;
558
559 /* Partially cloned skb? */ 555 /* Partially cloned skb? */
560 if (skb_shared(frag)) 556 if (skb_shared(frag))
561 goto slow_path; 557 goto slow_path;
558
559 BUG_ON(frag->sk);
560 if (skb->sk) {
561 sock_hold(skb->sk);
562 frag->sk = skb->sk;
563 frag->destructor = sock_wfree;
564 skb->truesize -= frag->truesize;
565 }
562 } 566 }
563 567
564 err = 0; 568 err = 0;
@@ -1116,12 +1120,10 @@ int ip6_push_pending_frames(struct sock *sk)
1116 tail_skb = &(tmp_skb->next); 1120 tail_skb = &(tmp_skb->next);
1117 skb->len += tmp_skb->len; 1121 skb->len += tmp_skb->len;
1118 skb->data_len += tmp_skb->len; 1122 skb->data_len += tmp_skb->len;
1119#if 0 /* Logically correct, but useless work, ip_fragment() will have to undo */
1120 skb->truesize += tmp_skb->truesize; 1123 skb->truesize += tmp_skb->truesize;
1121 __sock_put(tmp_skb->sk); 1124 __sock_put(tmp_skb->sk);
1122 tmp_skb->destructor = NULL; 1125 tmp_skb->destructor = NULL;
1123 tmp_skb->sk = NULL; 1126 tmp_skb->sk = NULL;
1124#endif
1125 } 1127 }
1126 1128
1127 ipv6_addr_copy(final_dst, &fl->fl6_dst); 1129 ipv6_addr_copy(final_dst, &fl->fl6_dst);
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 3b1c9fa184ae..ba3b0c267f75 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -882,6 +882,7 @@ ip6ip6_tnl_change(struct ip6_tnl *t, struct ip6_tnl_parm *p)
882 t->parms.hop_limit = p->hop_limit; 882 t->parms.hop_limit = p->hop_limit;
883 t->parms.encap_limit = p->encap_limit; 883 t->parms.encap_limit = p->encap_limit;
884 t->parms.flowinfo = p->flowinfo; 884 t->parms.flowinfo = p->flowinfo;
885 t->parms.link = p->link;
885 ip6ip6_tnl_link_config(t); 886 ip6ip6_tnl_link_config(t);
886 return 0; 887 return 0;
887} 888}
diff --git a/net/ipv6/ipv6_syms.c b/net/ipv6/ipv6_syms.c
index 2f4c91ddc9a3..5ade5a5d1990 100644
--- a/net/ipv6/ipv6_syms.c
+++ b/net/ipv6/ipv6_syms.c
@@ -37,5 +37,4 @@ EXPORT_SYMBOL(in6_dev_finish_destroy);
37EXPORT_SYMBOL(xfrm6_rcv); 37EXPORT_SYMBOL(xfrm6_rcv);
38#endif 38#endif
39EXPORT_SYMBOL(rt6_lookup); 39EXPORT_SYMBOL(rt6_lookup);
40EXPORT_SYMBOL(fl6_sock_lookup);
41EXPORT_SYMBOL(ipv6_push_nfrag_opts); 40EXPORT_SYMBOL(ipv6_push_nfrag_opts);
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index 601a148f60f3..6b9867717d11 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -84,6 +84,7 @@ static int xfrm6_tunnel_check_size(struct sk_buff *skb)
84 mtu = IPV6_MIN_MTU; 84 mtu = IPV6_MIN_MTU;
85 85
86 if (skb->len > mtu) { 86 if (skb->len > mtu) {
87 skb->dev = dst->dev;
87 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev); 88 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
88 ret = -EMSGSIZE; 89 ret = -EMSGSIZE;
89 } 90 }
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 4429b1a1fe5f..cf1d91e74c82 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -113,6 +113,8 @@ __xfrm6_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int
113 113
114 xdst = (struct xfrm_dst *)dst1; 114 xdst = (struct xfrm_dst *)dst1;
115 xdst->route = &rt->u.dst; 115 xdst->route = &rt->u.dst;
116 if (rt->rt6i_node)
117 xdst->route_cookie = rt->rt6i_node->fn_sernum;
116 118
117 dst1->next = dst_prev; 119 dst1->next = dst_prev;
118 dst_prev = dst1; 120 dst_prev = dst1;
@@ -137,6 +139,8 @@ __xfrm6_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int
137 139
138 dst_prev->child = &rt->u.dst; 140 dst_prev->child = &rt->u.dst;
139 dst->path = &rt->u.dst; 141 dst->path = &rt->u.dst;
142 if (rt->rt6i_node)
143 ((struct xfrm_dst *)dst)->path_cookie = rt->rt6i_node->fn_sernum;
140 144
141 *dst_p = dst; 145 *dst_p = dst;
142 dst = dst_prev; 146 dst = dst_prev;
diff --git a/net/irda/irda_device.c b/net/irda/irda_device.c
index d6ccd3239dcf..70543d89438b 100644
--- a/net/irda/irda_device.c
+++ b/net/irda/irda_device.c
@@ -470,6 +470,7 @@ void irda_device_unregister_dongle(struct dongle_reg *dongle)
470} 470}
471EXPORT_SYMBOL(irda_device_unregister_dongle); 471EXPORT_SYMBOL(irda_device_unregister_dongle);
472 472
473#ifdef CONFIG_ISA_DMA_API
473/* 474/*
474 * Function setup_dma (idev, buffer, count, mode) 475 * Function setup_dma (idev, buffer, count, mode)
475 * 476 *
@@ -492,3 +493,4 @@ void irda_setup_dma(int channel, dma_addr_t buffer, int count, int mode)
492 release_dma_lock(flags); 493 release_dma_lock(flags);
493} 494}
494EXPORT_SYMBOL(irda_setup_dma); 495EXPORT_SYMBOL(irda_setup_dma);
496#endif
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 4ee392066148..e41ce458c2a9 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -49,6 +49,8 @@
49#include <linux/bitops.h> 49#include <linux/bitops.h>
50#include <linux/mm.h> 50#include <linux/mm.h>
51#include <linux/types.h> 51#include <linux/types.h>
52#include <linux/audit.h>
53
52#include <net/sock.h> 54#include <net/sock.h>
53#include <net/scm.h> 55#include <net/scm.h>
54 56
@@ -733,11 +735,15 @@ static inline int do_one_broadcast(struct sock *sk,
733 735
734 sock_hold(sk); 736 sock_hold(sk);
735 if (p->skb2 == NULL) { 737 if (p->skb2 == NULL) {
736 if (atomic_read(&p->skb->users) != 1) { 738 if (skb_shared(p->skb)) {
737 p->skb2 = skb_clone(p->skb, p->allocation); 739 p->skb2 = skb_clone(p->skb, p->allocation);
738 } else { 740 } else {
739 p->skb2 = p->skb; 741 p->skb2 = skb_get(p->skb);
740 atomic_inc(&p->skb->users); 742 /*
743 * skb ownership may have been set when
744 * delivered to a previous socket.
745 */
746 skb_orphan(p->skb2);
741 } 747 }
742 } 748 }
743 if (p->skb2 == NULL) { 749 if (p->skb2 == NULL) {
@@ -783,11 +789,12 @@ int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid,
783 sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list) 789 sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
784 do_one_broadcast(sk, &info); 790 do_one_broadcast(sk, &info);
785 791
792 kfree_skb(skb);
793
786 netlink_unlock_table(); 794 netlink_unlock_table();
787 795
788 if (info.skb2) 796 if (info.skb2)
789 kfree_skb(info.skb2); 797 kfree_skb(info.skb2);
790 kfree_skb(skb);
791 798
792 if (info.delivered) { 799 if (info.delivered) {
793 if (info.congested && (allocation & __GFP_WAIT)) 800 if (info.congested && (allocation & __GFP_WAIT))
@@ -904,6 +911,7 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
904 NETLINK_CB(skb).groups = nlk->groups; 911 NETLINK_CB(skb).groups = nlk->groups;
905 NETLINK_CB(skb).dst_pid = dst_pid; 912 NETLINK_CB(skb).dst_pid = dst_pid;
906 NETLINK_CB(skb).dst_groups = dst_groups; 913 NETLINK_CB(skb).dst_groups = dst_groups;
914 NETLINK_CB(skb).loginuid = audit_get_loginuid(current->audit_context);
907 memcpy(NETLINK_CREDS(skb), &siocb->scm->creds, sizeof(struct ucred)); 915 memcpy(NETLINK_CREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
908 916
909 /* What can I do? Netlink is asynchronous, so that 917 /* What can I do? Netlink is asynchronous, so that
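Two things change in do_one_broadcast() above: the shared test uses skb_shared() instead of open-coding the refcount read, and the exclusive-owner branch now takes its reference with skb_get() and then skb_orphan()s it, so ownership left over from delivery to a previous socket (as the added comment notes) does not bleed into the next receiver's accounting. The refcounting half of that decision, as a self-contained userspace analogue (a toy buffer, not the sk_buff API):

/* Clone-if-shared, reference-if-exclusive: mimics the
 * skb_shared()/skb_clone()/skb_get() choice made above. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf {
	int refcnt;
	char data[32];
};

static struct buf *buf_get(struct buf *b)
{
	b->refcnt++;
	return b;
}

static int buf_shared(const struct buf *b)
{
	return b->refcnt > 1;
}

static struct buf *buf_clone(const struct buf *b)
{
	struct buf *c = malloc(sizeof(*c));

	if (c) {
		memcpy(c->data, b->data, sizeof(c->data));
		c->refcnt = 1;
	}
	return c;
}

static void buf_put(struct buf *b)
{
	if (--b->refcnt == 0)
		free(b);
}

int main(void)
{
	struct buf *orig = malloc(sizeof(*orig));
	struct buf *copy;

	if (!orig)
		return 1;
	orig->refcnt = 1;
	strcpy(orig->data, "broadcast payload");

	/* Exclusive owner: cheap reference, no copy (the skb_get() case). */
	copy = buf_shared(orig) ? buf_clone(orig) : buf_get(orig);
	printf("now shared=%d, copy==orig: %d\n", buf_shared(orig), copy == orig);

	buf_put(copy);
	buf_put(orig);
	return 0;
}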
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 64acea0adaae..0269616e75a1 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -7,7 +7,7 @@
7 * 7 *
8 * Version: $Id: af_packet.c,v 1.61 2002/02/08 03:57:19 davem Exp $ 8 * Version: $Id: af_packet.c,v 1.61 2002/02/08 03:57:19 davem Exp $
9 * 9 *
10 * Authors: Ross Biro, <bir7@leland.Stanford.Edu> 10 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Alan Cox, <gw4pts@gw4pts.ampr.org> 12 * Alan Cox, <gw4pts@gw4pts.ampr.org>
13 * 13 *
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index b0941186f867..b22c9beb604d 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -405,7 +405,7 @@ config NET_EMATCH_STACK
405 ---help--- 405 ---help---
406 Size of the local stack variable used while evaluating the tree of 406 Size of the local stack variable used while evaluating the tree of
407 ematches. Limits the depth of the tree, i.e. the number of 407 ematches. Limits the depth of the tree, i.e. the number of
408 encapsulated precedences. Every level requires 4 bytes of addtional 408 encapsulated precedences. Every level requires 4 bytes of additional
409 stack space. 409 stack space.
410 410
411config NET_EMATCH_CMP 411config NET_EMATCH_CMP
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index cafcb084098d..914c85ff8fe6 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -881,7 +881,7 @@ static int __init tc_action_init(void)
881 link_p[RTM_GETACTION-RTM_BASE].dumpit = tc_dump_action; 881 link_p[RTM_GETACTION-RTM_BASE].dumpit = tc_dump_action;
882 } 882 }
883 883
884 printk("TC classifier action (bugs to netdev@oss.sgi.com cc " 884 printk("TC classifier action (bugs to netdev@vger.kernel.org cc "
885 "hadi@cyberus.ca)\n"); 885 "hadi@cyberus.ca)\n");
886 return 0; 886 return 0;
887} 887}
diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c
index 0d2d4415f334..dfb300bb6baa 100644
--- a/net/sched/cls_basic.c
+++ b/net/sched/cls_basic.c
@@ -261,6 +261,9 @@ static int basic_dump(struct tcf_proto *tp, unsigned long fh,
261 rta = (struct rtattr *) b; 261 rta = (struct rtattr *) b;
262 RTA_PUT(skb, TCA_OPTIONS, 0, NULL); 262 RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
263 263
264 if (f->res.classid)
265 RTA_PUT(skb, TCA_BASIC_CLASSID, sizeof(u32), &f->res.classid);
266
264 if (tcf_exts_dump(skb, &f->exts, &basic_ext_map) < 0 || 267 if (tcf_exts_dump(skb, &f->exts, &basic_ext_map) < 0 ||
265 tcf_em_tree_dump(skb, &f->ematches, TCA_BASIC_EMATCHES) < 0) 268 tcf_em_tree_dump(skb, &f->ematches, TCA_BASIC_EMATCHES) < 0)
266 goto rtattr_failure; 269 goto rtattr_failure;
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index f1eeaf65cee5..48bb23c2a35a 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -32,7 +32,7 @@
32 * +-----------+ +-----------+ 32 * +-----------+ +-----------+
33 * | | 33 * | |
34 * ---> meta_ops[INT][INDEV](...) | 34 * ---> meta_ops[INT][INDEV](...) |
35 * | | 35 * | |
36 * ----------- | 36 * ----------- |
37 * V V 37 * V V
38 * +-----------+ +-----------+ 38 * +-----------+ +-----------+
@@ -70,6 +70,7 @@
70#include <net/dst.h> 70#include <net/dst.h>
71#include <net/route.h> 71#include <net/route.h>
72#include <net/pkt_cls.h> 72#include <net/pkt_cls.h>
73#include <net/sock.h>
73 74
74struct meta_obj 75struct meta_obj
75{ 76{
@@ -284,6 +285,214 @@ META_COLLECTOR(int_rtiif)
284} 285}
285 286
286/************************************************************************** 287/**************************************************************************
288 * Socket Attributes
289 **************************************************************************/
290
291#define SKIP_NONLOCAL(skb) \
292 if (unlikely(skb->sk == NULL)) { \
293 *err = -1; \
294 return; \
295 }
296
297META_COLLECTOR(int_sk_family)
298{
299 SKIP_NONLOCAL(skb);
300 dst->value = skb->sk->sk_family;
301}
302
303META_COLLECTOR(int_sk_state)
304{
305 SKIP_NONLOCAL(skb);
306 dst->value = skb->sk->sk_state;
307}
308
309META_COLLECTOR(int_sk_reuse)
310{
311 SKIP_NONLOCAL(skb);
312 dst->value = skb->sk->sk_reuse;
313}
314
315META_COLLECTOR(int_sk_bound_if)
316{
317 SKIP_NONLOCAL(skb);
318 /* No error if bound_dev_if is 0, legal userspace check */
319 dst->value = skb->sk->sk_bound_dev_if;
320}
321
322META_COLLECTOR(var_sk_bound_if)
323{
324 SKIP_NONLOCAL(skb);
325
326 if (skb->sk->sk_bound_dev_if == 0) {
327 dst->value = (unsigned long) "any";
328 dst->len = 3;
329 } else {
330 struct net_device *dev;
331
332 dev = dev_get_by_index(skb->sk->sk_bound_dev_if);
333 *err = var_dev(dev, dst);
334 if (dev)
335 dev_put(dev);
336 }
337}
338
339META_COLLECTOR(int_sk_refcnt)
340{
341 SKIP_NONLOCAL(skb);
342 dst->value = atomic_read(&skb->sk->sk_refcnt);
343}
344
345META_COLLECTOR(int_sk_rcvbuf)
346{
347 SKIP_NONLOCAL(skb);
348 dst->value = skb->sk->sk_rcvbuf;
349}
350
351META_COLLECTOR(int_sk_shutdown)
352{
353 SKIP_NONLOCAL(skb);
354 dst->value = skb->sk->sk_shutdown;
355}
356
357META_COLLECTOR(int_sk_proto)
358{
359 SKIP_NONLOCAL(skb);
360 dst->value = skb->sk->sk_protocol;
361}
362
363META_COLLECTOR(int_sk_type)
364{
365 SKIP_NONLOCAL(skb);
366 dst->value = skb->sk->sk_type;
367}
368
369META_COLLECTOR(int_sk_rmem_alloc)
370{
371 SKIP_NONLOCAL(skb);
372 dst->value = atomic_read(&skb->sk->sk_rmem_alloc);
373}
374
375META_COLLECTOR(int_sk_wmem_alloc)
376{
377 SKIP_NONLOCAL(skb);
378 dst->value = atomic_read(&skb->sk->sk_wmem_alloc);
379}
380
381META_COLLECTOR(int_sk_omem_alloc)
382{
383 SKIP_NONLOCAL(skb);
384 dst->value = atomic_read(&skb->sk->sk_omem_alloc);
385}
386
387META_COLLECTOR(int_sk_rcv_qlen)
388{
389 SKIP_NONLOCAL(skb);
390 dst->value = skb->sk->sk_receive_queue.qlen;
391}
392
393META_COLLECTOR(int_sk_snd_qlen)
394{
395 SKIP_NONLOCAL(skb);
396 dst->value = skb->sk->sk_write_queue.qlen;
397}
398
399META_COLLECTOR(int_sk_wmem_queued)
400{
401 SKIP_NONLOCAL(skb);
402 dst->value = skb->sk->sk_wmem_queued;
403}
404
405META_COLLECTOR(int_sk_fwd_alloc)
406{
407 SKIP_NONLOCAL(skb);
408 dst->value = skb->sk->sk_forward_alloc;
409}
410
411META_COLLECTOR(int_sk_sndbuf)
412{
413 SKIP_NONLOCAL(skb);
414 dst->value = skb->sk->sk_sndbuf;
415}
416
417META_COLLECTOR(int_sk_alloc)
418{
419 SKIP_NONLOCAL(skb);
420 dst->value = skb->sk->sk_allocation;
421}
422
423META_COLLECTOR(int_sk_route_caps)
424{
425 SKIP_NONLOCAL(skb);
426 dst->value = skb->sk->sk_route_caps;
427}
428
429META_COLLECTOR(int_sk_hashent)
430{
431 SKIP_NONLOCAL(skb);
432 dst->value = skb->sk->sk_hashent;
433}
434
435META_COLLECTOR(int_sk_lingertime)
436{
437 SKIP_NONLOCAL(skb);
438 dst->value = skb->sk->sk_lingertime / HZ;
439}
440
441META_COLLECTOR(int_sk_err_qlen)
442{
443 SKIP_NONLOCAL(skb);
444 dst->value = skb->sk->sk_error_queue.qlen;
445}
446
447META_COLLECTOR(int_sk_ack_bl)
448{
449 SKIP_NONLOCAL(skb);
450 dst->value = skb->sk->sk_ack_backlog;
451}
452
453META_COLLECTOR(int_sk_max_ack_bl)
454{
455 SKIP_NONLOCAL(skb);
456 dst->value = skb->sk->sk_max_ack_backlog;
457}
458
459META_COLLECTOR(int_sk_prio)
460{
461 SKIP_NONLOCAL(skb);
462 dst->value = skb->sk->sk_priority;
463}
464
465META_COLLECTOR(int_sk_rcvlowat)
466{
467 SKIP_NONLOCAL(skb);
468 dst->value = skb->sk->sk_rcvlowat;
469}
470
471META_COLLECTOR(int_sk_rcvtimeo)
472{
473 SKIP_NONLOCAL(skb);
474 dst->value = skb->sk->sk_rcvtimeo / HZ;
475}
476
477META_COLLECTOR(int_sk_sndtimeo)
478{
479 SKIP_NONLOCAL(skb);
480 dst->value = skb->sk->sk_sndtimeo / HZ;
481}
482
483META_COLLECTOR(int_sk_sendmsg_off)
484{
485 SKIP_NONLOCAL(skb);
486 dst->value = skb->sk->sk_sndmsg_off;
487}
488
489META_COLLECTOR(int_sk_write_pend)
490{
491 SKIP_NONLOCAL(skb);
492 dst->value = skb->sk->sk_write_pending;
493}
494
495/**************************************************************************
287 * Meta value collectors assignment table 496 * Meta value collectors assignment table
288 **************************************************************************/ 497 **************************************************************************/
289 498
@@ -293,41 +502,75 @@ struct meta_ops
293 struct meta_value *, struct meta_obj *, int *); 502 struct meta_value *, struct meta_obj *, int *);
294}; 503};
295 504
505#define META_ID(name) TCF_META_ID_##name
506#define META_FUNC(name) { .get = meta_##name }
507
296/* Meta value operations table listing all meta value collectors and 508/* Meta value operations table listing all meta value collectors and
297 * assigns them to a type and meta id. */ 509 * assigns them to a type and meta id. */
298static struct meta_ops __meta_ops[TCF_META_TYPE_MAX+1][TCF_META_ID_MAX+1] = { 510static struct meta_ops __meta_ops[TCF_META_TYPE_MAX+1][TCF_META_ID_MAX+1] = {
299 [TCF_META_TYPE_VAR] = { 511 [TCF_META_TYPE_VAR] = {
300 [TCF_META_ID_DEV] = { .get = meta_var_dev }, 512 [META_ID(DEV)] = META_FUNC(var_dev),
301 [TCF_META_ID_INDEV] = { .get = meta_var_indev }, 513 [META_ID(INDEV)] = META_FUNC(var_indev),
302 [TCF_META_ID_REALDEV] = { .get = meta_var_realdev } 514 [META_ID(REALDEV)] = META_FUNC(var_realdev),
515 [META_ID(SK_BOUND_IF)] = META_FUNC(var_sk_bound_if),
303 }, 516 },
304 [TCF_META_TYPE_INT] = { 517 [TCF_META_TYPE_INT] = {
305 [TCF_META_ID_RANDOM] = { .get = meta_int_random }, 518 [META_ID(RANDOM)] = META_FUNC(int_random),
306 [TCF_META_ID_LOADAVG_0] = { .get = meta_int_loadavg_0 }, 519 [META_ID(LOADAVG_0)] = META_FUNC(int_loadavg_0),
307 [TCF_META_ID_LOADAVG_1] = { .get = meta_int_loadavg_1 }, 520 [META_ID(LOADAVG_1)] = META_FUNC(int_loadavg_1),
308 [TCF_META_ID_LOADAVG_2] = { .get = meta_int_loadavg_2 }, 521 [META_ID(LOADAVG_2)] = META_FUNC(int_loadavg_2),
309 [TCF_META_ID_DEV] = { .get = meta_int_dev }, 522 [META_ID(DEV)] = META_FUNC(int_dev),
310 [TCF_META_ID_INDEV] = { .get = meta_int_indev }, 523 [META_ID(INDEV)] = META_FUNC(int_indev),
311 [TCF_META_ID_REALDEV] = { .get = meta_int_realdev }, 524 [META_ID(REALDEV)] = META_FUNC(int_realdev),
312 [TCF_META_ID_PRIORITY] = { .get = meta_int_priority }, 525 [META_ID(PRIORITY)] = META_FUNC(int_priority),
313 [TCF_META_ID_PROTOCOL] = { .get = meta_int_protocol }, 526 [META_ID(PROTOCOL)] = META_FUNC(int_protocol),
314 [TCF_META_ID_SECURITY] = { .get = meta_int_security }, 527 [META_ID(SECURITY)] = META_FUNC(int_security),
315 [TCF_META_ID_PKTTYPE] = { .get = meta_int_pkttype }, 528 [META_ID(PKTTYPE)] = META_FUNC(int_pkttype),
316 [TCF_META_ID_PKTLEN] = { .get = meta_int_pktlen }, 529 [META_ID(PKTLEN)] = META_FUNC(int_pktlen),
317 [TCF_META_ID_DATALEN] = { .get = meta_int_datalen }, 530 [META_ID(DATALEN)] = META_FUNC(int_datalen),
318 [TCF_META_ID_MACLEN] = { .get = meta_int_maclen }, 531 [META_ID(MACLEN)] = META_FUNC(int_maclen),
319#ifdef CONFIG_NETFILTER 532#ifdef CONFIG_NETFILTER
320 [TCF_META_ID_NFMARK] = { .get = meta_int_nfmark }, 533 [META_ID(NFMARK)] = META_FUNC(int_nfmark),
321#endif 534#endif
322 [TCF_META_ID_TCINDEX] = { .get = meta_int_tcindex }, 535 [META_ID(TCINDEX)] = META_FUNC(int_tcindex),
323#ifdef CONFIG_NET_CLS_ACT 536#ifdef CONFIG_NET_CLS_ACT
324 [TCF_META_ID_TCVERDICT] = { .get = meta_int_tcverd }, 537 [META_ID(TCVERDICT)] = META_FUNC(int_tcverd),
325 [TCF_META_ID_TCCLASSID] = { .get = meta_int_tcclassid }, 538 [META_ID(TCCLASSID)] = META_FUNC(int_tcclassid),
326#endif 539#endif
327#ifdef CONFIG_NET_CLS_ROUTE 540#ifdef CONFIG_NET_CLS_ROUTE
328 [TCF_META_ID_RTCLASSID] = { .get = meta_int_rtclassid }, 541 [META_ID(RTCLASSID)] = META_FUNC(int_rtclassid),
329#endif 542#endif
330 [TCF_META_ID_RTIIF] = { .get = meta_int_rtiif } 543 [META_ID(RTIIF)] = META_FUNC(int_rtiif),
544 [META_ID(SK_FAMILY)] = META_FUNC(int_sk_family),
545 [META_ID(SK_STATE)] = META_FUNC(int_sk_state),
546 [META_ID(SK_REUSE)] = META_FUNC(int_sk_reuse),
547 [META_ID(SK_BOUND_IF)] = META_FUNC(int_sk_bound_if),
548 [META_ID(SK_REFCNT)] = META_FUNC(int_sk_refcnt),
549 [META_ID(SK_RCVBUF)] = META_FUNC(int_sk_rcvbuf),
550 [META_ID(SK_SNDBUF)] = META_FUNC(int_sk_sndbuf),
551 [META_ID(SK_SHUTDOWN)] = META_FUNC(int_sk_shutdown),
552 [META_ID(SK_PROTO)] = META_FUNC(int_sk_proto),
553 [META_ID(SK_TYPE)] = META_FUNC(int_sk_type),
554 [META_ID(SK_RMEM_ALLOC)] = META_FUNC(int_sk_rmem_alloc),
555 [META_ID(SK_WMEM_ALLOC)] = META_FUNC(int_sk_wmem_alloc),
556 [META_ID(SK_OMEM_ALLOC)] = META_FUNC(int_sk_omem_alloc),
557 [META_ID(SK_WMEM_QUEUED)] = META_FUNC(int_sk_wmem_queued),
558 [META_ID(SK_RCV_QLEN)] = META_FUNC(int_sk_rcv_qlen),
559 [META_ID(SK_SND_QLEN)] = META_FUNC(int_sk_snd_qlen),
560 [META_ID(SK_ERR_QLEN)] = META_FUNC(int_sk_err_qlen),
561 [META_ID(SK_FORWARD_ALLOCS)] = META_FUNC(int_sk_fwd_alloc),
562 [META_ID(SK_ALLOCS)] = META_FUNC(int_sk_alloc),
563 [META_ID(SK_ROUTE_CAPS)] = META_FUNC(int_sk_route_caps),
564 [META_ID(SK_HASHENT)] = META_FUNC(int_sk_hashent),
565 [META_ID(SK_LINGERTIME)] = META_FUNC(int_sk_lingertime),
566 [META_ID(SK_ACK_BACKLOG)] = META_FUNC(int_sk_ack_bl),
567 [META_ID(SK_MAX_ACK_BACKLOG)] = META_FUNC(int_sk_max_ack_bl),
568 [META_ID(SK_PRIO)] = META_FUNC(int_sk_prio),
569 [META_ID(SK_RCVLOWAT)] = META_FUNC(int_sk_rcvlowat),
570 [META_ID(SK_RCVTIMEO)] = META_FUNC(int_sk_rcvtimeo),
571 [META_ID(SK_SNDTIMEO)] = META_FUNC(int_sk_sndtimeo),
572 [META_ID(SK_SENDMSG_OFF)] = META_FUNC(int_sk_sendmsg_off),
573 [META_ID(SK_WRITE_PENDING)] = META_FUNC(int_sk_write_pend),
331 } 574 }
332}; 575};
333 576
@@ -396,9 +639,9 @@ static int meta_int_compare(struct meta_obj *a, struct meta_obj *b)
396 /* Let gcc optimize it, the unlikely is not really based on 639 /* Let gcc optimize it, the unlikely is not really based on
397 * some numbers but jump free code for mismatches seems 640 * some numbers but jump free code for mismatches seems
398 * more logical. */ 641 * more logical. */
399 if (unlikely(a == b)) 642 if (unlikely(a->value == b->value))
400 return 0; 643 return 0;
401 else if (a < b) 644 else if (a->value < b->value)
402 return -1; 645 return -1;
403 else 646 else
404 return 1; 647 return 1;
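The meta_int_compare() fix above is worth spelling out: the old code compared the struct meta_obj pointers a and b, not the values they carry, so the result depended on where the two objects happened to sit in memory. A small standalone reproduction of the difference, using a stand-in struct rather than the kernel's meta_obj definition:

/* Pointer comparison versus value comparison -- the bug fixed above. */
#include <stdint.h>
#include <stdio.h>

struct obj {
	unsigned long value;
	unsigned long len;
};

static int cmp_broken(struct obj *a, struct obj *b)
{
	if (a == b)
		return 0;
	/* Compares addresses (the kernel code compared the raw pointers). */
	return (uintptr_t)a < (uintptr_t)b ? -1 : 1;
}

static int cmp_fixed(struct obj *a, struct obj *b)
{
	if (a->value == b->value)
		return 0;
	return a->value < b->value ? -1 : 1;
}

int main(void)
{
	struct obj big = { .value = 100 };
	struct obj small = { .value = 1 };

	printf("broken: %d (depends on stack layout)\n", cmp_broken(&big, &small));
	printf("fixed : %d (100 vs 1, always positive)\n", cmp_fixed(&big, &small));
	return 0;
}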
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 8a3db9d95bab..d8bd2a569c7c 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -18,7 +18,7 @@
18#include <asm/byteorder.h> 18#include <asm/byteorder.h>
19 19
20 20
21#if 1 /* control */ 21#if 0 /* control */
22#define DPRINTK(format,args...) printk(KERN_DEBUG format,##args) 22#define DPRINTK(format,args...) printk(KERN_DEBUG format,##args)
23#else 23#else
24#define DPRINTK(format,args...) 24#define DPRINTK(format,args...)
@@ -73,8 +73,13 @@ static int dsmark_graft(struct Qdisc *sch,unsigned long arg,
73 73
74 DPRINTK("dsmark_graft(sch %p,[qdisc %p],new %p,old %p)\n",sch,p,new, 74 DPRINTK("dsmark_graft(sch %p,[qdisc %p],new %p,old %p)\n",sch,p,new,
75 old); 75 old);
76 if (!new) 76
77 new = &noop_qdisc; 77 if (new == NULL) {
78 new = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
79 if (new == NULL)
80 new = &noop_qdisc;
81 }
82
78 sch_tree_lock(sch); 83 sch_tree_lock(sch);
79 *old = xchg(&p->q,new); 84 *old = xchg(&p->q,new);
80 if (*old) 85 if (*old)
@@ -163,14 +168,15 @@ static void dsmark_walk(struct Qdisc *sch,struct qdisc_walker *walker)
163 return; 168 return;
164 for (i = 0; i < p->indices; i++) { 169 for (i = 0; i < p->indices; i++) {
165 if (p->mask[i] == 0xff && !p->value[i]) 170 if (p->mask[i] == 0xff && !p->value[i])
166 continue; 171 goto ignore;
167 if (walker->count >= walker->skip) { 172 if (walker->count >= walker->skip) {
168 if (walker->fn(sch, i+1, walker) < 0) { 173 if (walker->fn(sch, i+1, walker) < 0) {
169 walker->stop = 1; 174 walker->stop = 1;
170 break; 175 break;
171 } 176 }
172 } 177 }
173 walker->count++; 178ignore:
179 walker->count++;
174 } 180 }
175} 181}
176 182
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index e0c9fbe73b15..bb9bf8d5003c 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -53,7 +53,6 @@
53 53
54struct netem_sched_data { 54struct netem_sched_data {
55 struct Qdisc *qdisc; 55 struct Qdisc *qdisc;
56 struct sk_buff_head delayed;
57 struct timer_list timer; 56 struct timer_list timer;
58 57
59 u32 latency; 58 u32 latency;
@@ -63,11 +62,12 @@ struct netem_sched_data {
63 u32 gap; 62 u32 gap;
64 u32 jitter; 63 u32 jitter;
65 u32 duplicate; 64 u32 duplicate;
65 u32 reorder;
66 66
67 struct crndstate { 67 struct crndstate {
68 unsigned long last; 68 unsigned long last;
69 unsigned long rho; 69 unsigned long rho;
70 } delay_cor, loss_cor, dup_cor; 70 } delay_cor, loss_cor, dup_cor, reorder_cor;
71 71
72 struct disttable { 72 struct disttable {
73 u32 size; 73 u32 size;
@@ -137,122 +137,68 @@ static long tabledist(unsigned long mu, long sigma,
137 return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu; 137 return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
138} 138}
139 139
140/* Put skb in the private delayed queue. */
141static int netem_delay(struct Qdisc *sch, struct sk_buff *skb)
142{
143 struct netem_sched_data *q = qdisc_priv(sch);
144 psched_tdiff_t td;
145 psched_time_t now;
146
147 PSCHED_GET_TIME(now);
148 td = tabledist(q->latency, q->jitter, &q->delay_cor, q->delay_dist);
149
150 /* Always queue at tail to keep packets in order */
151 if (likely(q->delayed.qlen < q->limit)) {
152 struct netem_skb_cb *cb = (struct netem_skb_cb *)skb->cb;
153
154 PSCHED_TADD2(now, td, cb->time_to_send);
155
156 pr_debug("netem_delay: skb=%p now=%llu tosend=%llu\n", skb,
157 now, cb->time_to_send);
158
159 __skb_queue_tail(&q->delayed, skb);
160 return NET_XMIT_SUCCESS;
161 }
162
163 pr_debug("netem_delay: queue over limit %d\n", q->limit);
164 sch->qstats.overlimits++;
165 kfree_skb(skb);
166 return NET_XMIT_DROP;
167}
168
169/* 140/*
170 * Move a packet that is ready to send from the delay holding 141 * Insert one skb into qdisc.
171 * list to the underlying qdisc. 142 * Note: parent depends on return value to account for queue length.
143 * NET_XMIT_DROP: queue length didn't change.
144 * NET_XMIT_SUCCESS: one skb was queued.
172 */ 145 */
173static int netem_run(struct Qdisc *sch)
174{
175 struct netem_sched_data *q = qdisc_priv(sch);
176 struct sk_buff *skb;
177 psched_time_t now;
178
179 PSCHED_GET_TIME(now);
180
181 skb = skb_peek(&q->delayed);
182 if (skb) {
183 const struct netem_skb_cb *cb
184 = (const struct netem_skb_cb *)skb->cb;
185 long delay
186 = PSCHED_US2JIFFIE(PSCHED_TDIFF(cb->time_to_send, now));
187 pr_debug("netem_run: skb=%p delay=%ld\n", skb, delay);
188
189 /* if more time remaining? */
190 if (delay > 0) {
191 mod_timer(&q->timer, jiffies + delay);
192 return 1;
193 }
194
195 __skb_unlink(skb, &q->delayed);
196
197 if (q->qdisc->enqueue(skb, q->qdisc)) {
198 sch->q.qlen--;
199 sch->qstats.drops++;
200 }
201 }
202
203 return 0;
204}
205
206static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) 146static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
207{ 147{
208 struct netem_sched_data *q = qdisc_priv(sch); 148 struct netem_sched_data *q = qdisc_priv(sch);
149 struct netem_skb_cb *cb = (struct netem_skb_cb *)skb->cb;
150 struct sk_buff *skb2;
209 int ret; 151 int ret;
152 int count = 1;
210 153
211 pr_debug("netem_enqueue skb=%p\n", skb); 154 pr_debug("netem_enqueue skb=%p\n", skb);
212 155
156 /* Random duplication */
157 if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
158 ++count;
159
213 /* Random packet drop 0 => none, ~0 => all */ 160 /* Random packet drop 0 => none, ~0 => all */
214 if (q->loss && q->loss >= get_crandom(&q->loss_cor)) { 161 if (q->loss && q->loss >= get_crandom(&q->loss_cor))
215 pr_debug("netem_enqueue: random loss\n"); 162 --count;
163
164 if (count == 0) {
216 sch->qstats.drops++; 165 sch->qstats.drops++;
217 kfree_skb(skb); 166 kfree_skb(skb);
218 return 0; /* lie about loss so TCP doesn't know */ 167 return NET_XMIT_DROP;
219 } 168 }
220 169
221 /* Random duplication */ 170 /*
222 if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor)) { 171 * If we need to duplicate packet, then re-insert at top of the
223 struct sk_buff *skb2; 172 * qdisc tree, since parent queuer expects that only one
224 173 * skb will be queued.
225 skb2 = skb_clone(skb, GFP_ATOMIC); 174 */
226 if (skb2 && netem_delay(sch, skb2) == NET_XMIT_SUCCESS) { 175 if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
227 struct Qdisc *qp; 176 struct Qdisc *rootq = sch->dev->qdisc;
228 177 u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
229 /* Since one packet can generate two packets in the 178 q->duplicate = 0;
230 * queue, the parent's qlen accounting gets confused, 179
231 * so fix it. 180 rootq->enqueue(skb2, rootq);
232 */ 181 q->duplicate = dupsave;
233 qp = qdisc_lookup(sch->dev, TC_H_MAJ(sch->parent));
234 if (qp)
235 qp->q.qlen++;
236
237 sch->q.qlen++;
238 sch->bstats.bytes += skb2->len;
239 sch->bstats.packets++;
240 } else
241 sch->qstats.drops++;
242 } 182 }
243 183
244 /* If doing simple delay then gap == 0 so all packets 184 if (q->gap == 0 /* not doing reordering */
245 * go into the delayed holding queue 185 || q->counter < q->gap /* inside last reordering gap */
246 * otherwise if doing out of order only "1 out of gap" 186 || q->reorder < get_crandom(&q->reorder_cor)) {
247 * packets will be delayed. 187 psched_time_t now;
248 */ 188 PSCHED_GET_TIME(now);
249 if (q->counter < q->gap) { 189 PSCHED_TADD2(now, tabledist(q->latency, q->jitter,
190 &q->delay_cor, q->delay_dist),
191 cb->time_to_send);
250 ++q->counter; 192 ++q->counter;
251 ret = q->qdisc->enqueue(skb, q->qdisc); 193 ret = q->qdisc->enqueue(skb, q->qdisc);
252 } else { 194 } else {
195 /*
196 * Do re-ordering by putting one out of N packets at the front
197 * of the queue.
198 */
199 PSCHED_GET_TIME(cb->time_to_send);
253 q->counter = 0; 200 q->counter = 0;
254 ret = netem_delay(sch, skb); 201 ret = q->qdisc->ops->requeue(skb, q->qdisc);
255 netem_run(sch);
256 } 202 }
257 203
258 if (likely(ret == NET_XMIT_SUCCESS)) { 204 if (likely(ret == NET_XMIT_SUCCESS)) {
@@ -296,22 +242,33 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
296{ 242{
297 struct netem_sched_data *q = qdisc_priv(sch); 243 struct netem_sched_data *q = qdisc_priv(sch);
298 struct sk_buff *skb; 244 struct sk_buff *skb;
299 int pending;
300
301 pending = netem_run(sch);
302 245
303 skb = q->qdisc->dequeue(q->qdisc); 246 skb = q->qdisc->dequeue(q->qdisc);
304 if (skb) { 247 if (skb) {
305 pr_debug("netem_dequeue: return skb=%p\n", skb); 248 const struct netem_skb_cb *cb
306 sch->q.qlen--; 249 = (const struct netem_skb_cb *)skb->cb;
307 sch->flags &= ~TCQ_F_THROTTLED; 250 psched_time_t now;
308 } 251 long delay;
309 else if (pending) { 252
310 pr_debug("netem_dequeue: throttling\n"); 253 /* if more time remaining? */
254 PSCHED_GET_TIME(now);
255 delay = PSCHED_US2JIFFIE(PSCHED_TDIFF(cb->time_to_send, now));
256 pr_debug("netem_run: skb=%p delay=%ld\n", skb, delay);
257 if (delay <= 0) {
258 pr_debug("netem_dequeue: return skb=%p\n", skb);
259 sch->q.qlen--;
260 sch->flags &= ~TCQ_F_THROTTLED;
261 return skb;
262 }
263
264 mod_timer(&q->timer, jiffies + delay);
311 sch->flags |= TCQ_F_THROTTLED; 265 sch->flags |= TCQ_F_THROTTLED;
312 }
313 266
314 return skb; 267 if (q->qdisc->ops->requeue(skb, q->qdisc) != 0)
268 sch->qstats.drops++;
269 }
270
271 return NULL;
315} 272}
316 273
317static void netem_watchdog(unsigned long arg) 274static void netem_watchdog(unsigned long arg)
@@ -328,8 +285,6 @@ static void netem_reset(struct Qdisc *sch)
328 struct netem_sched_data *q = qdisc_priv(sch); 285 struct netem_sched_data *q = qdisc_priv(sch);
329 286
330 qdisc_reset(q->qdisc); 287 qdisc_reset(q->qdisc);
331 skb_queue_purge(&q->delayed);
332
333 sch->q.qlen = 0; 288 sch->q.qlen = 0;
334 sch->flags &= ~TCQ_F_THROTTLED; 289 sch->flags &= ~TCQ_F_THROTTLED;
335 del_timer_sync(&q->timer); 290 del_timer_sync(&q->timer);
@@ -397,6 +352,19 @@ static int get_correlation(struct Qdisc *sch, const struct rtattr *attr)
397 return 0; 352 return 0;
398} 353}
399 354
355static int get_reorder(struct Qdisc *sch, const struct rtattr *attr)
356{
357 struct netem_sched_data *q = qdisc_priv(sch);
358 const struct tc_netem_reorder *r = RTA_DATA(attr);
359
360 if (RTA_PAYLOAD(attr) != sizeof(*r))
361 return -EINVAL;
362
363 q->reorder = r->probability;
364 init_crandom(&q->reorder_cor, r->correlation);
365 return 0;
366}
367
400static int netem_change(struct Qdisc *sch, struct rtattr *opt) 368static int netem_change(struct Qdisc *sch, struct rtattr *opt)
401{ 369{
402 struct netem_sched_data *q = qdisc_priv(sch); 370 struct netem_sched_data *q = qdisc_priv(sch);
@@ -417,9 +385,15 @@ static int netem_change(struct Qdisc *sch, struct rtattr *opt)
417 q->jitter = qopt->jitter; 385 q->jitter = qopt->jitter;
418 q->limit = qopt->limit; 386 q->limit = qopt->limit;
419 q->gap = qopt->gap; 387 q->gap = qopt->gap;
388 q->counter = 0;
420 q->loss = qopt->loss; 389 q->loss = qopt->loss;
421 q->duplicate = qopt->duplicate; 390 q->duplicate = qopt->duplicate;
422 391
392 /* for compatiablity with earlier versions.
393 * if gap is set, need to assume 100% probablity
394 */
395 q->reorder = ~0;
396
423 /* Handle nested options after initial queue options. 397 /* Handle nested options after initial queue options.
424 * Should have put all options in nested format but too late now. 398 * Should have put all options in nested format but too late now.
425 */ 399 */
@@ -441,6 +415,11 @@ static int netem_change(struct Qdisc *sch, struct rtattr *opt)
441 if (ret) 415 if (ret)
442 return ret; 416 return ret;
443 } 417 }
418 if (tb[TCA_NETEM_REORDER-1]) {
419 ret = get_reorder(sch, tb[TCA_NETEM_REORDER-1]);
420 if (ret)
421 return ret;
422 }
444 } 423 }
445 424
446 425
@@ -455,11 +434,9 @@ static int netem_init(struct Qdisc *sch, struct rtattr *opt)
455 if (!opt) 434 if (!opt)
456 return -EINVAL; 435 return -EINVAL;
457 436
458 skb_queue_head_init(&q->delayed);
459 init_timer(&q->timer); 437 init_timer(&q->timer);
460 q->timer.function = netem_watchdog; 438 q->timer.function = netem_watchdog;
461 q->timer.data = (unsigned long) sch; 439 q->timer.data = (unsigned long) sch;
462 q->counter = 0;
463 440
464 q->qdisc = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops); 441 q->qdisc = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
465 if (!q->qdisc) { 442 if (!q->qdisc) {
@@ -491,6 +468,7 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
491 struct rtattr *rta = (struct rtattr *) b; 468 struct rtattr *rta = (struct rtattr *) b;
492 struct tc_netem_qopt qopt; 469 struct tc_netem_qopt qopt;
493 struct tc_netem_corr cor; 470 struct tc_netem_corr cor;
471 struct tc_netem_reorder reorder;
494 472
495 qopt.latency = q->latency; 473 qopt.latency = q->latency;
496 qopt.jitter = q->jitter; 474 qopt.jitter = q->jitter;
@@ -504,6 +482,11 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
504 cor.loss_corr = q->loss_cor.rho; 482 cor.loss_corr = q->loss_cor.rho;
505 cor.dup_corr = q->dup_cor.rho; 483 cor.dup_corr = q->dup_cor.rho;
506 RTA_PUT(skb, TCA_NETEM_CORR, sizeof(cor), &cor); 484 RTA_PUT(skb, TCA_NETEM_CORR, sizeof(cor), &cor);
485
486 reorder.probability = q->reorder;
487 reorder.correlation = q->reorder_cor.rho;
488 RTA_PUT(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder);
489
507 rta->rta_len = skb->tail - b; 490 rta->rta_len = skb->tail - b;
508 491
509 return skb->len; 492 return skb->len;
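Reordering in the rewritten enqueue path is driven by three inputs: gap, the per-qdisc counter, and the reorder probability with its correlation. A packet takes the normal delayed path while the counter is still inside the gap or the random draw stays outside the reorder probability; otherwise time_to_send is set to "now", the skb is pushed back via ->requeue() so it can be dequeued ahead of the delayed traffic, and the counter restarts. A small simulation of just that decision, with plain rand() standing in for get_crandom() and illustrative parameters:

/* Toy model of netem's reorder decision: each packet is either delayed
 * (normal path) or sent immediately ahead of the queue (reordered). */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const unsigned int gap = 5;		/* at most 1 in 5 reordered */
	const double reorder_prob = 0.25;	/* 25% chance once eligible */
	unsigned int counter = 0;

	srand(1);
	for (int pkt = 1; pkt <= 20; pkt++) {
		int delayed = counter < gap ||
			      (double)rand() / RAND_MAX >= reorder_prob;

		if (delayed) {
			counter++;
			printf("pkt %2d: delayed (counter=%u)\n", pkt, counter);
		} else {
			counter = 0;
			printf("pkt %2d: sent to front (reordered)\n", pkt);
		}
	}
	return 0;
}

With the matching iproute2 release this surfaces on the command line as something like "tc qdisc add dev eth0 root netem delay 10ms reorder 25% 50%" (tc syntax from iproute2, not part of this patch).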
diff --git a/net/sctp/input.c b/net/sctp/input.c
index b719a77d66b4..fffc880a646d 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -178,6 +178,37 @@ int sctp_rcv(struct sk_buff *skb)
178 178
179 asoc = __sctp_rcv_lookup(skb, &src, &dest, &transport); 179 asoc = __sctp_rcv_lookup(skb, &src, &dest, &transport);
180 180
181 if (!asoc)
182 ep = __sctp_rcv_lookup_endpoint(&dest);
183
184 /* Retrieve the common input handling substructure. */
185 rcvr = asoc ? &asoc->base : &ep->base;
186 sk = rcvr->sk;
187
188 /*
189 * If a frame arrives on an interface and the receiving socket is
190 * bound to another interface, via SO_BINDTODEVICE, treat it as OOTB
191 */
192 if (sk->sk_bound_dev_if && (sk->sk_bound_dev_if != af->skb_iif(skb)))
193 {
194 sock_put(sk);
195 if (asoc) {
196 sctp_association_put(asoc);
197 asoc = NULL;
198 } else {
199 sctp_endpoint_put(ep);
200 ep = NULL;
201 }
202 sk = sctp_get_ctl_sock();
203 ep = sctp_sk(sk)->ep;
204 sctp_endpoint_hold(ep);
205 sock_hold(sk);
206 rcvr = &ep->base;
207 }
208
209 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
210 goto discard_release;
211
181 /* 212 /*
182 * RFC 2960, 8.4 - Handle "Out of the blue" Packets. 213 * RFC 2960, 8.4 - Handle "Out of the blue" Packets.
183 * An SCTP packet is called an "out of the blue" (OOTB) 214 * An SCTP packet is called an "out of the blue" (OOTB)
@@ -187,22 +218,12 @@ int sctp_rcv(struct sk_buff *skb)
187 * packet belongs. 218 * packet belongs.
188 */ 219 */
189 if (!asoc) { 220 if (!asoc) {
190 ep = __sctp_rcv_lookup_endpoint(&dest);
191 if (sctp_rcv_ootb(skb)) { 221 if (sctp_rcv_ootb(skb)) {
192 SCTP_INC_STATS_BH(SCTP_MIB_OUTOFBLUES); 222 SCTP_INC_STATS_BH(SCTP_MIB_OUTOFBLUES);
193 goto discard_release; 223 goto discard_release;
194 } 224 }
195 } 225 }
196 226
197 /* Retrieve the common input handling substructure. */
198 rcvr = asoc ? &asoc->base : &ep->base;
199 sk = rcvr->sk;
200
201 if ((sk) && (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)) {
202 goto discard_release;
203 }
204
205
206 /* SCTP seems to always need a timestamp right now (FIXME) */ 227 /* SCTP seems to always need a timestamp right now (FIXME) */
207 if (skb->stamp.tv_sec == 0) { 228 if (skb->stamp.tv_sec == 0) {
208 do_gettimeofday(&skb->stamp); 229 do_gettimeofday(&skb->stamp);
@@ -265,13 +286,11 @@ discard_it:
265 286
266discard_release: 287discard_release:
267 /* Release any structures we may be holding. */ 288 /* Release any structures we may be holding. */
268 if (asoc) { 289 sock_put(sk);
269 sock_put(asoc->base.sk); 290 if (asoc)
270 sctp_association_put(asoc); 291 sctp_association_put(asoc);
271 } else { 292 else
272 sock_put(ep->base.sk);
273 sctp_endpoint_put(ep); 293 sctp_endpoint_put(ep);
274 }
275 294
276 goto discard_it; 295 goto discard_it;
277} 296}
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index c9d9ea064734..c7e42d125b9c 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -812,26 +812,23 @@ static int sctp_inet6_bind_verify(struct sctp_sock *opt, union sctp_addr *addr)
812 if (addr->sa.sa_family != AF_INET6) 812 if (addr->sa.sa_family != AF_INET6)
813 af = sctp_get_af_specific(addr->sa.sa_family); 813 af = sctp_get_af_specific(addr->sa.sa_family);
814 else { 814 else {
815 struct sock *sk;
816 int type = ipv6_addr_type(&addr->v6.sin6_addr); 815 int type = ipv6_addr_type(&addr->v6.sin6_addr);
817 sk = sctp_opt2sk(opt); 816 struct net_device *dev;
817
818 if (type & IPV6_ADDR_LINKLOCAL) { 818 if (type & IPV6_ADDR_LINKLOCAL) {
819 /* Note: Behavior similar to af_inet6.c: 819 if (!addr->v6.sin6_scope_id)
820 * 1) Overrides previous bound_dev_if 820 return 0;
821 * 2) Destructive even if bind isn't successful. 821 dev = dev_get_by_index(addr->v6.sin6_scope_id);
822 */ 822 if (!dev)
823
824 if (addr->v6.sin6_scope_id)
825 sk->sk_bound_dev_if = addr->v6.sin6_scope_id;
826 if (!sk->sk_bound_dev_if)
827 return 0; 823 return 0;
824 dev_put(dev);
828 } 825 }
829 af = opt->pf->af; 826 af = opt->pf->af;
830 } 827 }
831 return af->available(addr, opt); 828 return af->available(addr, opt);
832} 829}
833 830
834/* Verify that the provided sockaddr looks bindable. Common verification, 831/* Verify that the provided sockaddr looks sendable. Common verification,
835 * has already been taken care of. 832 * has already been taken care of.
836 */ 833 */
837static int sctp_inet6_send_verify(struct sctp_sock *opt, union sctp_addr *addr) 834static int sctp_inet6_send_verify(struct sctp_sock *opt, union sctp_addr *addr)
@@ -842,19 +839,16 @@ static int sctp_inet6_send_verify(struct sctp_sock *opt, union sctp_addr *addr)
842 if (addr->sa.sa_family != AF_INET6) 839 if (addr->sa.sa_family != AF_INET6)
843 af = sctp_get_af_specific(addr->sa.sa_family); 840 af = sctp_get_af_specific(addr->sa.sa_family);
844 else { 841 else {
845 struct sock *sk;
846 int type = ipv6_addr_type(&addr->v6.sin6_addr); 842 int type = ipv6_addr_type(&addr->v6.sin6_addr);
847 sk = sctp_opt2sk(opt); 843 struct net_device *dev;
844
848 if (type & IPV6_ADDR_LINKLOCAL) { 845 if (type & IPV6_ADDR_LINKLOCAL) {
849 /* Note: Behavior similar to af_inet6.c: 846 if (!addr->v6.sin6_scope_id)
850 * 1) Overrides previous bound_dev_if 847 return 0;
851 * 2) Destructive even if bind isn't successful. 848 dev = dev_get_by_index(addr->v6.sin6_scope_id);
852 */ 849 if (!dev)
853
854 if (addr->v6.sin6_scope_id)
855 sk->sk_bound_dev_if = addr->v6.sin6_scope_id;
856 if (!sk->sk_bound_dev_if)
857 return 0; 850 return 0;
851 dev_put(dev);
858 } 852 }
859 af = opt->pf->af; 853 af = opt->pf->af;
860 } 854 }
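Both verify paths above change in the same way: instead of rewriting sk->sk_bound_dev_if from sin6_scope_id (a side effect the removed comment admits was destructive even on a failing bind), they now only check that a link-local address carries a scope id naming a real interface, taking and dropping a reference with dev_get_by_index()/dev_put(). The rule that userspace ends up seeing is the usual one for link-local sockaddrs; a minimal sketch of checking it from userspace with if_indextoname(), as an illustration rather than the kernel path:

/* Validate that a link-local IPv6 sockaddr carries a usable scope id,
 * roughly mirroring the check the kernel now performs on bind/send. */
#include <arpa/inet.h>
#include <net/if.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>

static int scope_id_ok(const struct sockaddr_in6 *sin6)
{
	char name[IF_NAMESIZE];

	if (!IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr))
		return 1;			/* only link-local needs a scope */
	if (sin6->sin6_scope_id == 0)
		return 0;			/* ambiguous: which link? */
	return if_indextoname(sin6->sin6_scope_id, name) != NULL;
}

int main(void)
{
	struct sockaddr_in6 sin6;

	memset(&sin6, 0, sizeof(sin6));
	sin6.sin6_family = AF_INET6;
	inet_pton(AF_INET6, "fe80::1", &sin6.sin6_addr);
	sin6.sin6_scope_id = if_nametoindex("lo");	/* any real ifindex */

	printf("scope id %u %s\n", sin6.sin6_scope_id,
	       scope_id_ok(&sin6) ? "names a real interface" : "is not usable");
	return 0;
}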
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index e42fd8c2916b..98d49ec9b74b 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -132,14 +132,25 @@ void sctp_snmp_proc_exit(void)
132static void sctp_seq_dump_local_addrs(struct seq_file *seq, struct sctp_ep_common *epb) 132static void sctp_seq_dump_local_addrs(struct seq_file *seq, struct sctp_ep_common *epb)
133{ 133{
134 struct list_head *pos; 134 struct list_head *pos;
135 struct sctp_association *asoc;
135 struct sctp_sockaddr_entry *laddr; 136 struct sctp_sockaddr_entry *laddr;
136 union sctp_addr *addr; 137 struct sctp_transport *peer;
138 union sctp_addr *addr, *primary = NULL;
137 struct sctp_af *af; 139 struct sctp_af *af;
138 140
141 if (epb->type == SCTP_EP_TYPE_ASSOCIATION) {
142 asoc = sctp_assoc(epb);
143 peer = asoc->peer.primary_path;
144 primary = &peer->saddr;
145 }
146
139 list_for_each(pos, &epb->bind_addr.address_list) { 147 list_for_each(pos, &epb->bind_addr.address_list) {
140 laddr = list_entry(pos, struct sctp_sockaddr_entry, list); 148 laddr = list_entry(pos, struct sctp_sockaddr_entry, list);
141 addr = (union sctp_addr *)&laddr->a; 149 addr = (union sctp_addr *)&laddr->a;
142 af = sctp_get_af_specific(addr->sa.sa_family); 150 af = sctp_get_af_specific(addr->sa.sa_family);
151 if (primary && af->cmp_addr(addr, primary)) {
152 seq_printf(seq, "*");
153 }
143 af->seq_dump_addr(seq, addr); 154 af->seq_dump_addr(seq, addr);
144 } 155 }
145} 156}
@@ -149,17 +160,54 @@ static void sctp_seq_dump_remote_addrs(struct seq_file *seq, struct sctp_associa
149{ 160{
150 struct list_head *pos; 161 struct list_head *pos;
151 struct sctp_transport *transport; 162 struct sctp_transport *transport;
152 union sctp_addr *addr; 163 union sctp_addr *addr, *primary;
153 struct sctp_af *af; 164 struct sctp_af *af;
154 165
166 primary = &(assoc->peer.primary_addr);
155 list_for_each(pos, &assoc->peer.transport_addr_list) { 167 list_for_each(pos, &assoc->peer.transport_addr_list) {
156 transport = list_entry(pos, struct sctp_transport, transports); 168 transport = list_entry(pos, struct sctp_transport, transports);
157 addr = (union sctp_addr *)&transport->ipaddr; 169 addr = (union sctp_addr *)&transport->ipaddr;
158 af = sctp_get_af_specific(addr->sa.sa_family); 170 af = sctp_get_af_specific(addr->sa.sa_family);
171 if (af->cmp_addr(addr, primary)) {
172 seq_printf(seq, "*");
173 }
159 af->seq_dump_addr(seq, addr); 174 af->seq_dump_addr(seq, addr);
160 } 175 }
161} 176}
162 177
178static void * sctp_eps_seq_start(struct seq_file *seq, loff_t *pos)
179{
180 if (*pos > sctp_ep_hashsize)
181 return NULL;
182
183 if (*pos < 0)
184 *pos = 0;
185
186 if (*pos == 0)
187 seq_printf(seq, " ENDPT SOCK STY SST HBKT LPORT UID INODE LADDRS\n");
188
189 ++*pos;
190
191 return (void *)pos;
192}
193
194static void sctp_eps_seq_stop(struct seq_file *seq, void *v)
195{
196 return;
197}
198
199
200static void * sctp_eps_seq_next(struct seq_file *seq, void *v, loff_t *pos)
201{
202 if (*pos > sctp_ep_hashsize)
203 return NULL;
204
205 ++*pos;
206
207 return pos;
208}
209
210
163/* Display sctp endpoints (/proc/net/sctp/eps). */ 211/* Display sctp endpoints (/proc/net/sctp/eps). */
164static int sctp_eps_seq_show(struct seq_file *seq, void *v) 212static int sctp_eps_seq_show(struct seq_file *seq, void *v)
165{ 213{
@@ -167,38 +215,50 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v)
167 struct sctp_ep_common *epb; 215 struct sctp_ep_common *epb;
168 struct sctp_endpoint *ep; 216 struct sctp_endpoint *ep;
169 struct sock *sk; 217 struct sock *sk;
170 int hash; 218 int hash = *(int *)v;
171 219
172 seq_printf(seq, " ENDPT SOCK STY SST HBKT LPORT LADDRS\n"); 220 if (hash > sctp_ep_hashsize)
173 for (hash = 0; hash < sctp_ep_hashsize; hash++) { 221 return -ENOMEM;
174 head = &sctp_ep_hashtable[hash]; 222
175 read_lock(&head->lock); 223 head = &sctp_ep_hashtable[hash-1];
176 for (epb = head->chain; epb; epb = epb->next) { 224 sctp_local_bh_disable();
177 ep = sctp_ep(epb); 225 read_lock(&head->lock);
178 sk = epb->sk; 226 for (epb = head->chain; epb; epb = epb->next) {
179 seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d ", ep, sk, 227 ep = sctp_ep(epb);
180 sctp_sk(sk)->type, sk->sk_state, hash, 228 sk = epb->sk;
181 epb->bind_addr.port); 229 seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk,
182 sctp_seq_dump_local_addrs(seq, epb); 230 sctp_sk(sk)->type, sk->sk_state, hash-1,
183 seq_printf(seq, "\n"); 231 epb->bind_addr.port,
184 } 232 sock_i_uid(sk), sock_i_ino(sk));
185 read_unlock(&head->lock); 233
234 sctp_seq_dump_local_addrs(seq, epb);
235 seq_printf(seq, "\n");
186 } 236 }
237 read_unlock(&head->lock);
238 sctp_local_bh_enable();
187 239
188 return 0; 240 return 0;
189} 241}
190 242
243static struct seq_operations sctp_eps_ops = {
244 .start = sctp_eps_seq_start,
245 .next = sctp_eps_seq_next,
246 .stop = sctp_eps_seq_stop,
247 .show = sctp_eps_seq_show,
248};
249
250
191/* Initialize the seq file operations for 'eps' object. */ 251/* Initialize the seq file operations for 'eps' object. */
192static int sctp_eps_seq_open(struct inode *inode, struct file *file) 252static int sctp_eps_seq_open(struct inode *inode, struct file *file)
193{ 253{
194 return single_open(file, sctp_eps_seq_show, NULL); 254 return seq_open(file, &sctp_eps_ops);
195} 255}
196 256
197static struct file_operations sctp_eps_seq_fops = { 257static struct file_operations sctp_eps_seq_fops = {
198 .open = sctp_eps_seq_open, 258 .open = sctp_eps_seq_open,
199 .read = seq_read, 259 .read = seq_read,
200 .llseek = seq_lseek, 260 .llseek = seq_lseek,
201 .release = single_release, 261 .release = seq_release,
202}; 262};
203 263
204/* Set up the proc fs entry for 'eps' object. */ 264/* Set up the proc fs entry for 'eps' object. */
@@ -221,6 +281,40 @@ void sctp_eps_proc_exit(void)
221 remove_proc_entry("eps", proc_net_sctp); 281 remove_proc_entry("eps", proc_net_sctp);
222} 282}
223 283
284
285static void * sctp_assocs_seq_start(struct seq_file *seq, loff_t *pos)
286{
287 if (*pos > sctp_assoc_hashsize)
288 return NULL;
289
290 if (*pos < 0)
291 *pos = 0;
292
293 if (*pos == 0)
294 seq_printf(seq, " ASSOC SOCK STY SST ST HBKT ASSOC-ID TX_QUEUE RX_QUEUE UID INODE LPORT "
295 "RPORT LADDRS <-> RADDRS\n");
296
297 ++*pos;
298
299 return (void *)pos;
300}
301
302static void sctp_assocs_seq_stop(struct seq_file *seq, void *v)
303{
304 return;
305}
306
307
308static void * sctp_assocs_seq_next(struct seq_file *seq, void *v, loff_t *pos)
309{
310 if (*pos > sctp_assoc_hashsize)
311 return NULL;
312
313 ++*pos;
314
315 return pos;
316}
317
224/* Display sctp associations (/proc/net/sctp/assocs). */ 318/* Display sctp associations (/proc/net/sctp/assocs). */
225static int sctp_assocs_seq_show(struct seq_file *seq, void *v) 319static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
226{ 320{
@@ -228,43 +322,57 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
228 struct sctp_ep_common *epb; 322 struct sctp_ep_common *epb;
229 struct sctp_association *assoc; 323 struct sctp_association *assoc;
230 struct sock *sk; 324 struct sock *sk;
231 int hash; 325 int hash = *(int *)v;
232 326
233 seq_printf(seq, " ASSOC SOCK STY SST ST HBKT LPORT RPORT " 327 if (hash > sctp_assoc_hashsize)
234 "LADDRS <-> RADDRS\n"); 328 return -ENOMEM;
235 for (hash = 0; hash < sctp_assoc_hashsize; hash++) { 329
236 head = &sctp_assoc_hashtable[hash]; 330 head = &sctp_assoc_hashtable[hash-1];
237 read_lock(&head->lock); 331 sctp_local_bh_disable();
238 for (epb = head->chain; epb; epb = epb->next) { 332 read_lock(&head->lock);
239 assoc = sctp_assoc(epb); 333 for (epb = head->chain; epb; epb = epb->next) {
240 sk = epb->sk; 334 assoc = sctp_assoc(epb);
241 seq_printf(seq, 335 sk = epb->sk;
242 "%8p %8p %-3d %-3d %-2d %-4d %-5d %-5d ", 336 seq_printf(seq,
243 assoc, sk, sctp_sk(sk)->type, sk->sk_state, 337 "%8p %8p %-3d %-3d %-2d %-4d %4d %8d %8d %7d %5lu %-5d %5d ",
244 assoc->state, hash, epb->bind_addr.port, 338 assoc, sk, sctp_sk(sk)->type, sk->sk_state,
245 assoc->peer.port); 339 assoc->state, hash-1, assoc->assoc_id,
246 sctp_seq_dump_local_addrs(seq, epb); 340 (sk->sk_rcvbuf - assoc->rwnd),
247 seq_printf(seq, "<-> "); 341 assoc->sndbuf_used,
248 sctp_seq_dump_remote_addrs(seq, assoc); 342 sock_i_uid(sk), sock_i_ino(sk),
249 seq_printf(seq, "\n"); 343 epb->bind_addr.port,
250 } 344 assoc->peer.port);
251 read_unlock(&head->lock); 345
346 seq_printf(seq, " ");
347 sctp_seq_dump_local_addrs(seq, epb);
348 seq_printf(seq, "<-> ");
349 sctp_seq_dump_remote_addrs(seq, assoc);
350 seq_printf(seq, "\n");
252 } 351 }
352 read_unlock(&head->lock);
353 sctp_local_bh_enable();
253 354
254 return 0; 355 return 0;
255} 356}
256 357
358static struct seq_operations sctp_assoc_ops = {
359 .start = sctp_assocs_seq_start,
360 .next = sctp_assocs_seq_next,
361 .stop = sctp_assocs_seq_stop,
362 .show = sctp_assocs_seq_show,
363};
364
257/* Initialize the seq file operations for 'assocs' object. */ 365/* Initialize the seq file operations for 'assocs' object. */
258static int sctp_assocs_seq_open(struct inode *inode, struct file *file) 366static int sctp_assocs_seq_open(struct inode *inode, struct file *file)
259{ 367{
260 return single_open(file, sctp_assocs_seq_show, NULL); 368 return seq_open(file, &sctp_assoc_ops);
261} 369}
262 370
263static struct file_operations sctp_assocs_seq_fops = { 371static struct file_operations sctp_assocs_seq_fops = {
264 .open = sctp_assocs_seq_open, 372 .open = sctp_assocs_seq_open,
265 .read = seq_read, 373 .read = seq_read,
266 .llseek = seq_lseek, 374 .llseek = seq_lseek,
267 .release = single_release, 375 .release = seq_release,
268}; 376};
269 377
270/* Set up the proc fs entry for 'assocs' object. */ 378/* Set up the proc fs entry for 'assocs' object. */
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 2e1f9c3556f5..5135e1a25d25 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -378,10 +378,13 @@ static int sctp_v4_available(union sctp_addr *addr, struct sctp_sock *sp)
378{ 378{
379 int ret = inet_addr_type(addr->v4.sin_addr.s_addr); 379 int ret = inet_addr_type(addr->v4.sin_addr.s_addr);
380 380
381 /* FIXME: ip_nonlocal_bind sysctl support. */
382 381
383 if (addr->v4.sin_addr.s_addr != INADDR_ANY && ret != RTN_LOCAL) 382 if (addr->v4.sin_addr.s_addr != INADDR_ANY &&
383 ret != RTN_LOCAL &&
384 !sp->inet.freebind &&
385 !sysctl_ip_nonlocal_bind)
384 return 0; 386 return 0;
387
385 return 1; 388 return 1;
386} 389}
387 390
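Note: this hunk drops the FIXME and lets a non-local unicast address pass sctp_v4_available() when either the socket set IP_FREEBIND or the ip_nonlocal_bind sysctl is enabled. A standalone restatement of the predicate, as a user-space sketch with plain booleans standing in for inet_addr_type() == RTN_LOCAL, inet_sk()->freebind and the sysctl:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool demo_v4_available(uint32_t sin_addr, bool addr_is_local,
			      bool freebind, bool nonlocal_bind)
{
	/* reject non-wildcard, non-local addresses unless an override is set */
	if (sin_addr != htonl(INADDR_ANY) &&
	    !addr_is_local && !freebind && !nonlocal_bind)
		return false;
	return true;
}

int main(void)
{
	uint32_t nonlocal = inet_addr("203.0.113.7");	/* not a local address */

	printf("%d\n", demo_v4_available(nonlocal, false, false, false)); /* 0 */
	printf("%d\n", demo_v4_available(nonlocal, false, true, false));  /* 1 */
	return 0;
}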
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 0b338eca6dc0..2a3c0e08a090 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -4686,6 +4686,7 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
4686 struct sctp_endpoint *newep = newsp->ep; 4686 struct sctp_endpoint *newep = newsp->ep;
4687 struct sk_buff *skb, *tmp; 4687 struct sk_buff *skb, *tmp;
4688 struct sctp_ulpevent *event; 4688 struct sctp_ulpevent *event;
4689 int flags = 0;
4689 4690
4690 /* Migrate socket buffer sizes and all the socket level options to the 4691 /* Migrate socket buffer sizes and all the socket level options to the
4691 * new socket. 4692 * new socket.
@@ -4707,6 +4708,17 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
4707 sctp_sk(newsk)->bind_hash = pp; 4708 sctp_sk(newsk)->bind_hash = pp;
4708 inet_sk(newsk)->num = inet_sk(oldsk)->num; 4709 inet_sk(newsk)->num = inet_sk(oldsk)->num;
4709 4710
4711 /* Copy the bind_addr list from the original endpoint to the new
4712 * endpoint so that we can handle restarts properly
4713 */
4714 if (assoc->peer.ipv4_address)
4715 flags |= SCTP_ADDR4_PEERSUPP;
4716 if (assoc->peer.ipv6_address)
4717 flags |= SCTP_ADDR6_PEERSUPP;
4718 sctp_bind_addr_copy(&newsp->ep->base.bind_addr,
4719 &oldsp->ep->base.bind_addr,
4720 SCTP_SCOPE_GLOBAL, GFP_KERNEL, flags);
4721
4710 /* Move any messages in the old socket's receive queue that are for the 4722 /* Move any messages in the old socket's receive queue that are for the
4711 * peeled off association to the new socket's receive queue. 4723 * peeled off association to the new socket's receive queue.
4712 */ 4724 */
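Note: the added block carries the old endpoint's bound-address list over to the peeled-off socket, keeping only the address families the peer has advertised support for. A toy sketch of that filter-and-copy idea (invented demo_ names, not the kernel API):

#include <stdio.h>

#define DEMO_ADDR4_PEERSUPP 0x1
#define DEMO_ADDR6_PEERSUPP 0x2

struct demo_addr { int family; const char *text; };	/* family: 4 or 6 */

static int demo_copy_bind_addrs(const struct demo_addr *src, int n,
				struct demo_addr *dst, int flags)
{
	int copied = 0;

	for (int i = 0; i < n; i++) {
		if (src[i].family == 4 && !(flags & DEMO_ADDR4_PEERSUPP))
			continue;
		if (src[i].family == 6 && !(flags & DEMO_ADDR6_PEERSUPP))
			continue;
		dst[copied++] = src[i];	/* family supported by the peer */
	}
	return copied;
}

int main(void)
{
	struct demo_addr src[] = { { 4, "192.0.2.1" }, { 6, "2001:db8::1" } };
	struct demo_addr dst[2];

	/* peer only advertised IPv4 support */
	printf("copied %d\n",
	       demo_copy_bind_addrs(src, 2, dst, DEMO_ADDR4_PEERSUPP));
	return 0;
}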
diff --git a/net/socket.c b/net/socket.c
index 2cd44990d8d3..cec0cb38b9ce 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -4,7 +4,7 @@
4 * Version: @(#)socket.c 1.1.93 18/02/95 4 * Version: @(#)socket.c 1.1.93 18/02/95
5 * 5 *
6 * Authors: Orest Zborowski, <obz@Kodak.COM> 6 * Authors: Orest Zborowski, <obz@Kodak.COM>
7 * Ross Biro, <bir7@leland.Stanford.Edu> 7 * Ross Biro
8 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 8 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
9 * 9 *
10 * Fixes: 10 * Fixes:
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index c478fc8db776..c420eba4876b 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -770,33 +770,12 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
770 err = path_lookup(sunaddr->sun_path, LOOKUP_PARENT, &nd); 770 err = path_lookup(sunaddr->sun_path, LOOKUP_PARENT, &nd);
771 if (err) 771 if (err)
772 goto out_mknod_parent; 772 goto out_mknod_parent;
773 /* 773
774 * Yucky last component or no last component at all? 774 dentry = lookup_create(&nd, 0);
775 * (foo/., foo/.., /////)
776 */
777 err = -EEXIST;
778 if (nd.last_type != LAST_NORM)
779 goto out_mknod;
780 /*
781 * Lock the directory.
782 */
783 down(&nd.dentry->d_inode->i_sem);
784 /*
785 * Do the final lookup.
786 */
787 dentry = lookup_hash(&nd.last, nd.dentry);
788 err = PTR_ERR(dentry); 775 err = PTR_ERR(dentry);
789 if (IS_ERR(dentry)) 776 if (IS_ERR(dentry))
790 goto out_mknod_unlock; 777 goto out_mknod_unlock;
791 err = -ENOENT; 778
792 /*
793 * Special case - lookup gave negative, but... we had foo/bar/
794 * From the vfs_mknod() POV we just have a negative dentry -
795 * all is fine. Let's be bastards - you had / on the end, you've
796 * been asking for (non-existent) directory. -ENOENT for you.
797 */
798 if (nd.last.name[nd.last.len] && !dentry->d_inode)
799 goto out_mknod_dput;
800 /* 779 /*
801 * All right, let's create it. 780 * All right, let's create it.
802 */ 781 */
@@ -845,7 +824,6 @@ out_mknod_dput:
845 dput(dentry); 824 dput(dentry);
846out_mknod_unlock: 825out_mknod_unlock:
847 up(&nd.dentry->d_inode->i_sem); 826 up(&nd.dentry->d_inode->i_sem);
848out_mknod:
849 path_release(&nd); 827 path_release(&nd);
850out_mknod_parent: 828out_mknod_parent:
851 if (err==-EEXIST) 829 if (err==-EEXIST)
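Note: the filesystem node that vfs_mknod() creates in this path is what makes a second bind() to the same pathname fail; the -EEXIST branch at the end of the hunk shows up in user space as EADDRINUSE. A small runnable illustration from the user-space side (the /tmp path is arbitrary for the example):

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_un addr = { .sun_family = AF_UNIX };
	int s1 = socket(AF_UNIX, SOCK_STREAM, 0);
	int s2 = socket(AF_UNIX, SOCK_STREAM, 0);

	strcpy(addr.sun_path, "/tmp/demo-unix-bind.sock");
	unlink(addr.sun_path);			/* start from a clean slate */

	if (bind(s1, (struct sockaddr *)&addr, sizeof(addr)) == 0)
		puts("first bind: socket node created");

	if (bind(s2, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		perror("second bind");		/* EADDRINUSE: node exists */

	unlink(addr.sun_path);
	return 0;
}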
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c
index 080aae243ce0..2f4531fcaca2 100644
--- a/net/xfrm/xfrm_algo.c
+++ b/net/xfrm/xfrm_algo.c
@@ -698,7 +698,7 @@ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
698 return -ENOMEM; 698 return -ENOMEM;
699 699
700 if (skb1->sk) 700 if (skb1->sk)
701 skb_set_owner_w(skb, skb1->sk); 701 skb_set_owner_w(skb2, skb1->sk);
702 702
703 /* Looking around. Are we still alive? 703 /* Looking around. Are we still alive?
704 * OK, link new skb, drop old one */ 704 * OK, link new skb, drop old one */
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 55ed979db144..d07f5ce31824 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1136,7 +1136,7 @@ int xfrm_bundle_ok(struct xfrm_dst *first, struct flowi *fl, int family)
1136 struct xfrm_dst *last; 1136 struct xfrm_dst *last;
1137 u32 mtu; 1137 u32 mtu;
1138 1138
1139 if (!dst_check(dst->path, 0) || 1139 if (!dst_check(dst->path, ((struct xfrm_dst *)dst)->path_cookie) ||
1140 (dst->dev && !netif_running(dst->dev))) 1140 (dst->dev && !netif_running(dst->dev)))
1141 return 0; 1141 return 0;
1142 1142
@@ -1156,7 +1156,7 @@ int xfrm_bundle_ok(struct xfrm_dst *first, struct flowi *fl, int family)
1156 xdst->child_mtu_cached = mtu; 1156 xdst->child_mtu_cached = mtu;
1157 } 1157 }
1158 1158
1159 if (!dst_check(xdst->route, 0)) 1159 if (!dst_check(xdst->route, xdst->route_cookie))
1160 return 0; 1160 return 0;
1161 mtu = dst_mtu(xdst->route); 1161 mtu = dst_mtu(xdst->route);
1162 if (xdst->route_mtu_cached != mtu) { 1162 if (xdst->route_mtu_cached != mtu) {
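Note: both checks now pass the cookie recorded when the bundle was built instead of a constant 0, so dst_check() compares the cached route against the generation it actually came from. The general pattern, reduced to a user-space sketch with invented demo_ names (not the xfrm internals):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct demo_route { uint32_t genid; };		/* bumped when the route changes */

struct demo_cached_ref {
	const struct demo_route *route;
	uint32_t cookie;			/* genid captured at build time */
};

static bool demo_check(const struct demo_cached_ref *ref)
{
	return ref->route->genid == ref->cookie;
}

int main(void)
{
	struct demo_route rt = { .genid = 1 };
	struct demo_cached_ref ref = { .route = &rt, .cookie = rt.genid };

	printf("%d\n", demo_check(&ref));	/* 1: still valid */
	rt.genid++;				/* route changed  */
	printf("%d\n", demo_check(&ref));	/* 0: must rebuild */
	return 0;
}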
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 5ddda2c98af9..97509011c274 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -34,14 +34,21 @@ static int verify_one_alg(struct rtattr **xfrma, enum xfrm_attr_type_t type)
34{ 34{
35 struct rtattr *rt = xfrma[type - 1]; 35 struct rtattr *rt = xfrma[type - 1];
36 struct xfrm_algo *algp; 36 struct xfrm_algo *algp;
37 int len;
37 38
38 if (!rt) 39 if (!rt)
39 return 0; 40 return 0;
40 41
41 if ((rt->rta_len - sizeof(*rt)) < sizeof(*algp)) 42 len = (rt->rta_len - sizeof(*rt)) - sizeof(*algp);
43 if (len < 0)
42 return -EINVAL; 44 return -EINVAL;
43 45
44 algp = RTA_DATA(rt); 46 algp = RTA_DATA(rt);
47
48 len -= (algp->alg_key_len + 7U) / 8;
49 if (len < 0)
50 return -EINVAL;
51
45 switch (type) { 52 switch (type) {
46 case XFRMA_ALG_AUTH: 53 case XFRMA_ALG_AUTH:
47 if (!algp->alg_key_len && 54 if (!algp->alg_key_len &&
@@ -162,6 +169,7 @@ static int attach_one_algo(struct xfrm_algo **algpp, u8 *props,
162 struct rtattr *rta = u_arg; 169 struct rtattr *rta = u_arg;
163 struct xfrm_algo *p, *ualg; 170 struct xfrm_algo *p, *ualg;
164 struct xfrm_algo_desc *algo; 171 struct xfrm_algo_desc *algo;
172 int len;
165 173
166 if (!rta) 174 if (!rta)
167 return 0; 175 return 0;
@@ -173,11 +181,12 @@ static int attach_one_algo(struct xfrm_algo **algpp, u8 *props,
173 return -ENOSYS; 181 return -ENOSYS;
174 *props = algo->desc.sadb_alg_id; 182 *props = algo->desc.sadb_alg_id;
175 183
176 p = kmalloc(sizeof(*ualg) + ualg->alg_key_len, GFP_KERNEL); 184 len = sizeof(*ualg) + (ualg->alg_key_len + 7U) / 8;
185 p = kmalloc(len, GFP_KERNEL);
177 if (!p) 186 if (!p)
178 return -ENOMEM; 187 return -ENOMEM;
179 188
180 memcpy(p, ualg, sizeof(*ualg) + ualg->alg_key_len); 189 memcpy(p, ualg, len);
181 *algpp = p; 190 *algpp = p;
182 return 0; 191 return 0;
183} 192}
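Note: both xfrm_user hunks treat alg_key_len as a length in bits. verify_one_alg() now checks that the attribute payload covers the xfrm_algo header plus the key rounded up to whole bytes, and attach_one_algo() allocates and copies that same rounded length instead of using the bit count directly as a byte count. A user-space sketch of the length rule (simplified layout, invented demo_ names):

#include <stdint.h>
#include <stdio.h>

struct demo_algo {
	char name[64];
	uint32_t key_len_bits;
	/* key bytes follow the header */
};

static int demo_verify(size_t payload_len, const struct demo_algo *algo)
{
	size_t key_bytes = (algo->key_len_bits + 7U) / 8;	/* bits -> bytes */

	if (payload_len < sizeof(*algo))		/* header truncated */
		return -1;
	if (payload_len - sizeof(*algo) < key_bytes)	/* key truncated */
		return -1;
	return 0;
}

int main(void)
{
	struct demo_algo a = { .name = "demo", .key_len_bits = 128 };

	printf("%d\n", demo_verify(sizeof(a) + 16, &a));	/*  0: fits  */
	printf("%d\n", demo_verify(sizeof(a) + 15, &a));	/* -1: short */
	return 0;
}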