Diffstat (limited to 'net')
 net/8021q/vlan.c                      |   3
 net/8021q/vlanproc.c                  |  36
 net/bridge/br_if.c                    |   4
 net/bridge/br_ioctl.c                 |   4
 net/bridge/br_netfilter.c             | 138
 net/bridge/br_netlink.c               |   3
 net/core/dev.c                        |  99
 net/core/dev_mcast.c                  |   5
 net/core/rtnetlink.c                  |   7
 net/decnet/af_decnet.c                |  11
 net/decnet/dn_dev.c                   |  85
 net/decnet/dn_fib.c                   |   2
 net/decnet/dn_route.c                 |  14
 net/ipv4/devinet.c                    |  17
 net/ipv4/igmp.c                       |  15
 net/ipv4/ipconfig.c                   |   2
 net/ipv4/netfilter/nf_nat_proto_gre.c |  20
 net/ipv4/netfilter/nf_nat_rule.c      |   4
 net/ipv4/netfilter/nf_nat_sip.c       |  26
 net/ipv4/tcp.c                        |   3
 net/ipv4/tcp_highspeed.c              |  24
 net/ipv4/tcp_yeah.h                   |   7
 net/ipv6/addrconf.c                   |  28
 net/ipv6/anycast.c                    |  17
 net/ipv6/mcast.c                      |  15
 net/iucv/af_iucv.c                    | 193
 net/iucv/iucv.c                       |  49
 net/llc/llc_core.c                    |  10
 net/netlink/af_netlink.c              |  34
 net/netrom/nr_route.c                 |   5
 net/rose/rose_route.c                 |   8
 net/rxrpc/Kconfig                     |   3
 net/rxrpc/ar-ack.c                    |  80
 net/rxrpc/ar-error.c                  |   2
 net/rxrpc/ar-output.c                 |   2
 net/rxrpc/ar-peer.c                   |  45
 net/sched/sch_api.c                   |   7
 net/sctp/associola.c                  |  29
 net/sctp/ipv6.c                       |  49
 net/sctp/protocol.c                   |  81
 net/sctp/sm_make_chunk.c              |  15
 net/sctp/sm_sideeffect.c              |  35
 net/sctp/sm_statefuns.c               |  29
 net/sctp/socket.c                     |  40
 net/tipc/eth_media.c                  |  12
 net/xfrm/xfrm_policy.c                |   2
 net/xfrm/xfrm_state.c                 |   2
 net/xfrm/xfrm_user.c                  |  73
 48 files changed, 816 insertions(+), 578 deletions(-)
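
Nearly every hunk below is the same conversion: the global device chain moves from the hand-rolled dev_base/dev->next singly linked list to a standard list_head (dev_base_head), and open-coded walks become iterator macros. For reference, a sketch of the helpers this series adds in include/linux/netdevice.h; that header is outside this 'net'-limited diffstat, so take the exact spelling as an assumption:

	#define for_each_netdev(d)		\
			list_for_each_entry(d, &dev_base_head, dev_list)
	#define for_each_netdev_safe(d, n)	\
			list_for_each_entry_safe(d, n, &dev_base_head, dev_list)
	#define for_each_netdev_continue(d)	\
			list_for_each_entry_continue(d, &dev_base_head, dev_list)
	#define net_device_entry(lh)	list_entry(lh, struct net_device, dev_list)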
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index c0c7bb8e9f07..bd93c45778d4 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -117,8 +117,7 @@ static void __exit vlan_cleanup_devices(void)
 	struct net_device *dev, *nxt;
 
 	rtnl_lock();
-	for (dev = dev_base; dev; dev = nxt) {
-		nxt = dev->next;
+	for_each_netdev_safe(dev, nxt) {
 		if (dev->priv_flags & IFF_802_1Q_VLAN) {
 			unregister_vlan_dev(VLAN_DEV_INFO(dev)->real_dev,
 					    VLAN_DEV_INFO(dev)->vlan_id);
diff --git a/net/8021q/vlanproc.c b/net/8021q/vlanproc.c
index 5e24f72602a1..d216a64421cd 100644
--- a/net/8021q/vlanproc.c
+++ b/net/8021q/vlanproc.c
@@ -237,13 +237,9 @@ int vlan_proc_rem_dev(struct net_device *vlandev)
  * The following few functions build the content of /proc/net/vlan/config
  */
 
-/* starting at dev, find a VLAN device */
-static struct net_device *vlan_skip(struct net_device *dev)
+static inline int is_vlan_dev(struct net_device *dev)
 {
-	while (dev && !(dev->priv_flags & IFF_802_1Q_VLAN))
-		dev = dev->next;
-
-	return dev;
+	return dev->priv_flags & IFF_802_1Q_VLAN;
 }
 
 /* start read of /proc/net/vlan/config */
@@ -257,19 +253,35 @@ static void *vlan_seq_start(struct seq_file *seq, loff_t *pos)
 	if (*pos == 0)
 		return SEQ_START_TOKEN;
 
-	for (dev = vlan_skip(dev_base); dev && i < *pos;
-	     dev = vlan_skip(dev->next), ++i);
+	for_each_netdev(dev) {
+		if (!is_vlan_dev(dev))
+			continue;
+
+		if (i++ == *pos)
+			return dev;
+	}
 
-	return  (i == *pos) ? dev : NULL;
+	return NULL;
 }
 
 static void *vlan_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
+	struct net_device *dev;
+
 	++*pos;
 
-	return vlan_skip((v == SEQ_START_TOKEN)
-			    ? dev_base
-			    : ((struct net_device *)v)->next);
+	dev = (struct net_device *)v;
+	if (v == SEQ_START_TOKEN)
+		dev = net_device_entry(&dev_base_head);
+
+	for_each_netdev_continue(dev) {
+		if (!is_vlan_dev(dev))
+			continue;
+
+		return dev;
+	}
+
+	return NULL;
 }
 
 static void vlan_seq_stop(struct seq_file *seq, void *v)
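
The vlan_seq_start()/vlan_seq_next() pair above shows the seq_file idiom reused by several files in this series: slot 0 returns SEQ_START_TOKEN for the header row, and the next() callback rewinds the cursor to the list-head sentinel so that for_each_netdev_continue() starts at the first real device. A condensed sketch of the pattern (the my_seq_* names are illustrative, not from the patch):

	static void *my_seq_start(struct seq_file *seq, loff_t *pos)
	{
		struct net_device *dev;
		loff_t i = 1;

		read_lock(&dev_base_lock);
		if (*pos == 0)
			return SEQ_START_TOKEN;	/* slot 0: print the header line */

		for_each_netdev(dev)
			if (is_vlan_dev(dev) && i++ == *pos)
				return dev;
		return NULL;
	}

	static void *my_seq_next(struct seq_file *seq, void *v, loff_t *pos)
	{
		struct net_device *dev = v;

		++*pos;
		if (v == SEQ_START_TOKEN)
			/* aim the cursor at the sentinel so _continue yields
			 * the first device on the list */
			dev = net_device_entry(&dev_base_head);

		for_each_netdev_continue(dev)
			if (is_vlan_dev(dev))
				return dev;
		return NULL;
	}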
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 690573bbf012..849deaf14108 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -475,11 +475,9 @@ void __exit br_cleanup_bridges(void)
 	struct net_device *dev, *nxt;
 
 	rtnl_lock();
-	for (dev = dev_base; dev; dev = nxt) {
-		nxt = dev->next;
+	for_each_netdev_safe(dev, nxt)
 		if (dev->priv_flags & IFF_EBRIDGE)
 			del_br(dev->priv);
-	}
 	rtnl_unlock();
 
 }
diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c
index eda0fbfc923a..bb15e9e259b1 100644
--- a/net/bridge/br_ioctl.c
+++ b/net/bridge/br_ioctl.c
@@ -27,7 +27,9 @@ static int get_bridge_ifindices(int *indices, int num)
 	struct net_device *dev;
 	int i = 0;
 
-	for (dev = dev_base; dev && i < num; dev = dev->next) {
+	for_each_netdev(dev) {
+		if (i >= num)
+			break;
 		if (dev->priv_flags & IFF_EBRIDGE)
 			indices[i++] = dev->ifindex;
 	}
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 9b2986b182ba..fa779874b9dd 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -142,14 +142,33 @@ static inline struct nf_bridge_info *nf_bridge_alloc(struct sk_buff *skb)
 	return skb->nf_bridge;
 }
 
-static inline void nf_bridge_save_header(struct sk_buff *skb)
+static inline void nf_bridge_push_encap_header(struct sk_buff *skb)
+{
+	unsigned int len = nf_bridge_encap_header_len(skb);
+
+	skb_push(skb, len);
+	skb->network_header -= len;
+}
+
+static inline void nf_bridge_pull_encap_header(struct sk_buff *skb)
 {
-	int header_size = ETH_HLEN;
+	unsigned int len = nf_bridge_encap_header_len(skb);
 
-	if (skb->protocol == htons(ETH_P_8021Q))
-		header_size += VLAN_HLEN;
-	else if (skb->protocol == htons(ETH_P_PPP_SES))
-		header_size += PPPOE_SES_HLEN;
+	skb_pull(skb, len);
+	skb->network_header += len;
+}
+
+static inline void nf_bridge_pull_encap_header_rcsum(struct sk_buff *skb)
+{
+	unsigned int len = nf_bridge_encap_header_len(skb);
+
+	skb_pull_rcsum(skb, len);
+	skb->network_header += len;
+}
+
+static inline void nf_bridge_save_header(struct sk_buff *skb)
+{
+	int header_size = ETH_HLEN + nf_bridge_encap_header_len(skb);
 
 	skb_copy_from_linear_data_offset(skb, -header_size,
 					 skb->nf_bridge->data, header_size);
@@ -162,12 +181,7 @@ static inline void nf_bridge_save_header(struct sk_buff *skb)
 int nf_bridge_copy_header(struct sk_buff *skb)
 {
 	int err;
-	int header_size = ETH_HLEN;
-
-	if (skb->protocol == htons(ETH_P_8021Q))
-		header_size += VLAN_HLEN;
-	else if (skb->protocol == htons(ETH_P_PPP_SES))
-		header_size += PPPOE_SES_HLEN;
+	int header_size = ETH_HLEN + nf_bridge_encap_header_len(skb);
 
 	err = skb_cow(skb, header_size);
 	if (err)
@@ -175,11 +189,7 @@ int nf_bridge_copy_header(struct sk_buff *skb)
 
 	skb_copy_to_linear_data_offset(skb, -header_size,
 				       skb->nf_bridge->data, header_size);
-
-	if (skb->protocol == htons(ETH_P_8021Q))
-		__skb_push(skb, VLAN_HLEN);
-	else if (skb->protocol == htons(ETH_P_PPP_SES))
-		__skb_push(skb, PPPOE_SES_HLEN);
+	__skb_push(skb, nf_bridge_encap_header_len(skb));
 	return 0;
 }
 
@@ -200,13 +210,7 @@ static int br_nf_pre_routing_finish_ipv6(struct sk_buff *skb)
 	dst_hold(skb->dst);
 
 	skb->dev = nf_bridge->physindev;
-	if (skb->protocol == htons(ETH_P_8021Q)) {
-		skb_push(skb, VLAN_HLEN);
-		skb->network_header -= VLAN_HLEN;
-	} else if (skb->protocol == htons(ETH_P_PPP_SES)) {
-		skb_push(skb, PPPOE_SES_HLEN);
-		skb->network_header -= PPPOE_SES_HLEN;
-	}
+	nf_bridge_push_encap_header(skb);
 	NF_HOOK_THRESH(PF_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
 		       br_handle_frame_finish, 1);
 
@@ -284,13 +288,7 @@ static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb)
 	if (!skb->dev)
 		kfree_skb(skb);
 	else {
-		if (skb->protocol == htons(ETH_P_8021Q)) {
-			skb_pull(skb, VLAN_HLEN);
-			skb->network_header += VLAN_HLEN;
-		} else if (skb->protocol == htons(ETH_P_PPP_SES)) {
-			skb_pull(skb, PPPOE_SES_HLEN);
-			skb->network_header += PPPOE_SES_HLEN;
-		}
+		nf_bridge_pull_encap_header(skb);
 		skb->dst->output(skb);
 	}
 	return 0;
@@ -356,15 +354,7 @@ bridged_dnat:
 				 * bridged frame */
 				nf_bridge->mask |= BRNF_BRIDGED_DNAT;
 				skb->dev = nf_bridge->physindev;
-				if (skb->protocol ==
-				    htons(ETH_P_8021Q)) {
-					skb_push(skb, VLAN_HLEN);
-					skb->network_header -= VLAN_HLEN;
-				} else if(skb->protocol ==
-				    htons(ETH_P_PPP_SES)) {
-					skb_push(skb, PPPOE_SES_HLEN);
-					skb->network_header -= PPPOE_SES_HLEN;
-				}
+				nf_bridge_push_encap_header(skb);
 				NF_HOOK_THRESH(PF_BRIDGE, NF_BR_PRE_ROUTING,
 					       skb, skb->dev, NULL,
 					       br_nf_pre_routing_finish_bridge,
@@ -380,13 +370,7 @@ bridged_dnat:
 	}
 
 	skb->dev = nf_bridge->physindev;
-	if (skb->protocol == htons(ETH_P_8021Q)) {
-		skb_push(skb, VLAN_HLEN);
-		skb->network_header -= VLAN_HLEN;
-	} else if (skb->protocol == htons(ETH_P_PPP_SES)) {
-		skb_push(skb, PPPOE_SES_HLEN);
-		skb->network_header -= PPPOE_SES_HLEN;
-	}
+	nf_bridge_push_encap_header(skb);
 	NF_HOOK_THRESH(PF_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
 		       br_handle_frame_finish, 1);
 
@@ -536,14 +520,7 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff **pskb,
 #endif
 		if ((skb = skb_share_check(*pskb, GFP_ATOMIC)) == NULL)
 			goto out;
-
-		if (skb->protocol == htons(ETH_P_8021Q)) {
-			skb_pull_rcsum(skb, VLAN_HLEN);
-			skb->network_header += VLAN_HLEN;
-		} else if (skb->protocol == htons(ETH_P_PPP_SES)) {
-			skb_pull_rcsum(skb, PPPOE_SES_HLEN);
-			skb->network_header += PPPOE_SES_HLEN;
-		}
+		nf_bridge_pull_encap_header_rcsum(skb);
 		return br_nf_pre_routing_ipv6(hook, skb, in, out, okfn);
 	}
 #ifdef CONFIG_SYSCTL
@@ -557,14 +534,7 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff **pskb,
 
 	if ((skb = skb_share_check(*pskb, GFP_ATOMIC)) == NULL)
 		goto out;
-
-	if (skb->protocol == htons(ETH_P_8021Q)) {
-		skb_pull_rcsum(skb, VLAN_HLEN);
-		skb->network_header += VLAN_HLEN;
-	} else if (skb->protocol == htons(ETH_P_PPP_SES)) {
-		skb_pull_rcsum(skb, PPPOE_SES_HLEN);
-		skb->network_header += PPPOE_SES_HLEN;
-	}
+	nf_bridge_pull_encap_header_rcsum(skb);
 
 	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
 		goto inhdr_error;
@@ -642,13 +612,7 @@ static int br_nf_forward_finish(struct sk_buff *skb)
 	} else {
 		in = *((struct net_device **)(skb->cb));
 	}
-	if (skb->protocol == htons(ETH_P_8021Q)) {
-		skb_push(skb, VLAN_HLEN);
-		skb->network_header -= VLAN_HLEN;
-	} else if (skb->protocol == htons(ETH_P_PPP_SES)) {
-		skb_push(skb, PPPOE_SES_HLEN);
-		skb->network_header -= PPPOE_SES_HLEN;
-	}
+	nf_bridge_push_encap_header(skb);
 	NF_HOOK_THRESH(PF_BRIDGE, NF_BR_FORWARD, skb, in,
 		       skb->dev, br_forward_finish, 1);
 	return 0;
@@ -682,13 +646,7 @@ static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff **pskb,
 	else
 		pf = PF_INET6;
 
-	if (skb->protocol == htons(ETH_P_8021Q)) {
-		skb_pull(*pskb, VLAN_HLEN);
-		(*pskb)->network_header += VLAN_HLEN;
-	} else if (skb->protocol == htons(ETH_P_PPP_SES)) {
-		skb_pull(*pskb, PPPOE_SES_HLEN);
-		(*pskb)->network_header += PPPOE_SES_HLEN;
-	}
+	nf_bridge_pull_encap_header(*pskb);
 
 	nf_bridge = skb->nf_bridge;
 	if (skb->pkt_type == PACKET_OTHERHOST) {
@@ -722,15 +680,12 @@ static unsigned int br_nf_forward_arp(unsigned int hook, struct sk_buff **pskb,
 	if (skb->protocol != htons(ETH_P_ARP)) {
 		if (!IS_VLAN_ARP(skb))
 			return NF_ACCEPT;
-		skb_pull(*pskb, VLAN_HLEN);
-		(*pskb)->network_header += VLAN_HLEN;
+		nf_bridge_pull_encap_header(*pskb);
 	}
 
 	if (arp_hdr(skb)->ar_pln != 4) {
-		if (IS_VLAN_ARP(skb)) {
-			skb_push(*pskb, VLAN_HLEN);
-			(*pskb)->network_header -= VLAN_HLEN;
-		}
+		if (IS_VLAN_ARP(skb))
+			nf_bridge_push_encap_header(*pskb);
 		return NF_ACCEPT;
 	}
 	*d = (struct net_device *)in;
@@ -777,13 +732,7 @@ static unsigned int br_nf_local_out(unsigned int hook, struct sk_buff **pskb,
 		skb->pkt_type = PACKET_OTHERHOST;
 		nf_bridge->mask ^= BRNF_PKT_TYPE;
 	}
-	if (skb->protocol == htons(ETH_P_8021Q)) {
-		skb_push(skb, VLAN_HLEN);
-		skb->network_header -= VLAN_HLEN;
-	} else if (skb->protocol == htons(ETH_P_PPP_SES)) {
-		skb_push(skb, PPPOE_SES_HLEN);
-		skb->network_header -= PPPOE_SES_HLEN;
-	}
+	nf_bridge_push_encap_header(skb);
 
 	NF_HOOK(PF_BRIDGE, NF_BR_FORWARD, skb, realindev, skb->dev,
 		br_forward_finish);
@@ -848,14 +797,7 @@ static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff **pskb,
 		nf_bridge->mask |= BRNF_PKT_TYPE;
 	}
 
-	if (skb->protocol == htons(ETH_P_8021Q)) {
-		skb_pull(skb, VLAN_HLEN);
-		skb->network_header += VLAN_HLEN;
-	} else if (skb->protocol == htons(ETH_P_PPP_SES)) {
-		skb_pull(skb, PPPOE_SES_HLEN);
-		skb->network_header += PPPOE_SES_HLEN;
-	}
-
+	nf_bridge_pull_encap_header(skb);
 	nf_bridge_save_header(skb);
 
 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
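
Every br_netfilter.c hunk above funnels the VLAN and PPPoE special cases through a single length helper. Per the same patchset it lives in include/linux/netfilter_bridge.h, outside this diff, so this is a sketch rather than a quote:

	static inline unsigned int nf_bridge_encap_header_len(const struct sk_buff *skb)
	{
		switch (skb->protocol) {
		case __constant_htons(ETH_P_8021Q):
			return VLAN_HLEN;	/* 802.1Q tag */
		case __constant_htons(ETH_P_PPP_SES):
			return PPPOE_SES_HLEN;	/* PPPoE session header */
		default:
			return 0;
		}
	}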
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 35facc0c11c2..0fcf6f073064 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -109,7 +109,8 @@ static int br_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
 	struct net_device *dev;
 	int idx;
 
-	for (dev = dev_base, idx = 0; dev; dev = dev->next) {
+	idx = 0;
+	for_each_netdev(dev) {
 		/* not a bridge port */
 		if (dev->br_port == NULL || idx < cb->args[0])
 			goto skip;
diff --git a/net/core/dev.c b/net/core/dev.c
index eb999003bbb7..f27d4ab181e6 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -156,13 +156,13 @@ static spinlock_t net_dma_event_lock;
 #endif
 
 /*
- * The @dev_base list is protected by @dev_base_lock and the rtnl
+ * The @dev_base_head list is protected by @dev_base_lock and the rtnl
  * semaphore.
  *
  * Pure readers hold dev_base_lock for reading.
  *
  * Writers must hold the rtnl semaphore while they loop through the
- * dev_base list, and hold dev_base_lock for writing when they do the
+ * dev_base_head list, and hold dev_base_lock for writing when they do the
  * actual updates.  This allows pure readers to access the list even
  * while a writer is preparing to update it.
  *
@@ -174,11 +174,10 @@ static spinlock_t net_dma_event_lock;
  * unregister_netdevice(), which must be called with the rtnl
  * semaphore held.
  */
-struct net_device *dev_base;
-static struct net_device **dev_tail = &dev_base;
+LIST_HEAD(dev_base_head);
 DEFINE_RWLOCK(dev_base_lock);
 
-EXPORT_SYMBOL(dev_base);
+EXPORT_SYMBOL(dev_base_head);
 EXPORT_SYMBOL(dev_base_lock);
 
 #define NETDEV_HASHBITS	8
@@ -567,26 +566,38 @@ struct net_device *dev_getbyhwaddr(unsigned short type, char *ha)
 
 	ASSERT_RTNL();
 
-	for (dev = dev_base; dev; dev = dev->next)
+	for_each_netdev(dev)
 		if (dev->type == type &&
 		    !memcmp(dev->dev_addr, ha, dev->addr_len))
-			break;
-	return dev;
+			return dev;
+
+	return NULL;
 }
 
 EXPORT_SYMBOL(dev_getbyhwaddr);
 
+struct net_device *__dev_getfirstbyhwtype(unsigned short type)
+{
+	struct net_device *dev;
+
+	ASSERT_RTNL();
+	for_each_netdev(dev)
+		if (dev->type == type)
+			return dev;
+
+	return NULL;
+}
+
+EXPORT_SYMBOL(__dev_getfirstbyhwtype);
+
 struct net_device *dev_getfirstbyhwtype(unsigned short type)
 {
 	struct net_device *dev;
 
 	rtnl_lock();
-	for (dev = dev_base; dev; dev = dev->next) {
-		if (dev->type == type) {
-			dev_hold(dev);
-			break;
-		}
-	}
+	dev = __dev_getfirstbyhwtype(type);
+	if (dev)
+		dev_hold(dev);
 	rtnl_unlock();
 	return dev;
 }
@@ -606,17 +617,19 @@ EXPORT_SYMBOL(dev_getfirstbyhwtype);
 
 struct net_device * dev_get_by_flags(unsigned short if_flags, unsigned short mask)
 {
-	struct net_device *dev;
+	struct net_device *dev, *ret;
 
+	ret = NULL;
 	read_lock(&dev_base_lock);
-	for (dev = dev_base; dev != NULL; dev = dev->next) {
+	for_each_netdev(dev) {
 		if (((dev->flags ^ if_flags) & mask) == 0) {
 			dev_hold(dev);
+			ret = dev;
 			break;
 		}
 	}
 	read_unlock(&dev_base_lock);
-	return dev;
+	return ret;
 }
 
 /**
@@ -682,7 +695,7 @@ int dev_alloc_name(struct net_device *dev, const char *name)
 		if (!inuse)
 			return -ENOMEM;
 
-		for (d = dev_base; d; d = d->next) {
+		for_each_netdev(d) {
 			if (!sscanf(d->name, name, &i))
 				continue;
 			if (i < 0 || i >= max_netdevices)
@@ -964,7 +977,7 @@ int register_netdevice_notifier(struct notifier_block *nb)
 	rtnl_lock();
 	err = raw_notifier_chain_register(&netdev_chain, nb);
 	if (!err) {
-		for (dev = dev_base; dev; dev = dev->next) {
+		for_each_netdev(dev) {
 			nb->notifier_call(nb, NETDEV_REGISTER, dev);
 
 			if (dev->flags & IFF_UP)
@@ -2038,7 +2051,7 @@ static int dev_ifconf(char __user *arg)
 	 */
 
 	total = 0;
-	for (dev = dev_base; dev; dev = dev->next) {
+	for_each_netdev(dev) {
 		for (i = 0; i < NPROTO; i++) {
 			if (gifconf_list[i]) {
 				int done;
@@ -2070,26 +2083,28 @@ static int dev_ifconf(char __user *arg)
  * This is invoked by the /proc filesystem handler to display a device
  * in detail.
  */
-static struct net_device *dev_get_idx(loff_t pos)
+void *dev_seq_start(struct seq_file *seq, loff_t *pos)
 {
+	loff_t off;
 	struct net_device *dev;
-	loff_t i;
 
-	for (i = 0, dev = dev_base; dev && i < pos; ++i, dev = dev->next);
+	read_lock(&dev_base_lock);
+	if (!*pos)
+		return SEQ_START_TOKEN;
 
-	return i == pos ? dev : NULL;
-}
+	off = 1;
+	for_each_netdev(dev)
+		if (off++ == *pos)
+			return dev;
 
-void *dev_seq_start(struct seq_file *seq, loff_t *pos)
-{
-	read_lock(&dev_base_lock);
-	return *pos ? dev_get_idx(*pos - 1) : SEQ_START_TOKEN;
+	return NULL;
 }
 
 void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
 	++*pos;
-	return v == SEQ_START_TOKEN ? dev_base : ((struct net_device *)v)->next;
+	return v == SEQ_START_TOKEN ?
+		first_net_device() : next_net_device((struct net_device *)v);
 }
 
 void dev_seq_stop(struct seq_file *seq, void *v)
@@ -3071,11 +3086,9 @@ int register_netdevice(struct net_device *dev)
 
 	set_bit(__LINK_STATE_PRESENT, &dev->state);
 
-	dev->next = NULL;
 	dev_init_scheduler(dev);
 	write_lock_bh(&dev_base_lock);
-	*dev_tail = dev;
-	dev_tail = &dev->next;
+	list_add_tail(&dev->dev_list, &dev_base_head);
 	hlist_add_head(&dev->name_hlist, head);
 	hlist_add_head(&dev->index_hlist, dev_index_hash(dev->ifindex));
 	dev_hold(dev);
@@ -3349,8 +3362,6 @@ void synchronize_net(void)
 
 void unregister_netdevice(struct net_device *dev)
 {
-	struct net_device *d, **dp;
-
 	BUG_ON(dev_boot_phase);
 	ASSERT_RTNL();
 
@@ -3370,19 +3381,11 @@ void unregister_netdevice(struct net_device *dev)
 	dev_close(dev);
 
 	/* And unlink it from device chain. */
-	for (dp = &dev_base; (d = *dp) != NULL; dp = &d->next) {
-		if (d == dev) {
-			write_lock_bh(&dev_base_lock);
-			hlist_del(&dev->name_hlist);
-			hlist_del(&dev->index_hlist);
-			if (dev_tail == &dev->next)
-				dev_tail = dp;
-			*dp = d->next;
-			write_unlock_bh(&dev_base_lock);
-			break;
-		}
-	}
-	BUG_ON(!d);
+	write_lock_bh(&dev_base_lock);
+	list_del(&dev->dev_list);
+	hlist_del(&dev->name_hlist);
+	hlist_del(&dev->index_hlist);
+	write_unlock_bh(&dev_base_lock);
 
 	dev->reg_state = NETREG_UNREGISTERING;
 
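
Two inline helpers are assumed by the dev_seq_next() conversion above: a circular list_head has no NULL terminator, so stepping off the end must be translated back to NULL by hand. The same property is why dev_get_by_flags() now returns a separate ret instead of the loop cursor, which is never NULL after a completed for_each_netdev(). Sketch of the helpers (added next to the macros in include/linux/netdevice.h; exact form assumed):

	static inline struct net_device *next_net_device(struct net_device *dev)
	{
		struct list_head *lh = dev->dev_list.next;

		/* wrapped around to the sentinel: no more devices */
		return lh == &dev_base_head ? NULL : net_device_entry(lh);
	}

	static inline struct net_device *first_net_device(void)
	{
		return list_empty(&dev_base_head) ? NULL :
			net_device_entry(dev_base_head.next);
	}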
diff --git a/net/core/dev_mcast.c b/net/core/dev_mcast.c
index 7d57bf77f3a3..5a54053386c8 100644
--- a/net/core/dev_mcast.c
+++ b/net/core/dev_mcast.c
@@ -223,7 +223,7 @@ static void *dev_mc_seq_start(struct seq_file *seq, loff_t *pos)
 	loff_t off = 0;
 
 	read_lock(&dev_base_lock);
-	for (dev = dev_base; dev; dev = dev->next) {
+	for_each_netdev(dev) {
 		if (off++ == *pos)
 			return dev;
 	}
@@ -232,9 +232,8 @@ static void *dev_mc_seq_start(struct seq_file *seq, loff_t *pos)
 
 static void *dev_mc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
-	struct net_device *dev = v;
 	++*pos;
-	return dev->next;
+	return next_net_device((struct net_device *)v);
 }
 
 static void dev_mc_seq_stop(struct seq_file *seq, void *v)
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index cec111109155..8c971a2efe2a 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -539,13 +539,16 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
 	int s_idx = cb->args[0];
 	struct net_device *dev;
 
-	for (dev=dev_base, idx=0; dev; dev = dev->next, idx++) {
+	idx = 0;
+	for_each_netdev(dev) {
 		if (idx < s_idx)
-			continue;
+			goto cont;
 		if (rtnl_fill_ifinfo(skb, dev, NULL, 0, RTM_NEWLINK,
 				     NETLINK_CB(cb->skb).pid,
 				     cb->nlh->nlmsg_seq, 0, NLM_F_MULTI) <= 0)
 			break;
+cont:
+		idx++;
 	}
 	cb->args[0] = idx;
 
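
The goto cont/idx++ rewrite keeps the device index advancing even for skipped entries, so a dump that stops on a full skb can resume at cb->args[0] on the next netlink callback. The idiom reduced to a sketch (example_dump() and fill_one() are hypothetical names):

	static int example_dump(struct sk_buff *skb, struct netlink_callback *cb)
	{
		struct net_device *dev;
		int idx = 0, s_idx = cb->args[0];

		for_each_netdev(dev) {
			if (idx < s_idx)
				goto cont;	/* already sent in an earlier batch */
			if (fill_one(skb, dev) <= 0)
				break;		/* skb full: resume from idx next call */
	cont:
			idx++;			/* count every device, sent or skipped */
		}
		cb->args[0] = idx;
		return skb->len;
	}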
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index a205eaa87f52..9fbe87c93802 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -721,7 +721,7 @@ static int dn_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 	struct sock *sk = sock->sk;
 	struct dn_scp *scp = DN_SK(sk);
 	struct sockaddr_dn *saddr = (struct sockaddr_dn *)uaddr;
-	struct net_device *dev;
+	struct net_device *dev, *ldev;
 	int rv;
 
 	if (addr_len != sizeof(struct sockaddr_dn))
@@ -746,14 +746,17 @@ static int dn_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 	if (!(saddr->sdn_flags & SDF_WILD)) {
 		if (dn_ntohs(saddr->sdn_nodeaddrl)) {
 			read_lock(&dev_base_lock);
-			for(dev = dev_base; dev; dev = dev->next) {
+			ldev = NULL;
+			for_each_netdev(dev) {
 				if (!dev->dn_ptr)
 					continue;
-				if (dn_dev_islocal(dev, dn_saddr2dn(saddr)))
+				if (dn_dev_islocal(dev, dn_saddr2dn(saddr))) {
+					ldev = dev;
 					break;
+				}
 			}
 			read_unlock(&dev_base_lock);
-			if (dev == NULL)
+			if (ldev == NULL)
 				return -EADDRNOTAVAIL;
 		}
 	}
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index 5c2a9951b638..764a56a13e38 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -799,9 +799,10 @@ static int dn_nl_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
 	skip_ndevs = cb->args[0];
 	skip_naddr = cb->args[1];
 
-	for (dev = dev_base, idx = 0; dev; dev = dev->next, idx++) {
+	idx = 0;
+	for_each_netdev(dev) {
 		if (idx < skip_ndevs)
-			continue;
+			goto cont;
 		else if (idx > skip_ndevs) {
 			/* Only skip over addresses for first dev dumped
 			 * in this iteration (idx == skip_ndevs) */
@@ -809,18 +810,20 @@ static int dn_nl_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
 		}
 
 		if ((dn_db = dev->dn_ptr) == NULL)
-			continue;
+			goto cont;
 
 		for (ifa = dn_db->ifa_list, dn_idx = 0; ifa;
 		     ifa = ifa->ifa_next, dn_idx++) {
 			if (dn_idx < skip_naddr)
-				continue;
+				goto cont;
 
 			if (dn_nl_fill_ifaddr(skb, ifa, NETLINK_CB(cb->skb).pid,
 					      cb->nlh->nlmsg_seq, RTM_NEWADDR,
 					      NLM_F_MULTI) < 0)
 				goto done;
 		}
+cont:
+		idx++;
 	}
 done:
 	cb->args[0] = idx;
@@ -1296,7 +1299,7 @@ void dn_dev_devices_off(void)
 	struct net_device *dev;
 
 	rtnl_lock();
-	for(dev = dev_base; dev; dev = dev->next)
+	for_each_netdev(dev)
 		dn_dev_down(dev);
 	rtnl_unlock();
 
@@ -1307,7 +1310,7 @@ void dn_dev_devices_on(void)
 	struct net_device *dev;
 
 	rtnl_lock();
-	for(dev = dev_base; dev; dev = dev->next) {
+	for_each_netdev(dev) {
 		if (dev->flags & IFF_UP)
 			dn_dev_up(dev);
 	}
@@ -1325,62 +1328,56 @@ int unregister_dnaddr_notifier(struct notifier_block *nb)
 }
 
 #ifdef CONFIG_PROC_FS
-static inline struct net_device *dn_dev_get_next(struct seq_file *seq, struct net_device *dev)
+static inline int is_dn_dev(struct net_device *dev)
 {
-	do {
-		dev = dev->next;
-	} while(dev && !dev->dn_ptr);
-
-	return dev;
+	return dev->dn_ptr != NULL;
 }
 
-static struct net_device *dn_dev_get_idx(struct seq_file *seq, loff_t pos)
+static void *dn_dev_seq_start(struct seq_file *seq, loff_t *pos)
 {
+	int i;
 	struct net_device *dev;
 
-	dev = dev_base;
-	if (dev && !dev->dn_ptr)
-		dev = dn_dev_get_next(seq, dev);
-	if (pos) {
-		while(dev && (dev = dn_dev_get_next(seq, dev)))
-			--pos;
-	}
-	return dev;
-}
+	read_lock(&dev_base_lock);
 
-static void *dn_dev_seq_start(struct seq_file *seq, loff_t *pos)
-{
-	if (*pos) {
-		struct net_device *dev;
-		read_lock(&dev_base_lock);
-		dev = dn_dev_get_idx(seq, *pos - 1);
-		if (dev == NULL)
-			read_unlock(&dev_base_lock);
-		return dev;
+	if (*pos == 0)
+		return SEQ_START_TOKEN;
+
+	i = 1;
+	for_each_netdev(dev) {
+		if (!is_dn_dev(dev))
+			continue;
+
+		if (i++ == *pos)
+			return dev;
 	}
-	return SEQ_START_TOKEN;
+
+	return NULL;
 }
 
 static void *dn_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
-	struct net_device *dev = v;
-	loff_t one = 1;
+	struct net_device *dev;
 
-	if (v == SEQ_START_TOKEN) {
-		dev = dn_dev_seq_start(seq, &one);
-	} else {
-		dev = dn_dev_get_next(seq, dev);
-		if (dev == NULL)
-			read_unlock(&dev_base_lock);
-	}
 	++*pos;
-	return dev;
+
+	dev = (struct net_device *)v;
+	if (v == SEQ_START_TOKEN)
+		dev = net_device_entry(&dev_base_head);
+
+	for_each_netdev_continue(dev) {
+		if (!is_dn_dev(dev))
+			continue;
+
+		return dev;
+	}
+
+	return NULL;
 }
 
 static void dn_dev_seq_stop(struct seq_file *seq, void *v)
 {
-	if (v && v != SEQ_START_TOKEN)
-		read_unlock(&dev_base_lock);
+	read_unlock(&dev_base_lock);
 }
 
 static char *dn_type2asc(char type)
diff --git a/net/decnet/dn_fib.c b/net/decnet/dn_fib.c
index 310a86268d2b..d2bc19d47950 100644
--- a/net/decnet/dn_fib.c
+++ b/net/decnet/dn_fib.c
@@ -602,7 +602,7 @@ static void dn_fib_del_ifaddr(struct dn_ifaddr *ifa)
 
 	/* Scan device list */
 	read_lock(&dev_base_lock);
-	for(dev = dev_base; dev; dev = dev->next) {
+	for_each_netdev(dev) {
 		dn_db = dev->dn_ptr;
 		if (dn_db == NULL)
 			continue;
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 5d7337bcf0fe..a8bf106b7a61 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -886,7 +886,7 @@ static int dn_route_output_slow(struct dst_entry **pprt, const struct flowi *old
 		.iif = loopback_dev.ifindex,
 		.oif = oldflp->oif };
 	struct dn_route *rt = NULL;
-	struct net_device *dev_out = NULL;
+	struct net_device *dev_out = NULL, *dev;
 	struct neighbour *neigh = NULL;
 	unsigned hash;
 	unsigned flags = 0;
@@ -925,15 +925,17 @@ static int dn_route_output_slow(struct dst_entry **pprt, const struct flowi *old
 			goto out;
 		}
 		read_lock(&dev_base_lock);
-		for(dev_out = dev_base; dev_out; dev_out = dev_out->next) {
-			if (!dev_out->dn_ptr)
+		for_each_netdev(dev) {
+			if (!dev->dn_ptr)
 				continue;
-			if (!dn_dev_islocal(dev_out, oldflp->fld_src))
+			if (!dn_dev_islocal(dev, oldflp->fld_src))
 				continue;
-			if ((dev_out->flags & IFF_LOOPBACK) &&
+			if ((dev->flags & IFF_LOOPBACK) &&
 			    oldflp->fld_dst &&
-			    !dn_dev_islocal(dev_out, oldflp->fld_dst))
+			    !dn_dev_islocal(dev, oldflp->fld_dst))
 				continue;
+
+			dev_out = dev;
 			break;
 		}
 		read_unlock(&dev_base_lock);
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 088888db8b3d..7f95e6e9beeb 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -910,7 +910,7 @@ no_in_dev:
 	 */
 	read_lock(&dev_base_lock);
 	rcu_read_lock();
-	for (dev = dev_base; dev; dev = dev->next) {
+	for_each_netdev(dev) {
 		if ((in_dev = __in_dev_get_rcu(dev)) == NULL)
 			continue;
 
@@ -989,7 +989,7 @@ __be32 inet_confirm_addr(const struct net_device *dev, __be32 dst, __be32 local,
 
 	read_lock(&dev_base_lock);
 	rcu_read_lock();
-	for (dev = dev_base; dev; dev = dev->next) {
+	for_each_netdev(dev) {
 		if ((in_dev = __in_dev_get_rcu(dev))) {
 			addr = confirm_addr_indev(in_dev, dst, local, scope);
 			if (addr)
@@ -1182,23 +1182,26 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
 	int s_ip_idx, s_idx = cb->args[0];
 
 	s_ip_idx = ip_idx = cb->args[1];
-	for (dev = dev_base, idx = 0; dev; dev = dev->next, idx++) {
+	idx = 0;
+	for_each_netdev(dev) {
 		if (idx < s_idx)
-			continue;
+			goto cont;
 		if (idx > s_idx)
 			s_ip_idx = 0;
 		if ((in_dev = __in_dev_get_rtnl(dev)) == NULL)
-			continue;
+			goto cont;
 
 		for (ifa = in_dev->ifa_list, ip_idx = 0; ifa;
 		     ifa = ifa->ifa_next, ip_idx++) {
 			if (ip_idx < s_ip_idx)
-				continue;
+				goto cont;
 			if (inet_fill_ifaddr(skb, ifa, NETLINK_CB(cb->skb).pid,
 					     cb->nlh->nlmsg_seq,
 					     RTM_NEWADDR, NLM_F_MULTI) <= 0)
 				goto done;
 		}
+cont:
+		idx++;
 	}
 
 done:
@@ -1243,7 +1246,7 @@ void inet_forward_change(void)
 	ipv4_devconf_dflt.forwarding = on;
 
 	read_lock(&dev_base_lock);
-	for (dev = dev_base; dev; dev = dev->next) {
+	for_each_netdev(dev) {
 		struct in_device *in_dev;
 		rcu_read_lock();
 		in_dev = __in_dev_get_rcu(dev);
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 2506021c2935..f4dd47453108 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -2288,9 +2288,8 @@ static inline struct ip_mc_list *igmp_mc_get_first(struct seq_file *seq)
 	struct ip_mc_list *im = NULL;
 	struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);
 
-	for (state->dev = dev_base, state->in_dev = NULL;
-	     state->dev;
-	     state->dev = state->dev->next) {
+	state->in_dev = NULL;
+	for_each_netdev(state->dev) {
 		struct in_device *in_dev;
 		in_dev = in_dev_get(state->dev);
 		if (!in_dev)
@@ -2316,7 +2315,7 @@ static struct ip_mc_list *igmp_mc_get_next(struct seq_file *seq, struct ip_mc_li
 		read_unlock(&state->in_dev->mc_list_lock);
 		in_dev_put(state->in_dev);
 	}
-	state->dev = state->dev->next;
+	state->dev = next_net_device(state->dev);
 	if (!state->dev) {
 		state->in_dev = NULL;
 		break;
@@ -2450,9 +2449,9 @@ static inline struct ip_sf_list *igmp_mcf_get_first(struct seq_file *seq)
 	struct ip_mc_list *im = NULL;
 	struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq);
 
-	for (state->dev = dev_base, state->idev = NULL, state->im = NULL;
-	     state->dev;
-	     state->dev = state->dev->next) {
+	state->idev = NULL;
+	state->im = NULL;
+	for_each_netdev(state->dev) {
 		struct in_device *idev;
 		idev = in_dev_get(state->dev);
 		if (unlikely(idev == NULL))
@@ -2488,7 +2487,7 @@ static struct ip_sf_list *igmp_mcf_get_next(struct seq_file *seq, struct ip_sf_l
 		read_unlock(&state->idev->mc_list_lock);
 		in_dev_put(state->idev);
 	}
-	state->dev = state->dev->next;
+	state->dev = next_net_device(state->dev);
 	if (!state->dev) {
 		state->idev = NULL;
 		goto out;
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 597c800b2fdc..342ca8d89458 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -192,7 +192,7 @@ static int __init ic_open_devs(void)
 	if (dev_change_flags(&loopback_dev, loopback_dev.flags | IFF_UP) < 0)
 		printk(KERN_ERR "IP-Config: Failed to open %s\n", loopback_dev.name);
 
-	for (dev = dev_base; dev; dev = dev->next) {
+	for_each_netdev(dev) {
 		if (dev == &loopback_dev)
 			continue;
 		if (user_dev_name[0] ? !strcmp(dev->name, user_dev_name) :
diff --git a/net/ipv4/netfilter/nf_nat_proto_gre.c b/net/ipv4/netfilter/nf_nat_proto_gre.c
index e5a34c17d927..c3908bc5a709 100644
--- a/net/ipv4/netfilter/nf_nat_proto_gre.c
+++ b/net/ipv4/netfilter/nf_nat_proto_gre.c
@@ -72,6 +72,11 @@ gre_unique_tuple(struct nf_conntrack_tuple *tuple,
 	__be16 *keyptr;
 	unsigned int min, i, range_size;
 
+	/* If there is no master conntrack we are not PPTP,
+	   do not change tuples */
+	if (!conntrack->master)
+		return 0;
+
 	if (maniptype == IP_NAT_MANIP_SRC)
 		keyptr = &tuple->src.u.gre.key;
 	else
@@ -122,18 +127,9 @@ gre_manip_pkt(struct sk_buff **pskb, unsigned int iphdroff,
 	if (maniptype != IP_NAT_MANIP_DST)
 		return 1;
 	switch (greh->version) {
-	case 0:
-		if (!greh->key) {
-			DEBUGP("can't nat GRE w/o key\n");
-			break;
-		}
-		if (greh->csum) {
-			/* FIXME: Never tested this code... */
-			nf_proto_csum_replace4(gre_csum(greh), *pskb,
-					       *(gre_key(greh)),
-					       tuple->dst.u.gre.key, 0);
-		}
-		*(gre_key(greh)) = tuple->dst.u.gre.key;
+	case GRE_VERSION_1701:
+		/* We do not currently NAT any GREv0 packets.
+		 * Try to behave like "nf_nat_proto_unknown" */
 		break;
 	case GRE_VERSION_PPTP:
 		DEBUGP("call_id -> 0x%04x\n", ntohs(tuple->dst.u.gre.key));
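
The bare case 0: becomes GRE_VERSION_1701, making explicit that plain (non-PPTP) GRE is now passed through unmodified, like an unknown protocol. Values assumed for the constants, which come from the GRE conntrack header rather than this diff:

	#define GRE_VERSION_1701	0x0	/* plain GRE, RFC 1701/2784: not NATed */
	#define GRE_VERSION_PPTP	0x1	/* enhanced GRE with a PPTP call ID, RFC 2637 */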
diff --git a/net/ipv4/netfilter/nf_nat_rule.c b/net/ipv4/netfilter/nf_nat_rule.c
index 2a283397a8b6..2534f718ab92 100644
--- a/net/ipv4/netfilter/nf_nat_rule.c
+++ b/net/ipv4/netfilter/nf_nat_rule.c
@@ -226,10 +226,6 @@ static int ipt_dnat_checkentry(const char *tablename,
 		printk("DNAT: multiple ranges no longer supported\n");
 		return 0;
 	}
-	if (mr->range[0].flags & IP_NAT_RANGE_PROTO_RANDOM) {
-		printk("DNAT: port randomization not supported\n");
-		return 0;
-	}
 	return 1;
 }
 
235 231
diff --git a/net/ipv4/netfilter/nf_nat_sip.c b/net/ipv4/netfilter/nf_nat_sip.c
index bfd88e4e0685..fac97cf51ae5 100644
--- a/net/ipv4/netfilter/nf_nat_sip.c
+++ b/net/ipv4/netfilter/nf_nat_sip.c
@@ -222,6 +222,29 @@ static unsigned int mangle_sdp(struct sk_buff **pskb,
 	return mangle_content_len(pskb, ctinfo, ct, dptr);
 }
 
+static void ip_nat_sdp_expect(struct nf_conn *ct,
+			      struct nf_conntrack_expect *exp)
+{
+	struct nf_nat_range range;
+
+	/* This must be a fresh one. */
+	BUG_ON(ct->status & IPS_NAT_DONE_MASK);
+
+	/* Change src to where master sends to */
+	range.flags = IP_NAT_RANGE_MAP_IPS;
+	range.min_ip = range.max_ip
+		= ct->master->tuplehash[!exp->dir].tuple.dst.u3.ip;
+	/* hook doesn't matter, but it has to do source manip */
+	nf_nat_setup_info(ct, &range, NF_IP_POST_ROUTING);
+
+	/* For DST manip, map port here to where it's expected. */
+	range.flags = (IP_NAT_RANGE_MAP_IPS | IP_NAT_RANGE_PROTO_SPECIFIED);
+	range.min = range.max = exp->saved_proto;
+	range.min_ip = range.max_ip = exp->saved_ip;
+	/* hook doesn't matter, but it has to do destination manip */
+	nf_nat_setup_info(ct, &range, NF_IP_PRE_ROUTING);
+}
+
 /* So, this packet has hit the connection tracking matching code.
    Mangle it, and change the expectation to match the new version. */
 static unsigned int ip_nat_sdp(struct sk_buff **pskb,
@@ -239,13 +262,14 @@ static unsigned int ip_nat_sdp(struct sk_buff **pskb,
 	/* Connection will come from reply */
 	newip = ct->tuplehash[!dir].tuple.dst.u3.ip;
 
+	exp->saved_ip = exp->tuple.dst.u3.ip;
 	exp->tuple.dst.u3.ip = newip;
 	exp->saved_proto.udp.port = exp->tuple.dst.u.udp.port;
 	exp->dir = !dir;
 
 	/* When you see the packet, we need to NAT it the same as the
 	   this one. */
-	exp->expectfn = nf_nat_follow_master;
+	exp->expectfn = ip_nat_sdp_expect;
 
 	/* Try to get same port: if not, try to change it. */
 	for (port = ntohs(exp->saved_proto.udp.port); port != 0; port++) {
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index d6e488668171..8b124eafbb90 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1760,8 +1760,7 @@ int tcp_disconnect(struct sock *sk, int flags)
 	tcp_clear_retrans(tp);
 	inet_csk_delack_init(sk);
 	tcp_init_send_head(sk);
-	tp->rx_opt.saw_tstamp = 0;
-	tcp_sack_reset(&tp->rx_opt);
+	memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
 	__sk_dst_reset(sk);
 
 	BUG_TRAP(!inet->num || icsk->icsk_bind_hash);
diff --git a/net/ipv4/tcp_highspeed.c b/net/ipv4/tcp_highspeed.c
index a291097fcc0a..43d624e5043c 100644
--- a/net/ipv4/tcp_highspeed.c
+++ b/net/ipv4/tcp_highspeed.c
@@ -97,10 +97,6 @@ struct hstcp {
 	u32	ai;
 };
 
-static int max_ssthresh = 100;
-module_param(max_ssthresh, int, 0644);
-MODULE_PARM_DESC(max_ssthresh, "limited slow start threshold (RFC3742)");
-
 static void hstcp_init(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
@@ -122,23 +118,9 @@ static void hstcp_cong_avoid(struct sock *sk, u32 adk, u32 rtt,
 	if (!tcp_is_cwnd_limited(sk, in_flight))
 		return;
 
-	if (tp->snd_cwnd <= tp->snd_ssthresh) {
-		/* RFC3742: limited slow start
-		 * the window is increased by 1/K MSS for each arriving ACK,
-		 * for K = int(cwnd/(0.5 max_ssthresh))
-		 */
-		if (max_ssthresh > 0 && tp->snd_cwnd > max_ssthresh) {
-			u32 k = max(tp->snd_cwnd / (max_ssthresh >> 1), 1U);
-			if (++tp->snd_cwnd_cnt >= k) {
-				if (tp->snd_cwnd < tp->snd_cwnd_clamp)
-					tp->snd_cwnd++;
-				tp->snd_cwnd_cnt = 0;
-			}
-		} else {
-			if (tp->snd_cwnd < tp->snd_cwnd_clamp)
-				tp->snd_cwnd++;
-		}
-	} else {
+	if (tp->snd_cwnd <= tp->snd_ssthresh)
+		tcp_slow_start(tp);
+	else {
 		/* Update AIMD parameters.
 		 *
 		 * We want to guarantee that:
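
With the module-local RFC 3742 limited slow start gone, HighSpeed TCP defers to the shared tcp_slow_start() helper whenever cwnd <= ssthresh. A deliberately simplified model of the per-ACK step that helper performs; the real code in net/ipv4/tcp_cong.c also handles appropriate byte counting, so treat this only as a sketch:

	static void slow_start_step(struct tcp_sock *tp)
	{
		/* classic slow start: one extra segment per ACK,
		 * i.e. cwnd roughly doubles every round trip */
		if (tp->snd_cwnd < tp->snd_cwnd_clamp)
			tp->snd_cwnd++;
	}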
diff --git a/net/ipv4/tcp_yeah.h b/net/ipv4/tcp_yeah.h
deleted file mode 100644
index ed3b7198f23c..000000000000
--- a/net/ipv4/tcp_yeah.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/skbuff.h>
-#include <linux/inet_diag.h>
-#include <asm/div64.h>
-
-#include <net/tcp.h>
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 3452433cbc96..d02685c6bc69 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -449,7 +449,7 @@ static void addrconf_forward_change(void)
 	struct inet6_dev *idev;
 
 	read_lock(&dev_base_lock);
-	for (dev=dev_base; dev; dev=dev->next) {
+	for_each_netdev(dev) {
 		rcu_read_lock();
 		idev = __in6_dev_get(dev);
 		if (idev) {
@@ -911,7 +911,7 @@ int ipv6_dev_get_saddr(struct net_device *daddr_dev,
 	read_lock(&dev_base_lock);
 	rcu_read_lock();
 
-	for (dev = dev_base; dev; dev=dev->next) {
+	for_each_netdev(dev) {
 		struct inet6_dev *idev;
 		struct inet6_ifaddr *ifa;
 
@@ -2064,7 +2064,7 @@ static void sit_add_v4_addrs(struct inet6_dev *idev)
 		return;
 	}
 
-	for (dev = dev_base; dev != NULL; dev = dev->next) {
+	for_each_netdev(dev) {
 		struct in_device * in_dev = __in_dev_get_rtnl(dev);
 		if (in_dev && (dev->flags & IFF_UP)) {
 			struct in_ifaddr * ifa;
@@ -2225,7 +2225,7 @@ static void ip6_tnl_add_linklocal(struct inet6_dev *idev)
 		return;
 	}
 	/* then try to inherit it from any device */
-	for (link_dev = dev_base; link_dev; link_dev = link_dev->next) {
+	for_each_netdev(link_dev) {
 		if (!ipv6_inherit_linklocal(idev, link_dev))
 			return;
 	}
@@ -3257,14 +3257,15 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
 	s_idx = cb->args[0];
 	s_ip_idx = ip_idx = cb->args[1];
 
-	for (dev = dev_base, idx = 0; dev; dev = dev->next, idx++) {
+	idx = 0;
+	for_each_netdev(dev) {
 		if (idx < s_idx)
-			continue;
+			goto cont;
 		if (idx > s_idx)
 			s_ip_idx = 0;
 		ip_idx = 0;
 		if ((idev = in6_dev_get(dev)) == NULL)
-			continue;
+			goto cont;
 		read_lock_bh(&idev->lock);
 		switch (type) {
 		case UNICAST_ADDR:
@@ -3311,6 +3312,8 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
 		}
 		read_unlock_bh(&idev->lock);
 		in6_dev_put(idev);
+cont:
+		idx++;
 	}
 done:
 	if (err <= 0) {
@@ -3575,16 +3578,19 @@ static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
 	struct inet6_dev *idev;
 
 	read_lock(&dev_base_lock);
-	for (dev=dev_base, idx=0; dev; dev = dev->next, idx++) {
+	idx = 0;
+	for_each_netdev(dev) {
 		if (idx < s_idx)
-			continue;
+			goto cont;
 		if ((idev = in6_dev_get(dev)) == NULL)
-			continue;
+			goto cont;
 		err = inet6_fill_ifinfo(skb, idev, NETLINK_CB(cb->skb).pid,
 				cb->nlh->nlmsg_seq, RTM_NEWLINK, NLM_F_MULTI);
 		in6_dev_put(idev);
 		if (err <= 0)
 			break;
+cont:
+		idx++;
 	}
 	read_unlock(&dev_base_lock);
 	cb->args[0] = idx;
@@ -4247,7 +4253,7 @@ void __exit addrconf_cleanup(void)
 	 *	clean dev list.
 	 */
 
-	for (dev=dev_base; dev; dev=dev->next) {
+	for_each_netdev(dev) {
 		if ((idev = __in6_dev_get(dev)) == NULL)
 			continue;
 		addrconf_ifdown(dev, 1);
diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c
index 09117d63256f..9b81264eb78f 100644
--- a/net/ipv6/anycast.c
+++ b/net/ipv6/anycast.c
@@ -423,14 +423,18 @@ static int ipv6_chk_acast_dev(struct net_device *dev, struct in6_addr *addr)
  */
 int ipv6_chk_acast_addr(struct net_device *dev, struct in6_addr *addr)
 {
+	int found = 0;
+
 	if (dev)
 		return ipv6_chk_acast_dev(dev, addr);
 	read_lock(&dev_base_lock);
-	for (dev=dev_base; dev; dev=dev->next)
-		if (ipv6_chk_acast_dev(dev, addr))
+	for_each_netdev(dev)
+		if (ipv6_chk_acast_dev(dev, addr)) {
+			found = 1;
 			break;
+		}
 	read_unlock(&dev_base_lock);
-	return dev != 0;
+	return found;
 }
 
 
@@ -447,9 +451,8 @@ static inline struct ifacaddr6 *ac6_get_first(struct seq_file *seq)
 	struct ifacaddr6 *im = NULL;
 	struct ac6_iter_state *state = ac6_seq_private(seq);
 
-	for (state->dev = dev_base, state->idev = NULL;
-	     state->dev;
-	     state->dev = state->dev->next) {
+	state->idev = NULL;
+	for_each_netdev(state->dev) {
 		struct inet6_dev *idev;
 		idev = in6_dev_get(state->dev);
 		if (!idev)
@@ -476,7 +479,7 @@ static struct ifacaddr6 *ac6_get_next(struct seq_file *seq, struct ifacaddr6 *im
 		read_unlock_bh(&state->idev->lock);
 		in6_dev_put(state->idev);
 	}
-	state->dev = state->dev->next;
+	state->dev = next_net_device(state->dev);
 	if (!state->dev) {
 		state->idev = NULL;
 		break;
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 6c2758951d60..3e308fb41b49 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -2331,9 +2331,8 @@ static inline struct ifmcaddr6 *igmp6_mc_get_first(struct seq_file *seq)
2331 struct ifmcaddr6 *im = NULL; 2331 struct ifmcaddr6 *im = NULL;
2332 struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq); 2332 struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
2333 2333
2334 for (state->dev = dev_base, state->idev = NULL; 2334 state->idev = NULL;
2335 state->dev; 2335 for_each_netdev(state->dev) {
2336 state->dev = state->dev->next) {
2337 struct inet6_dev *idev; 2336 struct inet6_dev *idev;
2338 idev = in6_dev_get(state->dev); 2337 idev = in6_dev_get(state->dev);
2339 if (!idev) 2338 if (!idev)
@@ -2360,7 +2359,7 @@ static struct ifmcaddr6 *igmp6_mc_get_next(struct seq_file *seq, struct ifmcaddr
2360 read_unlock_bh(&state->idev->lock); 2359 read_unlock_bh(&state->idev->lock);
2361 in6_dev_put(state->idev); 2360 in6_dev_put(state->idev);
2362 } 2361 }
2363 state->dev = state->dev->next; 2362 state->dev = next_net_device(state->dev);
2364 if (!state->dev) { 2363 if (!state->dev) {
2365 state->idev = NULL; 2364 state->idev = NULL;
2366 break; 2365 break;
@@ -2475,9 +2474,9 @@ static inline struct ip6_sf_list *igmp6_mcf_get_first(struct seq_file *seq)
2475 struct ifmcaddr6 *im = NULL; 2474 struct ifmcaddr6 *im = NULL;
2476 struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq); 2475 struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
2477 2476
2478 for (state->dev = dev_base, state->idev = NULL, state->im = NULL; 2477 state->idev = NULL;
2479 state->dev; 2478 state->im = NULL;
2480 state->dev = state->dev->next) { 2479 for_each_netdev(state->dev) {
2481 struct inet6_dev *idev; 2480 struct inet6_dev *idev;
2482 idev = in6_dev_get(state->dev); 2481 idev = in6_dev_get(state->dev);
2483 if (unlikely(idev == NULL)) 2482 if (unlikely(idev == NULL))
@@ -2513,7 +2512,7 @@ static struct ip6_sf_list *igmp6_mcf_get_next(struct seq_file *seq, struct ip6_s
2513 read_unlock_bh(&state->idev->lock); 2512 read_unlock_bh(&state->idev->lock);
2514 in6_dev_put(state->idev); 2513 in6_dev_put(state->idev);
2515 } 2514 }
2516 state->dev = state->dev->next; 2515 state->dev = next_net_device(state->dev);
2517 if (!state->dev) { 2516 if (!state->dev) {
2518 state->idev = NULL; 2517 state->idev = NULL;
2519 goto out; 2518 goto out;
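
All three mcast.c iterators now share the same seq_file cursor shape: for_each_netdev() positions the cursor on the first usable device, next_net_device() advances it, and a counted inet6_dev reference is held while the cursor rests on a device. Condensed, with an illustrative state struct (the real ones also track the position inside each device's address lists):

    struct sketch_iter_state {
            struct net_device *dev;
            struct inet6_dev  *idev;
    };

    /* Position on the first device that has IPv6 state. */
    static struct inet6_dev *sketch_first(struct sketch_iter_state *state)
    {
            state->idev = NULL;
            for_each_netdev(state->dev) {
                    state->idev = in6_dev_get(state->dev);
                    if (state->idev)
                            break;
            }
            return state->idev;
    }

    /* Drop the current reference and advance to the next such device. */
    static struct inet6_dev *sketch_next(struct sketch_iter_state *state)
    {
            in6_dev_put(state->idev);
            state->idev = NULL;
            while ((state->dev = next_net_device(state->dev)) != NULL) {
                    state->idev = in6_dev_get(state->dev);
                    if (state->idev)
                            break;
            }
            return state->idev;
    }
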
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index e84c924a81ee..2f1373855a8b 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -45,7 +45,8 @@ static struct proto iucv_proto = {
45static void iucv_callback_rx(struct iucv_path *, struct iucv_message *); 45static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
46static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *); 46static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
47static void iucv_callback_connack(struct iucv_path *, u8 ipuser[16]); 47static void iucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
48static int iucv_callback_connreq(struct iucv_path *, u8 ipvmid[8], u8 ipuser[16]); 48static int iucv_callback_connreq(struct iucv_path *, u8 ipvmid[8],
49 u8 ipuser[16]);
49static void iucv_callback_connrej(struct iucv_path *, u8 ipuser[16]); 50static void iucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
50 51
51static struct iucv_sock_list iucv_sk_list = { 52static struct iucv_sock_list iucv_sk_list = {
@@ -147,11 +148,12 @@ static void iucv_sock_close(struct sock *sk)
147 unsigned char user_data[16]; 148 unsigned char user_data[16];
148 struct iucv_sock *iucv = iucv_sk(sk); 149 struct iucv_sock *iucv = iucv_sk(sk);
149 int err; 150 int err;
151 unsigned long timeo;
150 152
151 iucv_sock_clear_timer(sk); 153 iucv_sock_clear_timer(sk);
152 lock_sock(sk); 154 lock_sock(sk);
153 155
154 switch(sk->sk_state) { 156 switch (sk->sk_state) {
155 case IUCV_LISTEN: 157 case IUCV_LISTEN:
156 iucv_sock_cleanup_listen(sk); 158 iucv_sock_cleanup_listen(sk);
157 break; 159 break;
@@ -159,6 +161,21 @@ static void iucv_sock_close(struct sock *sk)
159 case IUCV_CONNECTED: 161 case IUCV_CONNECTED:
160 case IUCV_DISCONN: 162 case IUCV_DISCONN:
161 err = 0; 163 err = 0;
164
165 sk->sk_state = IUCV_CLOSING;
166 sk->sk_state_change(sk);
167
168 if (!skb_queue_empty(&iucv->send_skb_q)) {
169 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
170 timeo = sk->sk_lingertime;
171 else
172 timeo = IUCV_DISCONN_TIMEOUT;
173 err = iucv_sock_wait_state(sk, IUCV_CLOSED, 0, timeo);
174 }
175
176 sk->sk_state = IUCV_CLOSED;
177 sk->sk_state_change(sk);
178
162 if (iucv->path) { 179 if (iucv->path) {
163 low_nmcpy(user_data, iucv->src_name); 180 low_nmcpy(user_data, iucv->src_name);
164 high_nmcpy(user_data, iucv->dst_name); 181 high_nmcpy(user_data, iucv->dst_name);
@@ -168,12 +185,11 @@ static void iucv_sock_close(struct sock *sk)
168 iucv->path = NULL; 185 iucv->path = NULL;
169 } 186 }
170 187
171 sk->sk_state = IUCV_CLOSED;
172 sk->sk_state_change(sk);
173 sk->sk_err = ECONNRESET; 188 sk->sk_err = ECONNRESET;
174 sk->sk_state_change(sk); 189 sk->sk_state_change(sk);
175 190
176 skb_queue_purge(&iucv->send_skb_q); 191 skb_queue_purge(&iucv->send_skb_q);
192 skb_queue_purge(&iucv->backlog_skb_q);
177 193
178 sock_set_flag(sk, SOCK_ZAPPED); 194 sock_set_flag(sk, SOCK_ZAPPED);
179 break; 195 break;
@@ -204,6 +220,7 @@ static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
204 sock_init_data(sock, sk); 220 sock_init_data(sock, sk);
205 INIT_LIST_HEAD(&iucv_sk(sk)->accept_q); 221 INIT_LIST_HEAD(&iucv_sk(sk)->accept_q);
206 skb_queue_head_init(&iucv_sk(sk)->send_skb_q); 222 skb_queue_head_init(&iucv_sk(sk)->send_skb_q);
223 skb_queue_head_init(&iucv_sk(sk)->backlog_skb_q);
207 iucv_sk(sk)->send_tag = 0; 224 iucv_sk(sk)->send_tag = 0;
208 225
209 sk->sk_destruct = iucv_sock_destruct; 226 sk->sk_destruct = iucv_sock_destruct;
@@ -276,7 +293,7 @@ struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
276 struct iucv_sock *isk, *n; 293 struct iucv_sock *isk, *n;
277 struct sock *sk; 294 struct sock *sk;
278 295
279 list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q){ 296 list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
280 sk = (struct sock *) isk; 297 sk = (struct sock *) isk;
281 lock_sock(sk); 298 lock_sock(sk);
282 299
@@ -510,7 +527,7 @@ static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
510 long timeo; 527 long timeo;
511 int err = 0; 528 int err = 0;
512 529
513 lock_sock(sk); 530 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
514 531
515 if (sk->sk_state != IUCV_LISTEN) { 532 if (sk->sk_state != IUCV_LISTEN) {
516 err = -EBADFD; 533 err = -EBADFD;
@@ -521,7 +538,7 @@ static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
521 538
522 /* Wait for an incoming connection */ 539 /* Wait for an incoming connection */
523 add_wait_queue_exclusive(sk->sk_sleep, &wait); 540 add_wait_queue_exclusive(sk->sk_sleep, &wait);
524 while (!(nsk = iucv_accept_dequeue(sk, newsock))){ 541 while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
525 set_current_state(TASK_INTERRUPTIBLE); 542 set_current_state(TASK_INTERRUPTIBLE);
526 if (!timeo) { 543 if (!timeo) {
527 err = -EAGAIN; 544 err = -EAGAIN;
@@ -530,7 +547,7 @@ static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
530 547
531 release_sock(sk); 548 release_sock(sk);
532 timeo = schedule_timeout(timeo); 549 timeo = schedule_timeout(timeo);
533 lock_sock(sk); 550 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
534 551
535 if (sk->sk_state != IUCV_LISTEN) { 552 if (sk->sk_state != IUCV_LISTEN) {
536 err = -EBADFD; 553 err = -EBADFD;
@@ -602,13 +619,13 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
602 goto out; 619 goto out;
603 } 620 }
604 621
605 if (sk->sk_state == IUCV_CONNECTED){ 622 if (sk->sk_state == IUCV_CONNECTED) {
606 if(!(skb = sock_alloc_send_skb(sk, len, 623 if (!(skb = sock_alloc_send_skb(sk, len,
607 msg->msg_flags & MSG_DONTWAIT, 624 msg->msg_flags & MSG_DONTWAIT,
608 &err))) 625 &err)))
609 return err; 626 goto out;
610 627
611 if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)){ 628 if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
612 err = -EFAULT; 629 err = -EFAULT;
613 goto fail; 630 goto fail;
614 } 631 }
@@ -647,10 +664,16 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
647{ 664{
648 int noblock = flags & MSG_DONTWAIT; 665 int noblock = flags & MSG_DONTWAIT;
649 struct sock *sk = sock->sk; 666 struct sock *sk = sock->sk;
667 struct iucv_sock *iucv = iucv_sk(sk);
650 int target, copied = 0; 668 int target, copied = 0;
651 struct sk_buff *skb; 669 struct sk_buff *skb, *rskb, *cskb;
652 int err = 0; 670 int err = 0;
653 671
672 if ((sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED) &&
673 skb_queue_empty(&iucv->backlog_skb_q) &&
674 skb_queue_empty(&sk->sk_receive_queue))
675 return 0;
676
654 if (flags & (MSG_OOB)) 677 if (flags & (MSG_OOB))
655 return -EOPNOTSUPP; 678 return -EOPNOTSUPP;
656 679
@@ -665,10 +688,12 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
665 688
666 copied = min_t(unsigned int, skb->len, len); 689 copied = min_t(unsigned int, skb->len, len);
667 690
668 if (memcpy_toiovec(msg->msg_iov, skb->data, copied)) { 691 cskb = skb;
692 if (memcpy_toiovec(msg->msg_iov, cskb->data, copied)) {
669 skb_queue_head(&sk->sk_receive_queue, skb); 693 skb_queue_head(&sk->sk_receive_queue, skb);
670 if (copied == 0) 694 if (copied == 0)
671 return -EFAULT; 695 return -EFAULT;
696 goto done;
672 } 697 }
673 698
674 len -= copied; 699 len -= copied;
@@ -683,6 +708,18 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
683 } 708 }
684 709
685 kfree_skb(skb); 710 kfree_skb(skb);
711
712 /* Queue backlog skbs */
713 rskb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
714 while (rskb) {
715 if (sock_queue_rcv_skb(sk, rskb)) {
716 skb_queue_head(&iucv_sk(sk)->backlog_skb_q,
717 rskb);
718 break;
719 } else {
720 rskb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
721 }
722 }
686 } else 723 } else
687 skb_queue_head(&sk->sk_receive_queue, skb); 724 skb_queue_head(&sk->sk_receive_queue, skb);
688 725
@@ -695,7 +732,7 @@ static inline unsigned int iucv_accept_poll(struct sock *parent)
695 struct iucv_sock *isk, *n; 732 struct iucv_sock *isk, *n;
696 struct sock *sk; 733 struct sock *sk;
697 734
698 list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q){ 735 list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
699 sk = (struct sock *) isk; 736 sk = (struct sock *) isk;
700 737
701 if (sk->sk_state == IUCV_CONNECTED) 738 if (sk->sk_state == IUCV_CONNECTED)
@@ -726,12 +763,15 @@ unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
726 mask |= POLLHUP; 763 mask |= POLLHUP;
727 764
728 if (!skb_queue_empty(&sk->sk_receive_queue) || 765 if (!skb_queue_empty(&sk->sk_receive_queue) ||
729 (sk->sk_shutdown & RCV_SHUTDOWN)) 766 (sk->sk_shutdown & RCV_SHUTDOWN))
730 mask |= POLLIN | POLLRDNORM; 767 mask |= POLLIN | POLLRDNORM;
731 768
732 if (sk->sk_state == IUCV_CLOSED) 769 if (sk->sk_state == IUCV_CLOSED)
733 mask |= POLLHUP; 770 mask |= POLLHUP;
734 771
772 if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED)
773 mask |= POLLIN;
774
735 if (sock_writeable(sk)) 775 if (sock_writeable(sk))
736 mask |= POLLOUT | POLLWRNORM | POLLWRBAND; 776 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
737 else 777 else
@@ -754,7 +794,7 @@ static int iucv_sock_shutdown(struct socket *sock, int how)
754 return -EINVAL; 794 return -EINVAL;
755 795
756 lock_sock(sk); 796 lock_sock(sk);
757 switch(sk->sk_state) { 797 switch (sk->sk_state) {
758 case IUCV_CLOSED: 798 case IUCV_CLOSED:
759 err = -ENOTCONN; 799 err = -ENOTCONN;
760 goto fail; 800 goto fail;
@@ -770,7 +810,7 @@ static int iucv_sock_shutdown(struct socket *sock, int how)
770 err = iucv_message_send(iucv->path, &txmsg, IUCV_IPRMDATA, 0, 810 err = iucv_message_send(iucv->path, &txmsg, IUCV_IPRMDATA, 0,
771 (void *) prmmsg, 8); 811 (void *) prmmsg, 8);
772 if (err) { 812 if (err) {
773 switch(err) { 813 switch (err) {
774 case 1: 814 case 1:
775 err = -ENOTCONN; 815 err = -ENOTCONN;
776 break; 816 break;
@@ -817,13 +857,6 @@ static int iucv_sock_release(struct socket *sock)
817 iucv_sk(sk)->path = NULL; 857 iucv_sk(sk)->path = NULL;
818 } 858 }
819 859
820 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime){
821 lock_sock(sk);
822 err = iucv_sock_wait_state(sk, IUCV_CLOSED, 0,
823 sk->sk_lingertime);
824 release_sock(sk);
825 }
826
827 sock_orphan(sk); 860 sock_orphan(sk);
828 iucv_sock_kill(sk); 861 iucv_sock_kill(sk);
829 return err; 862 return err;
@@ -880,7 +913,7 @@ static int iucv_callback_connreq(struct iucv_path *path,
880 913
881 /* Create the new socket */ 914 /* Create the new socket */
882 nsk = iucv_sock_alloc(NULL, SOCK_STREAM, GFP_ATOMIC); 915 nsk = iucv_sock_alloc(NULL, SOCK_STREAM, GFP_ATOMIC);
883 if (!nsk){ 916 if (!nsk) {
884 err = iucv_path_sever(path, user_data); 917 err = iucv_path_sever(path, user_data);
885 goto fail; 918 goto fail;
886 } 919 }
@@ -903,7 +936,7 @@ static int iucv_callback_connreq(struct iucv_path *path,
903 936
904 path->msglim = IUCV_QUEUELEN_DEFAULT; 937 path->msglim = IUCV_QUEUELEN_DEFAULT;
905 err = iucv_path_accept(path, &af_iucv_handler, nuser_data, nsk); 938 err = iucv_path_accept(path, &af_iucv_handler, nuser_data, nsk);
906 if (err){ 939 if (err) {
907 err = iucv_path_sever(path, user_data); 940 err = iucv_path_sever(path, user_data);
908 goto fail; 941 goto fail;
909 } 942 }
@@ -927,18 +960,53 @@ static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
927 sk->sk_state_change(sk); 960 sk->sk_state_change(sk);
928} 961}
929 962
963static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len,
 964 struct sk_buff_head *fragmented_skb_q)
965{
966 int dataleft, size, copied = 0;
967 struct sk_buff *nskb;
968
969 dataleft = len;
970 while (dataleft) {
971 if (dataleft >= sk->sk_rcvbuf / 4)
972 size = sk->sk_rcvbuf / 4;
973 else
974 size = dataleft;
975
976 nskb = alloc_skb(size, GFP_ATOMIC | GFP_DMA);
977 if (!nskb)
978 return -ENOMEM;
979
980 memcpy(nskb->data, skb->data + copied, size);
981 copied += size;
982 dataleft -= size;
983
984 nskb->h.raw = nskb->data;
985 nskb->nh.raw = nskb->data;
986 nskb->len = size;
987
988 skb_queue_tail(fragmented_skb_q, nskb);
989 }
990
991 return 0;
992}
993
930static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg) 994static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
931{ 995{
932 struct sock *sk = path->private; 996 struct sock *sk = path->private;
933 struct sk_buff *skb; 997 struct iucv_sock *iucv = iucv_sk(sk);
998 struct sk_buff *skb, *fskb;
999 struct sk_buff_head fragmented_skb_q;
934 int rc; 1000 int rc;
935 1001
1002 skb_queue_head_init(&fragmented_skb_q);
1003
936 if (sk->sk_shutdown & RCV_SHUTDOWN) 1004 if (sk->sk_shutdown & RCV_SHUTDOWN)
937 return; 1005 return;
938 1006
939 skb = alloc_skb(msg->length, GFP_ATOMIC | GFP_DMA); 1007 skb = alloc_skb(msg->length, GFP_ATOMIC | GFP_DMA);
940 if (!skb) { 1008 if (!skb) {
941 iucv_message_reject(path, msg); 1009 iucv_path_sever(path, NULL);
942 return; 1010 return;
943 } 1011 }
944 1012
@@ -952,14 +1020,39 @@ static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
952 kfree_skb(skb); 1020 kfree_skb(skb);
953 return; 1021 return;
954 } 1022 }
1023 if (skb->truesize >= sk->sk_rcvbuf / 4) {
1024 rc = iucv_fragment_skb(sk, skb, msg->length,
1025 &fragmented_skb_q);
1026 kfree_skb(skb);
1027 skb = NULL;
1028 if (rc) {
1029 iucv_path_sever(path, NULL);
1030 return;
1031 }
1032 } else {
1033 skb_reset_transport_header(skb);
1034 skb_reset_network_header(skb);
1035 skb->len = msg->length;
1036 }
1037 }
1038 /* Queue the fragmented skb */
1039 fskb = skb_dequeue(&fragmented_skb_q);
1040 while (fskb) {
1041 if (!skb_queue_empty(&iucv->backlog_skb_q))
1042 skb_queue_tail(&iucv->backlog_skb_q, fskb);
1043 else if (sock_queue_rcv_skb(sk, fskb))
1044 skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, fskb);
1045 fskb = skb_dequeue(&fragmented_skb_q);
1046 }
955 1047
956 skb_reset_transport_header(skb); 1048 /* Queue the original skb if it exists (was not fragmented) */
957 skb_reset_network_header(skb); 1049 if (skb) {
958 skb->len = msg->length; 1050 if (!skb_queue_empty(&iucv->backlog_skb_q))
1051 skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
1052 else if (sock_queue_rcv_skb(sk, skb))
1053 skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
959 } 1054 }
960 1055
961 if (sock_queue_rcv_skb(sk, skb))
962 kfree_skb(skb);
963} 1056}
964 1057
965static void iucv_callback_txdone(struct iucv_path *path, 1058static void iucv_callback_txdone(struct iucv_path *path,
@@ -971,17 +1064,27 @@ static void iucv_callback_txdone(struct iucv_path *path,
971 struct sk_buff *list_skb = list->next; 1064 struct sk_buff *list_skb = list->next;
972 unsigned long flags; 1065 unsigned long flags;
973 1066
974 spin_lock_irqsave(&list->lock, flags); 1067 if (list_skb) {
1068 spin_lock_irqsave(&list->lock, flags);
1069
1070 do {
1071 this = list_skb;
1072 list_skb = list_skb->next;
1073 } while (memcmp(&msg->tag, this->cb, 4) && list_skb);
1074
1075 spin_unlock_irqrestore(&list->lock, flags);
975 1076
976 do { 1077 skb_unlink(this, &iucv_sk(sk)->send_skb_q);
977 this = list_skb; 1078 kfree_skb(this);
978 list_skb = list_skb->next; 1079 }
979 } while (memcmp(&msg->tag, this->cb, 4));
980 1080
981 spin_unlock_irqrestore(&list->lock, flags); 1081 if (sk->sk_state == IUCV_CLOSING) {
1082 if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
1083 sk->sk_state = IUCV_CLOSED;
1084 sk->sk_state_change(sk);
1085 }
1086 }
982 1087
983 skb_unlink(this, &iucv_sk(sk)->send_skb_q);
984 kfree_skb(this);
985} 1088}
986 1089
987static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16]) 1090static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
@@ -1022,7 +1125,7 @@ static struct net_proto_family iucv_sock_family_ops = {
1022 .create = iucv_sock_create, 1125 .create = iucv_sock_create,
1023}; 1126};
1024 1127
1025static int afiucv_init(void) 1128static int __init afiucv_init(void)
1026{ 1129{
1027 int err; 1130 int err;
1028 1131
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index 903bdb6eaaa1..fb3faf72e850 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -32,7 +32,6 @@
32 32
33#include <linux/module.h> 33#include <linux/module.h>
34#include <linux/moduleparam.h> 34#include <linux/moduleparam.h>
35
36#include <linux/spinlock.h> 35#include <linux/spinlock.h>
37#include <linux/kernel.h> 36#include <linux/kernel.h>
38#include <linux/slab.h> 37#include <linux/slab.h>
@@ -69,7 +68,7 @@
69#define IUCV_IPNORPY 0x10 68#define IUCV_IPNORPY 0x10
70#define IUCV_IPALL 0x80 69#define IUCV_IPALL 0x80
71 70
72static int iucv_bus_match (struct device *dev, struct device_driver *drv) 71static int iucv_bus_match(struct device *dev, struct device_driver *drv)
73{ 72{
74 return 0; 73 return 0;
75} 74}
@@ -78,8 +77,11 @@ struct bus_type iucv_bus = {
78 .name = "iucv", 77 .name = "iucv",
79 .match = iucv_bus_match, 78 .match = iucv_bus_match,
80}; 79};
80EXPORT_SYMBOL(iucv_bus);
81 81
82struct device *iucv_root; 82struct device *iucv_root;
83EXPORT_SYMBOL(iucv_root);
84
83static int iucv_available; 85static int iucv_available;
84 86
85/* General IUCV interrupt structure */ 87/* General IUCV interrupt structure */
@@ -405,7 +407,7 @@ static void iucv_declare_cpu(void *data)
405 rc = iucv_call_b2f0(IUCV_DECLARE_BUFFER, parm); 407 rc = iucv_call_b2f0(IUCV_DECLARE_BUFFER, parm);
406 if (rc) { 408 if (rc) {
407 char *err = "Unknown"; 409 char *err = "Unknown";
408 switch(rc) { 410 switch (rc) {
409 case 0x03: 411 case 0x03:
410 err = "Directory error"; 412 err = "Directory error";
411 break; 413 break;
@@ -588,7 +590,7 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
588 return NOTIFY_OK; 590 return NOTIFY_OK;
589} 591}
590 592
591static struct notifier_block iucv_cpu_notifier = { 593static struct notifier_block __cpuinitdata iucv_cpu_notifier = {
592 .notifier_call = iucv_cpu_notify, 594 .notifier_call = iucv_cpu_notify,
593}; 595};
594 596
@@ -691,6 +693,7 @@ out_mutex:
691 mutex_unlock(&iucv_register_mutex); 693 mutex_unlock(&iucv_register_mutex);
692 return rc; 694 return rc;
693} 695}
696EXPORT_SYMBOL(iucv_register);
694 697
695/** 698/**
696 * iucv_unregister 699 * iucv_unregister
@@ -723,6 +726,7 @@ void iucv_unregister(struct iucv_handler *handler, int smp)
723 iucv_setmask_mp(); 726 iucv_setmask_mp();
724 mutex_unlock(&iucv_register_mutex); 727 mutex_unlock(&iucv_register_mutex);
725} 728}
729EXPORT_SYMBOL(iucv_unregister);
726 730
727/** 731/**
728 * iucv_path_accept 732 * iucv_path_accept
@@ -761,6 +765,7 @@ int iucv_path_accept(struct iucv_path *path, struct iucv_handler *handler,
761 local_bh_enable(); 765 local_bh_enable();
762 return rc; 766 return rc;
763} 767}
768EXPORT_SYMBOL(iucv_path_accept);
764 769
765/** 770/**
766 * iucv_path_connect 771 * iucv_path_connect
@@ -824,6 +829,7 @@ int iucv_path_connect(struct iucv_path *path, struct iucv_handler *handler,
824 spin_unlock_bh(&iucv_table_lock); 829 spin_unlock_bh(&iucv_table_lock);
825 return rc; 830 return rc;
826} 831}
832EXPORT_SYMBOL(iucv_path_connect);
827 833
828/** 834/**
829 * iucv_path_quiesce: 835 * iucv_path_quiesce:
@@ -850,6 +856,7 @@ int iucv_path_quiesce(struct iucv_path *path, u8 userdata[16])
850 local_bh_enable(); 856 local_bh_enable();
851 return rc; 857 return rc;
852} 858}
859EXPORT_SYMBOL(iucv_path_quiesce);
853 860
854/** 861/**
855 * iucv_path_resume: 862 * iucv_path_resume:
@@ -890,7 +897,6 @@ int iucv_path_sever(struct iucv_path *path, u8 userdata[16])
890{ 897{
891 int rc; 898 int rc;
892 899
893
894 preempt_disable(); 900 preempt_disable();
895 if (iucv_active_cpu != smp_processor_id()) 901 if (iucv_active_cpu != smp_processor_id())
896 spin_lock_bh(&iucv_table_lock); 902 spin_lock_bh(&iucv_table_lock);
@@ -904,6 +910,7 @@ int iucv_path_sever(struct iucv_path *path, u8 userdata[16])
904 preempt_enable(); 910 preempt_enable();
905 return rc; 911 return rc;
906} 912}
913EXPORT_SYMBOL(iucv_path_sever);
907 914
908/** 915/**
909 * iucv_message_purge 916 * iucv_message_purge
@@ -936,6 +943,7 @@ int iucv_message_purge(struct iucv_path *path, struct iucv_message *msg,
936 local_bh_enable(); 943 local_bh_enable();
937 return rc; 944 return rc;
938} 945}
946EXPORT_SYMBOL(iucv_message_purge);
939 947
940/** 948/**
941 * iucv_message_receive 949 * iucv_message_receive
@@ -1006,6 +1014,7 @@ int iucv_message_receive(struct iucv_path *path, struct iucv_message *msg,
1006 local_bh_enable(); 1014 local_bh_enable();
1007 return rc; 1015 return rc;
1008} 1016}
1017EXPORT_SYMBOL(iucv_message_receive);
1009 1018
1010/** 1019/**
1011 * iucv_message_reject 1020 * iucv_message_reject
@@ -1034,6 +1043,7 @@ int iucv_message_reject(struct iucv_path *path, struct iucv_message *msg)
1034 local_bh_enable(); 1043 local_bh_enable();
1035 return rc; 1044 return rc;
1036} 1045}
1046EXPORT_SYMBOL(iucv_message_reject);
1037 1047
1038/** 1048/**
1039 * iucv_message_reply 1049 * iucv_message_reply
@@ -1077,6 +1087,7 @@ int iucv_message_reply(struct iucv_path *path, struct iucv_message *msg,
1077 local_bh_enable(); 1087 local_bh_enable();
1078 return rc; 1088 return rc;
1079} 1089}
1090EXPORT_SYMBOL(iucv_message_reply);
1080 1091
1081/** 1092/**
1082 * iucv_message_send 1093 * iucv_message_send
@@ -1125,6 +1136,7 @@ int iucv_message_send(struct iucv_path *path, struct iucv_message *msg,
1125 local_bh_enable(); 1136 local_bh_enable();
1126 return rc; 1137 return rc;
1127} 1138}
1139EXPORT_SYMBOL(iucv_message_send);
1128 1140
1129/** 1141/**
1130 * iucv_message_send2way 1142 * iucv_message_send2way
@@ -1181,6 +1193,7 @@ int iucv_message_send2way(struct iucv_path *path, struct iucv_message *msg,
1181 local_bh_enable(); 1193 local_bh_enable();
1182 return rc; 1194 return rc;
1183} 1195}
1196EXPORT_SYMBOL(iucv_message_send2way);
1184 1197
1185/** 1198/**
1186 * iucv_path_pending 1199 * iucv_path_pending
@@ -1572,7 +1585,7 @@ static void iucv_external_interrupt(u16 code)
1572 * 1585 *
1573 * Allocates and initializes various data structures. 1586 * Allocates and initializes various data structures.
1574 */ 1587 */
1575static int iucv_init(void) 1588static int __init iucv_init(void)
1576{ 1589{
1577 int rc; 1590 int rc;
1578 1591
@@ -1583,7 +1596,7 @@ static int iucv_init(void)
1583 rc = iucv_query_maxconn(); 1596 rc = iucv_query_maxconn();
1584 if (rc) 1597 if (rc)
1585 goto out; 1598 goto out;
1586 rc = register_external_interrupt (0x4000, iucv_external_interrupt); 1599 rc = register_external_interrupt(0x4000, iucv_external_interrupt);
1587 if (rc) 1600 if (rc)
1588 goto out; 1601 goto out;
1589 rc = bus_register(&iucv_bus); 1602 rc = bus_register(&iucv_bus);
@@ -1594,7 +1607,7 @@ static int iucv_init(void)
1594 rc = PTR_ERR(iucv_root); 1607 rc = PTR_ERR(iucv_root);
1595 goto out_bus; 1608 goto out_bus;
1596 } 1609 }
1597 /* Note: GFP_DMA used used to get memory below 2G */ 1610 /* Note: GFP_DMA used to get memory below 2G */
1598 iucv_irq_data = percpu_alloc(sizeof(struct iucv_irq_data), 1611 iucv_irq_data = percpu_alloc(sizeof(struct iucv_irq_data),
1599 GFP_KERNEL|GFP_DMA); 1612 GFP_KERNEL|GFP_DMA);
1600 if (!iucv_irq_data) { 1613 if (!iucv_irq_data) {
@@ -1632,7 +1645,7 @@ out:
1632 * 1645 *
1633 * Frees everything allocated from iucv_init. 1646 * Frees everything allocated from iucv_init.
1634 */ 1647 */
1635static void iucv_exit(void) 1648static void __exit iucv_exit(void)
1636{ 1649{
1637 struct iucv_irq_list *p, *n; 1650 struct iucv_irq_list *p, *n;
1638 1651
@@ -1653,24 +1666,6 @@ static void iucv_exit(void)
1653subsys_initcall(iucv_init); 1666subsys_initcall(iucv_init);
1654module_exit(iucv_exit); 1667module_exit(iucv_exit);
1655 1668
1656/**
1657 * Export all public stuff
1658 */
1659EXPORT_SYMBOL (iucv_bus);
1660EXPORT_SYMBOL (iucv_root);
1661EXPORT_SYMBOL (iucv_register);
1662EXPORT_SYMBOL (iucv_unregister);
1663EXPORT_SYMBOL (iucv_path_accept);
1664EXPORT_SYMBOL (iucv_path_connect);
1665EXPORT_SYMBOL (iucv_path_quiesce);
1666EXPORT_SYMBOL (iucv_path_sever);
1667EXPORT_SYMBOL (iucv_message_purge);
1668EXPORT_SYMBOL (iucv_message_receive);
1669EXPORT_SYMBOL (iucv_message_reject);
1670EXPORT_SYMBOL (iucv_message_reply);
1671EXPORT_SYMBOL (iucv_message_send);
1672EXPORT_SYMBOL (iucv_message_send2way);
1673
1674MODULE_AUTHOR("(C) 2001 IBM Corp. by Fritz Elfert (felfert@millenux.com)"); 1669MODULE_AUTHOR("(C) 2001 IBM Corp. by Fritz Elfert (felfert@millenux.com)");
1675MODULE_DESCRIPTION("Linux for S/390 IUCV lowlevel driver"); 1670MODULE_DESCRIPTION("Linux for S/390 IUCV lowlevel driver");
1676MODULE_LICENSE("GPL"); 1671MODULE_LICENSE("GPL");
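
Besides the whitespace cleanups, the iucv.c hunks move every EXPORT_SYMBOL() next to the symbol it exports, replacing the trailing export list, and annotate the init/exit paths so their code can be discarded after boot or on unload. The convention, sketched with bodies elided:

    int iucv_path_sever(struct iucv_path *path, u8 userdata[16])
    {
            /* ... as in the file ... */
    }
    EXPORT_SYMBOL(iucv_path_sever);

    static int __init iucv_init(void)
    {
            /* ... */
            return 0;
    }

    static void __exit iucv_exit(void)
    {
            /* ... */
    }

    subsys_initcall(iucv_init);
    module_exit(iucv_exit);
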
diff --git a/net/llc/llc_core.c b/net/llc/llc_core.c
index d12413cff5bd..d4b13a031fd5 100644
--- a/net/llc/llc_core.c
+++ b/net/llc/llc_core.c
@@ -160,8 +160,14 @@ static struct packet_type llc_tr_packet_type = {
160 160
161static int __init llc_init(void) 161static int __init llc_init(void)
162{ 162{
163 if (dev_base->next) 163 struct net_device *dev;
164 memcpy(llc_station_mac_sa, dev_base->next->dev_addr, ETH_ALEN); 164
165 dev = first_net_device();
166 if (dev != NULL)
167 dev = next_net_device(dev);
168
169 if (dev != NULL)
170 memcpy(llc_station_mac_sa, dev->dev_addr, ETH_ALEN);
165 else 171 else
166 memset(llc_station_mac_sa, 0, ETH_ALEN); 172 memset(llc_station_mac_sa, 0, ETH_ALEN);
167 dev_add_pack(&llc_packet_type); 173 dev_add_pack(&llc_packet_type);
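
llc_init() used to dereference dev_base->next directly; the replacement takes first_net_device() and steps once with next_net_device(), still selecting the second entry on the list (historically the first interface after loopback) but through the accessors. As a helper, the selection is simply (name illustrative):

    /* Second device on the global list, or NULL if there is none. */
    static struct net_device *sketch_second_net_device(void)
    {
            struct net_device *dev = first_net_device();

            return dev ? next_net_device(dev) : NULL;
    }
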
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 42d2fb94eff1..507828d7d4ae 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -140,6 +140,14 @@ static struct hlist_head *nl_pid_hashfn(struct nl_pid_hash *hash, u32 pid)
140 140
141static void netlink_sock_destruct(struct sock *sk) 141static void netlink_sock_destruct(struct sock *sk)
142{ 142{
143 struct netlink_sock *nlk = nlk_sk(sk);
144
145 if (nlk->cb) {
146 if (nlk->cb->done)
147 nlk->cb->done(nlk->cb);
148 netlink_destroy_callback(nlk->cb);
149 }
150
143 skb_queue_purge(&sk->sk_receive_queue); 151 skb_queue_purge(&sk->sk_receive_queue);
144 152
145 if (!sock_flag(sk, SOCK_DEAD)) { 153 if (!sock_flag(sk, SOCK_DEAD)) {
@@ -148,7 +156,6 @@ static void netlink_sock_destruct(struct sock *sk)
148 } 156 }
149 BUG_TRAP(!atomic_read(&sk->sk_rmem_alloc)); 157 BUG_TRAP(!atomic_read(&sk->sk_rmem_alloc));
150 BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc)); 158 BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));
151 BUG_TRAP(!nlk_sk(sk)->cb);
152 BUG_TRAP(!nlk_sk(sk)->groups); 159 BUG_TRAP(!nlk_sk(sk)->groups);
153} 160}
154 161
@@ -456,17 +463,10 @@ static int netlink_release(struct socket *sock)
456 sock_orphan(sk); 463 sock_orphan(sk);
457 nlk = nlk_sk(sk); 464 nlk = nlk_sk(sk);
458 465
459 mutex_lock(nlk->cb_mutex); 466 /*
460 if (nlk->cb) { 467 * OK. Socket is unlinked, any packets that arrive now
461 if (nlk->cb->done) 468 * will be purged.
462 nlk->cb->done(nlk->cb); 469 */
463 netlink_destroy_callback(nlk->cb);
464 nlk->cb = NULL;
465 }
466 mutex_unlock(nlk->cb_mutex);
467
468 /* OK. Socket is unlinked, and, therefore,
469 no new packets will arrive */
470 470
471 sock->sk = NULL; 471 sock->sk = NULL;
472 wake_up_interruptible_all(&nlk->wait); 472 wake_up_interruptible_all(&nlk->wait);
@@ -1245,16 +1245,14 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
1245 siocb->scm = &scm; 1245 siocb->scm = &scm;
1246 } 1246 }
1247 siocb->scm->creds = *NETLINK_CREDS(skb); 1247 siocb->scm->creds = *NETLINK_CREDS(skb);
1248 if (flags & MSG_TRUNC)
1249 copied = skb->len;
1248 skb_free_datagram(sk, skb); 1250 skb_free_datagram(sk, skb);
1249 1251
1250 if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) 1252 if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2)
1251 netlink_dump(sk); 1253 netlink_dump(sk);
1252 1254
1253 scm_recv(sock, msg, siocb->scm, flags); 1255 scm_recv(sock, msg, siocb->scm, flags);
1254
1255 if (flags & MSG_TRUNC)
1256 copied = skb->len;
1257
1258out: 1256out:
1259 netlink_rcv_wake(sk); 1257 netlink_rcv_wake(sk);
1260 return err ? : copied; 1258 return err ? : copied;
@@ -1426,9 +1424,9 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
1426 return -ECONNREFUSED; 1424 return -ECONNREFUSED;
1427 } 1425 }
1428 nlk = nlk_sk(sk); 1426 nlk = nlk_sk(sk);
1429 /* A dump or destruction is in progress... */ 1427 /* A dump is in progress... */
1430 mutex_lock(nlk->cb_mutex); 1428 mutex_lock(nlk->cb_mutex);
1431 if (nlk->cb || sock_flag(sk, SOCK_DEAD)) { 1429 if (nlk->cb) {
1432 mutex_unlock(nlk->cb_mutex); 1430 mutex_unlock(nlk->cb_mutex);
1433 netlink_destroy_callback(cb); 1431 netlink_destroy_callback(cb);
1434 sock_put(sk); 1432 sock_put(sk);
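
Two independent af_netlink.c fixes sit in this hunk set: a still-attached dump callback is now destroyed from netlink_sock_destruct() rather than netlink_release(), and the MSG_TRUNC length is captured before skb_free_datagram() gives up the skb, where reading skb->len afterwards was a use-after-free. The corrected ordering in isolation (illustrative helper):

    /* Return the length recvmsg should report: the full datagram
     * length under MSG_TRUNC, sampled while the skb is still live. */
    static int sketch_recv_len(struct sock *sk, struct sk_buff *skb,
                               int flags, int copied)
    {
            if (flags & MSG_TRUNC)
                    copied = skb->len;      /* before the skb is freed */
            skb_free_datagram(sk, skb);
            return copied;
    }
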
diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c
index 8e6bd4e9d82c..2f76e062609d 100644
--- a/net/netrom/nr_route.c
+++ b/net/netrom/nr_route.c
@@ -598,7 +598,7 @@ struct net_device *nr_dev_first(void)
598 struct net_device *dev, *first = NULL; 598 struct net_device *dev, *first = NULL;
599 599
600 read_lock(&dev_base_lock); 600 read_lock(&dev_base_lock);
601 for (dev = dev_base; dev != NULL; dev = dev->next) { 601 for_each_netdev(dev) {
602 if ((dev->flags & IFF_UP) && dev->type == ARPHRD_NETROM) 602 if ((dev->flags & IFF_UP) && dev->type == ARPHRD_NETROM)
603 if (first == NULL || strncmp(dev->name, first->name, 3) < 0) 603 if (first == NULL || strncmp(dev->name, first->name, 3) < 0)
604 first = dev; 604 first = dev;
@@ -618,12 +618,13 @@ struct net_device *nr_dev_get(ax25_address *addr)
618 struct net_device *dev; 618 struct net_device *dev;
619 619
620 read_lock(&dev_base_lock); 620 read_lock(&dev_base_lock);
621 for (dev = dev_base; dev != NULL; dev = dev->next) { 621 for_each_netdev(dev) {
622 if ((dev->flags & IFF_UP) && dev->type == ARPHRD_NETROM && ax25cmp(addr, (ax25_address *)dev->dev_addr) == 0) { 622 if ((dev->flags & IFF_UP) && dev->type == ARPHRD_NETROM && ax25cmp(addr, (ax25_address *)dev->dev_addr) == 0) {
623 dev_hold(dev); 623 dev_hold(dev);
624 goto out; 624 goto out;
625 } 625 }
626 } 626 }
627 dev = NULL;
627out: 628out:
628 read_unlock(&dev_base_lock); 629 read_unlock(&dev_base_lock);
629 return dev; 630 return dev;
diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
index 1f9aefd95a99..929a784a86d7 100644
--- a/net/rose/rose_route.c
+++ b/net/rose/rose_route.c
@@ -596,7 +596,7 @@ struct net_device *rose_dev_first(void)
596 struct net_device *dev, *first = NULL; 596 struct net_device *dev, *first = NULL;
597 597
598 read_lock(&dev_base_lock); 598 read_lock(&dev_base_lock);
599 for (dev = dev_base; dev != NULL; dev = dev->next) { 599 for_each_netdev(dev) {
600 if ((dev->flags & IFF_UP) && dev->type == ARPHRD_ROSE) 600 if ((dev->flags & IFF_UP) && dev->type == ARPHRD_ROSE)
601 if (first == NULL || strncmp(dev->name, first->name, 3) < 0) 601 if (first == NULL || strncmp(dev->name, first->name, 3) < 0)
602 first = dev; 602 first = dev;
@@ -614,12 +614,13 @@ struct net_device *rose_dev_get(rose_address *addr)
614 struct net_device *dev; 614 struct net_device *dev;
615 615
616 read_lock(&dev_base_lock); 616 read_lock(&dev_base_lock);
617 for (dev = dev_base; dev != NULL; dev = dev->next) { 617 for_each_netdev(dev) {
618 if ((dev->flags & IFF_UP) && dev->type == ARPHRD_ROSE && rosecmp(addr, (rose_address *)dev->dev_addr) == 0) { 618 if ((dev->flags & IFF_UP) && dev->type == ARPHRD_ROSE && rosecmp(addr, (rose_address *)dev->dev_addr) == 0) {
619 dev_hold(dev); 619 dev_hold(dev);
620 goto out; 620 goto out;
621 } 621 }
622 } 622 }
623 dev = NULL;
623out: 624out:
624 read_unlock(&dev_base_lock); 625 read_unlock(&dev_base_lock);
625 return dev; 626 return dev;
@@ -630,10 +631,11 @@ static int rose_dev_exists(rose_address *addr)
630 struct net_device *dev; 631 struct net_device *dev;
631 632
632 read_lock(&dev_base_lock); 633 read_lock(&dev_base_lock);
633 for (dev = dev_base; dev != NULL; dev = dev->next) { 634 for_each_netdev(dev) {
634 if ((dev->flags & IFF_UP) && dev->type == ARPHRD_ROSE && rosecmp(addr, (rose_address *)dev->dev_addr) == 0) 635 if ((dev->flags & IFF_UP) && dev->type == ARPHRD_ROSE && rosecmp(addr, (rose_address *)dev->dev_addr) == 0)
635 goto out; 636 goto out;
636 } 637 }
638 dev = NULL;
637out: 639out:
638 read_unlock(&dev_base_lock); 640 read_unlock(&dev_base_lock);
639 return dev != NULL; 641 return dev != NULL;
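
The nr_route.c and rose_route.c lookups share one subtlety: after a complete pass, the list-based for_each_netdev() leaves the cursor pointing at internal list state rather than NULL, so each not-found path now clears it explicitly before `out:`. Sketch of the fixed shape (helper name illustrative):

    static struct net_device *sketch_rose_dev_first_up(void)
    {
            struct net_device *dev;

            read_lock(&dev_base_lock);
            for_each_netdev(dev) {
                    if ((dev->flags & IFF_UP) && dev->type == ARPHRD_ROSE) {
                            dev_hold(dev);
                            goto out;
                    }
            }
            dev = NULL;     /* ran off the end: no match */
    out:
            read_unlock(&dev_base_lock);
            return dev;
    }
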
diff --git a/net/rxrpc/Kconfig b/net/rxrpc/Kconfig
index 8750f6da6bc7..91b3d52f6f1a 100644
--- a/net/rxrpc/Kconfig
+++ b/net/rxrpc/Kconfig
@@ -5,6 +5,7 @@
5config AF_RXRPC 5config AF_RXRPC
6 tristate "RxRPC session sockets" 6 tristate "RxRPC session sockets"
7 depends on EXPERIMENTAL 7 depends on EXPERIMENTAL
8 select KEYS
8 help 9 help
9 Say Y or M here to include support for RxRPC session sockets (just 10 Say Y or M here to include support for RxRPC session sockets (just
10 the transport part, not the presentation part: (un)marshalling is 11 the transport part, not the presentation part: (un)marshalling is
@@ -29,7 +30,7 @@ config AF_RXRPC_DEBUG
29 30
30config RXKAD 31config RXKAD
31 tristate "RxRPC Kerberos security" 32 tristate "RxRPC Kerberos security"
32 depends on AF_RXRPC && KEYS 33 depends on AF_RXRPC
33 select CRYPTO 34 select CRYPTO
34 select CRYPTO_MANAGER 35 select CRYPTO_MANAGER
35 select CRYPTO_BLKCIPHER 36 select CRYPTO_BLKCIPHER
diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
index fc07a926df56..657ee69f2133 100644
--- a/net/rxrpc/ar-ack.c
+++ b/net/rxrpc/ar-ack.c
@@ -543,6 +543,38 @@ static void rxrpc_zap_tx_window(struct rxrpc_call *call)
543} 543}
544 544
545/* 545/*
546 * process the extra information that may be appended to an ACK packet
547 */
548static void rxrpc_extract_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
549 unsigned latest, int nAcks)
550{
551 struct rxrpc_ackinfo ackinfo;
552 struct rxrpc_peer *peer;
553 unsigned mtu;
554
555 if (skb_copy_bits(skb, nAcks + 3, &ackinfo, sizeof(ackinfo)) < 0) {
556 _leave(" [no ackinfo]");
557 return;
558 }
559
560 _proto("Rx ACK %%%u Info { rx=%u max=%u rwin=%u jm=%u }",
561 latest,
562 ntohl(ackinfo.rxMTU), ntohl(ackinfo.maxMTU),
563 ntohl(ackinfo.rwind), ntohl(ackinfo.jumbo_max));
564
565 mtu = min(ntohl(ackinfo.rxMTU), ntohl(ackinfo.maxMTU));
566
567 peer = call->conn->trans->peer;
568 if (mtu < peer->maxdata) {
569 spin_lock_bh(&peer->lock);
570 peer->maxdata = mtu;
571 peer->mtu = mtu + peer->hdrsize;
572 spin_unlock_bh(&peer->lock);
573 _net("Net MTU %u (maxdata %u)", peer->mtu, peer->maxdata);
574 }
575}
576
577/*
546 * process packets in the reception queue 578 * process packets in the reception queue
547 */ 579 */
548static int rxrpc_process_rx_queue(struct rxrpc_call *call, 580static int rxrpc_process_rx_queue(struct rxrpc_call *call,
@@ -606,6 +638,8 @@ process_further:
606 rxrpc_acks[ack.reason], 638 rxrpc_acks[ack.reason],
607 ack.nAcks); 639 ack.nAcks);
608 640
641 rxrpc_extract_ackinfo(call, skb, latest, ack.nAcks);
642
609 if (ack.reason == RXRPC_ACK_PING) { 643 if (ack.reason == RXRPC_ACK_PING) {
610 _proto("Rx ACK %%%u PING Request", latest); 644 _proto("Rx ACK %%%u PING Request", latest);
611 rxrpc_propose_ACK(call, RXRPC_ACK_PING_RESPONSE, 645 rxrpc_propose_ACK(call, RXRPC_ACK_PING_RESPONSE,
@@ -801,9 +835,9 @@ void rxrpc_process_call(struct work_struct *work)
801 struct msghdr msg; 835 struct msghdr msg;
802 struct kvec iov[5]; 836 struct kvec iov[5];
803 unsigned long bits; 837 unsigned long bits;
804 __be32 data; 838 __be32 data, pad;
805 size_t len; 839 size_t len;
806 int genbit, loop, nbit, ioc, ret; 840 int genbit, loop, nbit, ioc, ret, mtu;
807 u32 abort_code = RX_PROTOCOL_ERROR; 841 u32 abort_code = RX_PROTOCOL_ERROR;
808 u8 *acks = NULL; 842 u8 *acks = NULL;
809 843
@@ -899,9 +933,30 @@ void rxrpc_process_call(struct work_struct *work)
899 } 933 }
900 934
901 if (test_bit(RXRPC_CALL_ACK_FINAL, &call->events)) { 935 if (test_bit(RXRPC_CALL_ACK_FINAL, &call->events)) {
902 hdr.type = RXRPC_PACKET_TYPE_ACKALL;
903 genbit = RXRPC_CALL_ACK_FINAL; 936 genbit = RXRPC_CALL_ACK_FINAL;
904 goto send_message; 937
938 ack.bufferSpace = htons(8);
939 ack.maxSkew = 0;
940 ack.serial = 0;
941 ack.reason = RXRPC_ACK_IDLE;
942 ack.nAcks = 0;
943 call->ackr_reason = 0;
944
945 spin_lock_bh(&call->lock);
946 ack.serial = call->ackr_serial;
947 ack.previousPacket = call->ackr_prev_seq;
948 ack.firstPacket = htonl(call->rx_data_eaten + 1);
949 spin_unlock_bh(&call->lock);
950
951 pad = 0;
952
953 iov[1].iov_base = &ack;
954 iov[1].iov_len = sizeof(ack);
955 iov[2].iov_base = &pad;
956 iov[2].iov_len = 3;
957 iov[3].iov_base = &ackinfo;
958 iov[3].iov_len = sizeof(ackinfo);
959 goto send_ACK;
905 } 960 }
906 961
907 if (call->events & ((1 << RXRPC_CALL_RCVD_BUSY) | 962 if (call->events & ((1 << RXRPC_CALL_RCVD_BUSY) |
@@ -971,8 +1026,6 @@ void rxrpc_process_call(struct work_struct *work)
971 1026
972 /* consider sending an ordinary ACK */ 1027 /* consider sending an ordinary ACK */
973 if (test_bit(RXRPC_CALL_ACK, &call->events)) { 1028 if (test_bit(RXRPC_CALL_ACK, &call->events)) {
974 __be32 pad;
975
976 _debug("send ACK: window: %d - %d { %lx }", 1029 _debug("send ACK: window: %d - %d { %lx }",
977 call->rx_data_eaten, call->ackr_win_top, 1030 call->rx_data_eaten, call->ackr_win_top,
978 call->ackr_window[0]); 1031 call->ackr_window[0]);
@@ -997,12 +1050,6 @@ void rxrpc_process_call(struct work_struct *work)
997 ack.serial = 0; 1050 ack.serial = 0;
998 ack.reason = 0; 1051 ack.reason = 0;
999 1052
1000 ackinfo.rxMTU = htonl(5692);
1001// ackinfo.rxMTU = htonl(call->conn->trans->peer->maxdata);
1002 ackinfo.maxMTU = htonl(call->conn->trans->peer->maxdata);
1003 ackinfo.rwind = htonl(32);
1004 ackinfo.jumbo_max = htonl(4);
1005
1006 spin_lock_bh(&call->lock); 1053 spin_lock_bh(&call->lock);
1007 ack.reason = call->ackr_reason; 1054 ack.reason = call->ackr_reason;
1008 ack.serial = call->ackr_serial; 1055 ack.serial = call->ackr_serial;
@@ -1116,6 +1163,15 @@ send_ACK_with_skew:
1116 ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) - 1163 ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
1117 ntohl(ack.serial)); 1164 ntohl(ack.serial));
1118send_ACK: 1165send_ACK:
1166 mtu = call->conn->trans->peer->if_mtu;
1167 mtu -= call->conn->trans->peer->hdrsize;
1168 ackinfo.maxMTU = htonl(mtu);
1169 ackinfo.rwind = htonl(32);
1170
1171 /* permit the peer to send us jumbo packets if it wants to */
1172 ackinfo.rxMTU = htonl(5692);
1173 ackinfo.jumbo_max = htonl(4);
1174
1119 hdr.serial = htonl(atomic_inc_return(&call->conn->serial)); 1175 hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
1120 _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }", 1176 _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
1121 ntohl(hdr.serial), 1177 ntohl(hdr.serial),
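
rxrpc_extract_ackinfo() only ever shrinks the recorded peer MTU: it takes min(rxMTU, maxMTU) from the ACK trailer and applies the result under peer->lock, mirroring the locking added to ar-error.c below. The clamp in isolation (illustrative helper):

    /* Lower the peer MTU record if the advertised values demand it;
     * never raise it from an ACK. */
    static void sketch_clamp_peer_mtu(struct rxrpc_peer *peer,
                                      u32 rx_mtu, u32 max_mtu)
    {
            unsigned mtu = min(rx_mtu, max_mtu);

            if (mtu < peer->maxdata) {
                    spin_lock_bh(&peer->lock);
                    peer->maxdata = mtu;
                    peer->mtu = mtu + peer->hdrsize;
                    spin_unlock_bh(&peer->lock);
            }
    }
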
diff --git a/net/rxrpc/ar-error.c b/net/rxrpc/ar-error.c
index 2c27df1ffa17..6cb3e8890e7e 100644
--- a/net/rxrpc/ar-error.c
+++ b/net/rxrpc/ar-error.c
@@ -100,8 +100,10 @@ void rxrpc_UDP_error_report(struct sock *sk)
100 } 100 }
101 101
102 if (mtu < peer->mtu) { 102 if (mtu < peer->mtu) {
103 spin_lock_bh(&peer->lock);
103 peer->mtu = mtu; 104 peer->mtu = mtu;
104 peer->maxdata = peer->mtu - peer->hdrsize; 105 peer->maxdata = peer->mtu - peer->hdrsize;
106 spin_unlock_bh(&peer->lock);
105 _net("Net MTU %u (maxdata %u)", 107 _net("Net MTU %u (maxdata %u)",
106 peer->mtu, peer->maxdata); 108 peer->mtu, peer->maxdata);
107 } 109 }
diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c
index 5cdde4a48ed1..591c4422205e 100644
--- a/net/rxrpc/ar-output.c
+++ b/net/rxrpc/ar-output.c
@@ -582,7 +582,7 @@ static int rxrpc_send_data(struct kiocb *iocb,
582 max &= ~(call->conn->size_align - 1UL); 582 max &= ~(call->conn->size_align - 1UL);
583 583
584 chunk = max; 584 chunk = max;
585 if (chunk > len) 585 if (chunk > len && !more)
586 chunk = len; 586 chunk = len;
587 587
588 space = chunk + call->conn->size_align; 588 space = chunk + call->conn->size_align;
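
The one-line ar-output.c change stops rxrpc_send_data() from clipping a fresh skb to the residual length while the caller has signalled that more data follows (MSG_MORE): the buffer must stay full-sized so subsequent writes can be packed into it. As a predicate (illustrative, with `more` standing for the MSG_MORE state):

    /* How much buffer to allocate for the next chunk. */
    static size_t sketch_chunk_size(size_t max, size_t len, int more)
    {
            return (max > len && !more) ? len : max;
    }
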
diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c
index d399de4a7fe2..ce08b78647ce 100644
--- a/net/rxrpc/ar-peer.c
+++ b/net/rxrpc/ar-peer.c
@@ -19,6 +19,7 @@
19#include <net/sock.h> 19#include <net/sock.h>
20#include <net/af_rxrpc.h> 20#include <net/af_rxrpc.h>
21#include <net/ip.h> 21#include <net/ip.h>
22#include <net/route.h>
22#include "ar-internal.h" 23#include "ar-internal.h"
23 24
24static LIST_HEAD(rxrpc_peers); 25static LIST_HEAD(rxrpc_peers);
@@ -28,6 +29,47 @@ static DECLARE_WAIT_QUEUE_HEAD(rxrpc_peer_wq);
28static void rxrpc_destroy_peer(struct work_struct *work); 29static void rxrpc_destroy_peer(struct work_struct *work);
29 30
30/* 31/*
32 * assess the MTU size for the network interface through which this peer is
33 * reached
34 */
35static void rxrpc_assess_MTU_size(struct rxrpc_peer *peer)
36{
37 struct rtable *rt;
38 struct flowi fl;
39 int ret;
40
41 peer->if_mtu = 1500;
42
43 memset(&fl, 0, sizeof(fl));
44
45 switch (peer->srx.transport.family) {
46 case AF_INET:
47 fl.oif = 0;
48 fl.proto = IPPROTO_UDP,
49 fl.nl_u.ip4_u.saddr = 0;
50 fl.nl_u.ip4_u.daddr = peer->srx.transport.sin.sin_addr.s_addr;
51 fl.nl_u.ip4_u.tos = 0;
52 /* assume AFS.CM talking to AFS.FS */
53 fl.uli_u.ports.sport = htons(7001);
54 fl.uli_u.ports.dport = htons(7000);
55 break;
56 default:
57 BUG();
58 }
59
60 ret = ip_route_output_key(&rt, &fl);
61 if (ret < 0) {
62 kleave(" [route err %d]", ret);
63 return;
64 }
65
66 peer->if_mtu = dst_mtu(&rt->u.dst);
67 dst_release(&rt->u.dst);
68
69 kleave(" [if_mtu %u]", peer->if_mtu);
70}
71
72/*
31 * allocate a new peer 73 * allocate a new peer
32 */ 74 */
33static struct rxrpc_peer *rxrpc_alloc_peer(struct sockaddr_rxrpc *srx, 75static struct rxrpc_peer *rxrpc_alloc_peer(struct sockaddr_rxrpc *srx,
@@ -47,7 +89,8 @@ static struct rxrpc_peer *rxrpc_alloc_peer(struct sockaddr_rxrpc *srx,
47 peer->debug_id = atomic_inc_return(&rxrpc_debug_id); 89 peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
48 memcpy(&peer->srx, srx, sizeof(*srx)); 90 memcpy(&peer->srx, srx, sizeof(*srx));
49 91
50 peer->mtu = peer->if_mtu = 65535; 92 rxrpc_assess_MTU_size(peer);
93 peer->mtu = peer->if_mtu;
51 94
52 if (srx->transport.family == AF_INET) { 95 if (srx->transport.family == AF_INET) {
53 peer->hdrsize = sizeof(struct iphdr); 96 peer->hdrsize = sizeof(struct iphdr);
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 8699e7006d80..bec600af03ca 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -894,9 +894,10 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
894 s_idx = cb->args[0]; 894 s_idx = cb->args[0];
895 s_q_idx = q_idx = cb->args[1]; 895 s_q_idx = q_idx = cb->args[1];
896 read_lock(&dev_base_lock); 896 read_lock(&dev_base_lock);
897 for (dev=dev_base, idx=0; dev; dev = dev->next, idx++) { 897 idx = 0;
898 for_each_netdev(dev) {
898 if (idx < s_idx) 899 if (idx < s_idx)
899 continue; 900 goto cont;
900 if (idx > s_idx) 901 if (idx > s_idx)
901 s_q_idx = 0; 902 s_q_idx = 0;
902 q_idx = 0; 903 q_idx = 0;
@@ -910,6 +911,8 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
910 goto done; 911 goto done;
911 q_idx++; 912 q_idx++;
912 } 913 }
914cont:
915 idx++;
913 } 916 }
914 917
915done: 918done:
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index db73ef97485a..df94e3cdfba3 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1103,6 +1103,13 @@ void sctp_assoc_update(struct sctp_association *asoc,
1103 asoc->ssnmap = new->ssnmap; 1103 asoc->ssnmap = new->ssnmap;
1104 new->ssnmap = NULL; 1104 new->ssnmap = NULL;
1105 } 1105 }
1106
1107 if (!asoc->assoc_id) {
1108 /* get a new association id since we don't have one
1109 * yet.
1110 */
1111 sctp_assoc_set_id(asoc, GFP_ATOMIC);
1112 }
1106 } 1113 }
1107} 1114}
1108 1115
@@ -1375,3 +1382,25 @@ out:
1375 sctp_read_unlock(&asoc->base.addr_lock); 1382 sctp_read_unlock(&asoc->base.addr_lock);
1376 return found; 1383 return found;
1377} 1384}
1385
1386/* Set an association id for a given association */
1387int sctp_assoc_set_id(struct sctp_association *asoc, gfp_t gfp)
1388{
1389 int assoc_id;
1390 int error = 0;
1391retry:
1392 if (unlikely(!idr_pre_get(&sctp_assocs_id, gfp)))
1393 return -ENOMEM;
1394
1395 spin_lock_bh(&sctp_assocs_id_lock);
1396 error = idr_get_new_above(&sctp_assocs_id, (void *)asoc,
1397 1, &assoc_id);
1398 spin_unlock_bh(&sctp_assocs_id_lock);
1399 if (error == -EAGAIN)
1400 goto retry;
1401 else if (error)
1402 return error;
1403
1404 asoc->assoc_id = (sctp_assoc_t) assoc_id;
1405 return error;
1406}
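
sctp_assoc_set_id() factors out the idr retry idiom that sm_make_chunk.c used to open-code (see the hunk two files below): idr_pre_get() preallocates a layer, but idr_get_new_above() can still return -EAGAIN when a concurrent allocation consumed it, in which case the caller simply preallocates again. The generic shape (names illustrative):

    /* Allocate an id >= 1 for 'ptr', retrying across preallocation
     * races; returns 0 or a negative errno. */
    static int sketch_idr_alloc(struct idr *idr, spinlock_t *lock,
                                void *ptr, int *id, gfp_t gfp)
    {
            int error;
    retry:
            if (unlikely(!idr_pre_get(idr, gfp)))
                    return -ENOMEM;
            spin_lock_bh(lock);
            error = idr_get_new_above(idr, ptr, 1, id);
            spin_unlock_bh(lock);
            if (error == -EAGAIN)
                    goto retry;
            return error;
    }
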
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index ca527a27dd05..84cd53635fe8 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -992,45 +992,52 @@ static struct sctp_pf sctp_pf_inet6_specific = {
992 .af = &sctp_ipv6_specific, 992 .af = &sctp_ipv6_specific,
993}; 993};
994 994
995/* Initialize IPv6 support and register with inet6 stack. */ 995/* Initialize IPv6 support and register with socket layer. */
996int sctp_v6_init(void) 996int sctp_v6_init(void)
997{ 997{
998 int rc = proto_register(&sctpv6_prot, 1); 998 int rc;
999 999
1000 /* Register the SCTP specific PF_INET6 functions. */
1001 sctp_register_pf(&sctp_pf_inet6_specific, PF_INET6);
1002
1003 /* Register the SCTP specific AF_INET6 functions. */
1004 sctp_register_af(&sctp_ipv6_specific);
1005
1006 rc = proto_register(&sctpv6_prot, 1);
1000 if (rc) 1007 if (rc)
1001 goto out; 1008 return rc;
1002 /* Register inet6 protocol. */
1003 rc = -EAGAIN;
1004 if (inet6_add_protocol(&sctpv6_protocol, IPPROTO_SCTP) < 0)
1005 goto out_unregister_sctp_proto;
1006 1009
1007 /* Add SCTPv6(UDP and TCP style) to inetsw6 linked list. */ 1010 /* Add SCTPv6(UDP and TCP style) to inetsw6 linked list. */
1008 inet6_register_protosw(&sctpv6_seqpacket_protosw); 1011 inet6_register_protosw(&sctpv6_seqpacket_protosw);
1009 inet6_register_protosw(&sctpv6_stream_protosw); 1012 inet6_register_protosw(&sctpv6_stream_protosw);
1010 1013
1011 /* Register the SCTP specific PF_INET6 functions. */ 1014 return 0;
1012 sctp_register_pf(&sctp_pf_inet6_specific, PF_INET6); 1015}
1013
1014 /* Register the SCTP specific AF_INET6 functions. */
1015 sctp_register_af(&sctp_ipv6_specific);
1016 1016
1017/* Register with inet6 layer. */
1018int sctp_v6_add_protocol(void)
1019{
1017 /* Register notifier for inet6 address additions/deletions. */ 1020 /* Register notifier for inet6 address additions/deletions. */
1018 register_inet6addr_notifier(&sctp_inet6addr_notifier); 1021 register_inet6addr_notifier(&sctp_inet6addr_notifier);
1019 rc = 0; 1022
1020out: 1023 if (inet6_add_protocol(&sctpv6_protocol, IPPROTO_SCTP) < 0)
1021 return rc; 1024 return -EAGAIN;
1022out_unregister_sctp_proto: 1025
1023 proto_unregister(&sctpv6_prot); 1026 return 0;
1024 goto out;
1025} 1027}
1026 1028
1027/* IPv6 specific exit support. */ 1029/* IPv6 specific exit support. */
1028void sctp_v6_exit(void) 1030void sctp_v6_exit(void)
1029{ 1031{
1030 list_del(&sctp_ipv6_specific.list);
1031 inet6_del_protocol(&sctpv6_protocol, IPPROTO_SCTP);
1032 inet6_unregister_protosw(&sctpv6_seqpacket_protosw); 1032 inet6_unregister_protosw(&sctpv6_seqpacket_protosw);
1033 inet6_unregister_protosw(&sctpv6_stream_protosw); 1033 inet6_unregister_protosw(&sctpv6_stream_protosw);
1034 unregister_inet6addr_notifier(&sctp_inet6addr_notifier);
1035 proto_unregister(&sctpv6_prot); 1034 proto_unregister(&sctpv6_prot);
1035 list_del(&sctp_ipv6_specific.list);
1036}
1037
1038/* Unregister with inet6 layer. */
1039void sctp_v6_del_protocol(void)
1040{
1041 inet6_del_protocol(&sctpv6_protocol, IPPROTO_SCTP);
1042 unregister_inet6addr_notifier(&sctp_inet6addr_notifier);
1036} 1043}
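
The ipv6.c rework splits bring-up into two phases: sctp_v6_init() registers the PF/AF operations and protosw entries with the socket layer, while sctp_v6_add_protocol() runs only once everything else is ready, since hooking into inet6 (and the address notifier) is what makes packets start flowing; teardown mirrors this in reverse. The intended call order from protocol.c, condensed (wrapper names illustrative):

    static int sketch_v6_bringup(void)
    {
            int rc;

            rc = sctp_v6_init();            /* socket layer first */
            if (rc)
                    return rc;
            rc = sctp_v6_add_protocol();    /* start receiving last */
            if (rc)
                    sctp_v6_exit();
            return rc;
    }

    static void sketch_v6_teardown(void)
    {
            sctp_v6_del_protocol();         /* stop receiving first */
            sctp_v6_exit();
    }
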
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index c361deb6cea9..34bab36637ac 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -170,7 +170,7 @@ static void sctp_get_local_addr_list(void)
170 struct sctp_af *af; 170 struct sctp_af *af;
171 171
172 read_lock(&dev_base_lock); 172 read_lock(&dev_base_lock);
173 for (dev = dev_base; dev; dev = dev->next) { 173 for_each_netdev(dev) {
174 __list_for_each(pos, &sctp_address_families) { 174 __list_for_each(pos, &sctp_address_families) {
175 af = list_entry(pos, struct sctp_af, list); 175 af = list_entry(pos, struct sctp_af, list);
176 af->copy_addrlist(&sctp_local_addr_list, dev); 176 af->copy_addrlist(&sctp_local_addr_list, dev);
@@ -975,28 +975,14 @@ SCTP_STATIC __init int sctp_init(void)
975 if (!sctp_sanity_check()) 975 if (!sctp_sanity_check())
976 goto out; 976 goto out;
977 977
978 status = proto_register(&sctp_prot, 1); 978 /* Allocate bind_bucket and chunk caches. */
979 if (status)
980 goto out;
981
982 /* Add SCTP to inet_protos hash table. */
983 status = -EAGAIN;
984 if (inet_add_protocol(&sctp_protocol, IPPROTO_SCTP) < 0)
985 goto err_add_protocol;
986
987 /* Add SCTP(TCP and UDP style) to inetsw linked list. */
988 inet_register_protosw(&sctp_seqpacket_protosw);
989 inet_register_protosw(&sctp_stream_protosw);
990
991 /* Allocate a cache pools. */
992 status = -ENOBUFS; 979 status = -ENOBUFS;
993 sctp_bucket_cachep = kmem_cache_create("sctp_bind_bucket", 980 sctp_bucket_cachep = kmem_cache_create("sctp_bind_bucket",
994 sizeof(struct sctp_bind_bucket), 981 sizeof(struct sctp_bind_bucket),
995 0, SLAB_HWCACHE_ALIGN, 982 0, SLAB_HWCACHE_ALIGN,
996 NULL, NULL); 983 NULL, NULL);
997
998 if (!sctp_bucket_cachep) 984 if (!sctp_bucket_cachep)
999 goto err_bucket_cachep; 985 goto out;
1000 986
1001 sctp_chunk_cachep = kmem_cache_create("sctp_chunk", 987 sctp_chunk_cachep = kmem_cache_create("sctp_chunk",
1002 sizeof(struct sctp_chunk), 988 sizeof(struct sctp_chunk),
@@ -1153,6 +1139,14 @@ SCTP_STATIC __init int sctp_init(void)
1153 INIT_LIST_HEAD(&sctp_address_families); 1139 INIT_LIST_HEAD(&sctp_address_families);
1154 sctp_register_af(&sctp_ipv4_specific); 1140 sctp_register_af(&sctp_ipv4_specific);
1155 1141
1142 status = proto_register(&sctp_prot, 1);
1143 if (status)
1144 goto err_proto_register;
1145
1146 /* Register SCTP(UDP and TCP style) with socket layer. */
1147 inet_register_protosw(&sctp_seqpacket_protosw);
1148 inet_register_protosw(&sctp_stream_protosw);
1149
1156 status = sctp_v6_init(); 1150 status = sctp_v6_init();
1157 if (status) 1151 if (status)
1158 goto err_v6_init; 1152 goto err_v6_init;
@@ -1166,19 +1160,39 @@ SCTP_STATIC __init int sctp_init(void)
1166 1160
1167 /* Initialize the local address list. */ 1161 /* Initialize the local address list. */
1168 INIT_LIST_HEAD(&sctp_local_addr_list); 1162 INIT_LIST_HEAD(&sctp_local_addr_list);
1169
1170 sctp_get_local_addr_list(); 1163 sctp_get_local_addr_list();
1171 1164
1172 /* Register notifier for inet address additions/deletions. */ 1165 /* Register notifier for inet address additions/deletions. */
1173 register_inetaddr_notifier(&sctp_inetaddr_notifier); 1166 register_inetaddr_notifier(&sctp_inetaddr_notifier);
1174 1167
1168 /* Register SCTP with inet layer. */
1169 if (inet_add_protocol(&sctp_protocol, IPPROTO_SCTP) < 0) {
1170 status = -EAGAIN;
1171 goto err_add_protocol;
1172 }
1173
1174 /* Register SCTP with inet6 layer. */
1175 status = sctp_v6_add_protocol();
1176 if (status)
1177 goto err_v6_add_protocol;
1178
1175 __unsafe(THIS_MODULE); 1179 __unsafe(THIS_MODULE);
1176 status = 0; 1180 status = 0;
1177out: 1181out:
1178 return status; 1182 return status;
1183err_v6_add_protocol:
1184 inet_del_protocol(&sctp_protocol, IPPROTO_SCTP);
1185 unregister_inetaddr_notifier(&sctp_inetaddr_notifier);
1186err_add_protocol:
1187 sctp_free_local_addr_list();
1188 sock_release(sctp_ctl_socket);
1179err_ctl_sock_init: 1189err_ctl_sock_init:
1180 sctp_v6_exit(); 1190 sctp_v6_exit();
1181err_v6_init: 1191err_v6_init:
1192 inet_unregister_protosw(&sctp_stream_protosw);
1193 inet_unregister_protosw(&sctp_seqpacket_protosw);
1194 proto_unregister(&sctp_prot);
1195err_proto_register:
1182 sctp_sysctl_unregister(); 1196 sctp_sysctl_unregister();
1183 list_del(&sctp_ipv4_specific.list); 1197 list_del(&sctp_ipv4_specific.list);
1184 free_pages((unsigned long)sctp_port_hashtable, 1198 free_pages((unsigned long)sctp_port_hashtable,
@@ -1192,19 +1206,13 @@ err_ehash_alloc:
1192 sizeof(struct sctp_hashbucket))); 1206 sizeof(struct sctp_hashbucket)));
1193err_ahash_alloc: 1207err_ahash_alloc:
1194 sctp_dbg_objcnt_exit(); 1208 sctp_dbg_objcnt_exit();
1195err_init_proc:
1196 sctp_proc_exit(); 1209 sctp_proc_exit();
1210err_init_proc:
1197 cleanup_sctp_mibs(); 1211 cleanup_sctp_mibs();
1198err_init_mibs: 1212err_init_mibs:
1199 kmem_cache_destroy(sctp_chunk_cachep); 1213 kmem_cache_destroy(sctp_chunk_cachep);
1200err_chunk_cachep: 1214err_chunk_cachep:
1201 kmem_cache_destroy(sctp_bucket_cachep); 1215 kmem_cache_destroy(sctp_bucket_cachep);
1202err_bucket_cachep:
1203 inet_del_protocol(&sctp_protocol, IPPROTO_SCTP);
1204 inet_unregister_protosw(&sctp_seqpacket_protosw);
1205 inet_unregister_protosw(&sctp_stream_protosw);
1206err_add_protocol:
1207 proto_unregister(&sctp_prot);
1208 goto out; 1216 goto out;
1209} 1217}
1210 1218
@@ -1215,8 +1223,9 @@ SCTP_STATIC __exit void sctp_exit(void)
1215 * up all the remaining associations and all that memory. 1223 * up all the remaining associations and all that memory.
1216 */ 1224 */
1217 1225
1218 /* Unregister notifier for inet address additions/deletions. */ 1226 /* Unregister with inet6/inet layers. */
1219 unregister_inetaddr_notifier(&sctp_inetaddr_notifier); 1227 sctp_v6_del_protocol();
1228 inet_del_protocol(&sctp_protocol, IPPROTO_SCTP);
1220 1229
1221 /* Free the local address list. */ 1230 /* Free the local address list. */
1222 sctp_free_local_addr_list(); 1231 sctp_free_local_addr_list();
@@ -1224,7 +1233,16 @@ SCTP_STATIC __exit void sctp_exit(void)
1224 /* Free the control endpoint. */ 1233 /* Free the control endpoint. */
1225 sock_release(sctp_ctl_socket); 1234 sock_release(sctp_ctl_socket);
1226 1235
1236 /* Cleanup v6 initializations. */
1227 sctp_v6_exit(); 1237 sctp_v6_exit();
1238
1239 /* Unregister with socket layer. */
1240 inet_unregister_protosw(&sctp_stream_protosw);
1241 inet_unregister_protosw(&sctp_seqpacket_protosw);
1242
1243 /* Unregister notifier for inet address additions/deletions. */
1244 unregister_inetaddr_notifier(&sctp_inetaddr_notifier);
1245
1228 sctp_sysctl_unregister(); 1246 sctp_sysctl_unregister();
1229 list_del(&sctp_ipv4_specific.list); 1247 list_del(&sctp_ipv4_specific.list);
1230 1248
@@ -1236,16 +1254,13 @@ SCTP_STATIC __exit void sctp_exit(void)
1236 get_order(sctp_port_hashsize * 1254 get_order(sctp_port_hashsize *
1237 sizeof(struct sctp_bind_hashbucket))); 1255 sizeof(struct sctp_bind_hashbucket)));
1238 1256
1239 kmem_cache_destroy(sctp_chunk_cachep);
1240 kmem_cache_destroy(sctp_bucket_cachep);
1241
1242 sctp_dbg_objcnt_exit(); 1257 sctp_dbg_objcnt_exit();
1243 sctp_proc_exit(); 1258 sctp_proc_exit();
1244 cleanup_sctp_mibs(); 1259 cleanup_sctp_mibs();
1245 1260
1246 inet_del_protocol(&sctp_protocol, IPPROTO_SCTP); 1261 kmem_cache_destroy(sctp_chunk_cachep);
1247 inet_unregister_protosw(&sctp_seqpacket_protosw); 1262 kmem_cache_destroy(sctp_bucket_cachep);
1248 inet_unregister_protosw(&sctp_stream_protosw); 1263
1249 proto_unregister(&sctp_prot); 1264 proto_unregister(&sctp_prot);
1250} 1265}
1251 1266
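
The reordered error path in sctp_init() is the standard kernel unwind idiom the new labels encode: each goto target undoes exactly the steps that completed before the failure, in reverse order, so no branch frees something that was never allocated. Abstracted (step/undo names are placeholders):

    static int step_a(void) { return 0; }   /* placeholder */
    static int step_b(void) { return 0; }   /* placeholder */
    static int step_c(void) { return 0; }   /* placeholder */
    static void undo_a(void) { }
    static void undo_b(void) { }

    static int sketch_setup(void)
    {
            int err;

            err = step_a();
            if (err)
                    goto out;
            err = step_b();
            if (err)
                    goto err_b;
            err = step_c();
            if (err)
                    goto err_c;
            return 0;
    err_c:
            undo_b();
    err_b:
            undo_a();
    out:
            return err;
    }
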
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index be783a3761c4..8d18f570c2e6 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -1939,7 +1939,6 @@ int sctp_process_init(struct sctp_association *asoc, sctp_cid_t cid,
 	 * association.
 	 */
 	if (!asoc->temp) {
-		int assoc_id;
 		int error;
 
 		asoc->ssnmap = sctp_ssnmap_new(asoc->c.sinit_max_instreams,
@@ -1947,19 +1946,9 @@ int sctp_process_init(struct sctp_association *asoc, sctp_cid_t cid,
 		if (!asoc->ssnmap)
 			goto clean_up;
 
-	retry:
-		if (unlikely(!idr_pre_get(&sctp_assocs_id, gfp)))
+		error = sctp_assoc_set_id(asoc, gfp);
+		if (error)
 			goto clean_up;
-		spin_lock_bh(&sctp_assocs_id_lock);
-		error = idr_get_new_above(&sctp_assocs_id, (void *)asoc, 1,
-					  &assoc_id);
-		spin_unlock_bh(&sctp_assocs_id_lock);
-		if (error == -EAGAIN)
-			goto retry;
-		else if (error)
-			goto clean_up;
-
-		asoc->assoc_id = (sctp_assoc_t) assoc_id;
 	}
 
 	/* ADDIP Section 4.1 ASCONF Chunk Procedures
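The open-coded idr retry loop moves into a helper, sctp_assoc_set_id(), added elsewhere in this patch so that it can also be called as a side effect from the state machine. Presumably it wraps the same idr_pre_get()/idr_get_new_above() dance; a sketch reconstructed from the lines deleted here:

    /* Sketch reconstructed from the removed code above; the helper
     * added to net/sctp/associola.c presumably looks like this.
     */
    int sctp_assoc_set_id(struct sctp_association *asoc, gfp_t gfp)
    {
    	int assoc_id;
    	int error = 0;
    retry:
    	/* idr_pre_get() preallocates; it returns 0 on allocation failure */
    	if (unlikely(!idr_pre_get(&sctp_assocs_id, gfp)))
    		return -ENOMEM;
    
    	spin_lock_bh(&sctp_assocs_id_lock);
    	error = idr_get_new_above(&sctp_assocs_id, (void *)asoc, 1,
    				  &assoc_id);
    	spin_unlock_bh(&sctp_assocs_id_lock);
    	if (error == -EAGAIN)
    		goto retry;	/* raced with another id allocation */
    	else if (error)
    		return error;
    
    	asoc->assoc_id = (sctp_assoc_t)assoc_id;
    	return error;
    }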
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index b37a7adeb150..d9fad4f6ffc3 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -862,6 +862,33 @@ static void sctp_cmd_set_sk_err(struct sctp_association *asoc, int error)
 	sk->sk_err = error;
 }
 
+/* Helper function to generate an association change event */
+static void sctp_cmd_assoc_change(sctp_cmd_seq_t *commands,
+				  struct sctp_association *asoc,
+				  u8 state)
+{
+	struct sctp_ulpevent *ev;
+
+	ev = sctp_ulpevent_make_assoc_change(asoc, 0, state, 0,
+					     asoc->c.sinit_num_ostreams,
+					     asoc->c.sinit_max_instreams,
+					     NULL, GFP_ATOMIC);
+	if (ev)
+		sctp_ulpq_tail_event(&asoc->ulpq, ev);
+}
+
+/* Helper function to generate an adaptation indication event */
+static void sctp_cmd_adaptation_ind(sctp_cmd_seq_t *commands,
+				    struct sctp_association *asoc)
+{
+	struct sctp_ulpevent *ev;
+
+	ev = sctp_ulpevent_make_adaptation_indication(asoc, GFP_ATOMIC);
+
+	if (ev)
+		sctp_ulpq_tail_event(&asoc->ulpq, ev);
+}
+
 /* These three macros allow us to pull the debugging code out of the
  * main flow of sctp_do_sm() to keep attention focused on the real
  * functionality there.
@@ -1485,6 +1512,14 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
 		case SCTP_CMD_SET_SK_ERR:
 			sctp_cmd_set_sk_err(asoc, cmd->obj.error);
 			break;
+		case SCTP_CMD_ASSOC_CHANGE:
+			sctp_cmd_assoc_change(commands, asoc,
+					      cmd->obj.u8);
+			break;
+		case SCTP_CMD_ADAPTATION_IND:
+			sctp_cmd_adaptation_ind(commands, asoc);
+			break;
+
 		default:
 			printk(KERN_WARNING "Impossible command: %u, %p\n",
 			       cmd->verb, cmd->obj.ptr);
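The new verbs ride the existing side-effect command queue: each queued command is a verb plus a one-slot union argument, which is how a bare state byte (SCTP_U8(SCTP_COMM_UP)) reaches cmd->obj.u8 in the interpreter above. A rough sketch of that plumbing; the layout is an assumption, and the real sctp_arg_t union in include/net/sctp/command.h has many more members:

    /* Rough sketch of the command argument plumbing (assumed layout). */
    typedef union {
    	void *ptr;
    	int error;
    	u8 u8;			/* used by SCTP_CMD_ASSOC_CHANGE */
    } sctp_arg_t;
    
    #define SCTP_U8(v)	((sctp_arg_t) { .u8 = (v) })
    #define SCTP_NULL()	((sctp_arg_t) { .ptr = NULL })
    
    typedef struct {
    	sctp_arg_t obj;		/* the slot read back as cmd->obj.u8 */
    	sctp_verb_t verb;	/* e.g. SCTP_CMD_ADAPTATION_IND */
    } sctp_cmd_t;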
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 9e28a5d51200..f02ce3dddb7b 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -1656,7 +1656,6 @@ static sctp_disposition_t sctp_sf_do_dupcook_b(const struct sctp_endpoint *ep,
 					struct sctp_association *new_asoc)
 {
 	sctp_init_chunk_t *peer_init;
-	struct sctp_ulpevent *ev;
 	struct sctp_chunk *repl;
 
 	/* new_asoc is a brand-new association, so these are not yet
@@ -1687,34 +1686,28 @@ static sctp_disposition_t sctp_sf_do_dupcook_b(const struct sctp_endpoint *ep,
 	 * D) IMPLEMENTATION NOTE: An implementation may choose to
 	 * send the Communication Up notification to the SCTP user
 	 * upon reception of a valid COOKIE ECHO chunk.
+	 *
+	 * Sadly, this needs to be implemented as a side-effect, because
+	 * we are not guaranteed to have set the association id of the real
+	 * association and so these notifications need to be delayed until
+	 * the association id is allocated.
 	 */
-	ev = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_UP, 0,
-					     new_asoc->c.sinit_num_ostreams,
-					     new_asoc->c.sinit_max_instreams,
-					     NULL, GFP_ATOMIC);
-	if (!ev)
-		goto nomem_ev;
 
-	sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev));
+	sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_CHANGE, SCTP_U8(SCTP_COMM_UP));
 
 	/* Sockets API Draft Section 5.3.1.6
 	 * When a peer sends a Adaptation Layer Indication parameter , SCTP
 	 * delivers this notification to inform the application that of the
 	 * peers requested adaptation layer.
+	 *
+	 * This also needs to be done as a side effect for the same reason as
+	 * above.
 	 */
-	if (asoc->peer.adaptation_ind) {
-		ev = sctp_ulpevent_make_adaptation_indication(asoc, GFP_ATOMIC);
-		if (!ev)
-			goto nomem_ev;
-
-		sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
-				SCTP_ULPEVENT(ev));
-	}
+	if (asoc->peer.adaptation_ind)
+		sctp_add_cmd_sf(commands, SCTP_CMD_ADAPTATION_IND, SCTP_NULL());
 
 	return SCTP_DISPOSITION_CONSUME;
 
-nomem_ev:
-	sctp_chunk_free(repl);
 nomem:
 	return SCTP_DISPOSITION_NOMEM;
 }
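Deferring the notifications means that by the time the interpreter emits SCTP_COMM_UP, sctp_assoc_set_id() has had a chance to run, so the event's sac_assoc_id can refer to a real association id. From userspace the notification is consumed as before; a minimal consumer sketch, assuming the standard lksctp notification layout:

    #include <stdio.h>
    #include <netinet/sctp.h>
    
    /* Minimal sketch: with the event generated as a side effect, the
     * sac_assoc_id field is expected to be populated when this fires.
     */
    static void handle_notification(const union sctp_notification *sn)
    {
    	if (sn->sn_header.sn_type == SCTP_ASSOC_CHANGE &&
    	    sn->sn_assoc_change.sac_state == SCTP_COMM_UP)
    		printf("association %d is up\n",
    		       (int)sn->sn_assoc_change.sac_assoc_id);
    }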
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 2fc0a92caa78..9f1a908776de 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -972,6 +972,7 @@ static int __sctp_connect(struct sock* sk,
 	int walk_size = 0;
 	union sctp_addr *sa_addr;
 	void *addr_buf;
+	unsigned short port;
 
 	sp = sctp_sk(sk);
 	ep = sp->ep;
@@ -992,6 +993,7 @@ static int __sctp_connect(struct sock* sk,
 	while (walk_size < addrs_size) {
 		sa_addr = (union sctp_addr *)addr_buf;
 		af = sctp_get_af_specific(sa_addr->sa.sa_family);
+		port = ntohs(sa_addr->v4.sin_port);
 
 		/* If the address family is not supported or if this address
 		 * causes the address buffer to overflow return EINVAL.
@@ -1005,6 +1007,12 @@ static int __sctp_connect(struct sock* sk,
 		if (err)
 			goto out_free;
 
+		/* Make sure the destination port is correctly set
+		 * in all addresses.
+		 */
+		if (asoc && asoc->peer.port && asoc->peer.port != port)
+			goto out_free;
+
 		memcpy(&to, sa_addr, af->sockaddr_len);
 
 		/* Check if there already is a matching association on the
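The new check rejects a connect whose address list mixes destination ports: every address after the first must carry the same port as the association's peer port, or the call bails out through out_free. A userspace sketch of what is now refused; sctp_connectx() here is the lksctp-tools wrapper, and the exact errno is not pinned down by this hunk:

    #include <netinet/in.h>
    #include <netinet/sctp.h>
    #include <string.h>
    
    /* Two addresses for one connect attempt, but with different ports:
     * the kernel now fails this rather than quietly picking one port.
     */
    int connect_mixed_ports(int sd, struct in_addr ip1, struct in_addr ip2)
    {
    	struct sockaddr_in addrs[2];
    
    	memset(addrs, 0, sizeof(addrs));
    	addrs[0].sin_family = AF_INET;
    	addrs[0].sin_port = htons(5000);
    	addrs[0].sin_addr = ip1;
    
    	addrs[1] = addrs[0];
    	addrs[1].sin_addr = ip2;
    	addrs[1].sin_port = htons(5001);	/* mismatch: rejected */
    
    	return sctp_connectx(sd, (struct sockaddr *)addrs, 2);
    }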
@@ -5012,7 +5020,8 @@ pp_found:
 		struct hlist_node *node;
 
 		SCTP_DEBUG_PRINTK("sctp_get_port() found a possible match\n");
-		if (pp->fastreuse && sk->sk_reuse)
+		if (pp->fastreuse && sk->sk_reuse &&
+		    sk->sk_state != SCTP_SS_LISTENING)
 			goto success;
 
 		/* Run through the list of sockets bound to the port
@@ -5029,7 +5038,8 @@ pp_found:
 			struct sctp_endpoint *ep2;
 			ep2 = sctp_sk(sk2)->ep;
 
-			if (reuse && sk2->sk_reuse)
+			if (reuse && sk2->sk_reuse &&
+			    sk2->sk_state != SCTP_SS_LISTENING)
 				continue;
 
 			if (sctp_bind_addr_match(&ep2->base.bind_addr, addr,
@@ -5050,9 +5060,13 @@ pp_not_found:
 	 * if sk->sk_reuse is too (that is, if the caller requested
 	 * SO_REUSEADDR on this socket -sk-).
 	 */
-	if (hlist_empty(&pp->owner))
-		pp->fastreuse = sk->sk_reuse ? 1 : 0;
-	else if (pp->fastreuse && !sk->sk_reuse)
+	if (hlist_empty(&pp->owner)) {
+		if (sk->sk_reuse && sk->sk_state != SCTP_SS_LISTENING)
+			pp->fastreuse = 1;
+		else
+			pp->fastreuse = 0;
+	} else if (pp->fastreuse &&
+		   (!sk->sk_reuse || sk->sk_state == SCTP_SS_LISTENING))
 		pp->fastreuse = 0;
 
 	/* We are set, so fill up all the data in the hash table
@@ -5060,8 +5074,8 @@ pp_not_found:
 	 * sockets FIXME: Blurry, NPI (ipg).
 	 */
 success:
-	inet_sk(sk)->num = snum;
 	if (!sctp_sk(sk)->bind_hash) {
+		inet_sk(sk)->num = snum;
 		sk_add_bind_node(sk, &pp->owner);
 		sctp_sk(sk)->bind_hash = pp;
 	}
@@ -5134,12 +5148,16 @@ SCTP_STATIC int sctp_seqpacket_listen(struct sock *sk, int backlog)
 	 * This is not currently spelled out in the SCTP sockets
 	 * extensions draft, but follows the practice as seen in TCP
 	 * sockets.
+	 *
+	 * Additionally, turn off fastreuse flag since we are not listening
 	 */
+	sk->sk_state = SCTP_SS_LISTENING;
 	if (!ep->base.bind_addr.port) {
 		if (sctp_autobind(sk))
 			return -EAGAIN;
-	}
-	sk->sk_state = SCTP_SS_LISTENING;
+	} else
+		sctp_sk(sk)->bind_hash->fastreuse = 0;
+
 	sctp_hash_endpoint(ep);
 	return 0;
 }
@@ -5177,11 +5195,13 @@ SCTP_STATIC int sctp_stream_listen(struct sock *sk, int backlog)
 	 * extensions draft, but follows the practice as seen in TCP
 	 * sockets.
 	 */
+	sk->sk_state = SCTP_SS_LISTENING;
 	if (!ep->base.bind_addr.port) {
 		if (sctp_autobind(sk))
 			return -EAGAIN;
-	}
-	sk->sk_state = SCTP_SS_LISTENING;
+	} else
+		sctp_sk(sk)->bind_hash->fastreuse = 0;
+
 	sk->sk_max_ack_backlog = backlog;
 	sctp_hash_endpoint(ep);
 	return 0;
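Taken together, the sctp_get_port() and listen() changes align SCTP with TCP's SO_REUSEADDR semantics: a port stays shareable (fastreuse) only while none of its owners is listening. A userspace sketch of the case these hunks close off:

    #include <netinet/in.h>
    #include <string.h>
    #include <sys/socket.h>
    
    int main(void)
    {
    	struct sockaddr_in sa;
    	int one = 1;
    	int sd1 = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);
    	int sd2 = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);
    
    	memset(&sa, 0, sizeof(sa));
    	sa.sin_family = AF_INET;
    	sa.sin_port = htons(5000);
    
    	setsockopt(sd1, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));
    	setsockopt(sd2, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));
    
    	bind(sd1, (struct sockaddr *)&sa, sizeof(sa));
    	listen(sd1, 5);
    
    	/* Same port while sd1 is listening: expected to fail now,
    	 * even though both sockets requested SO_REUSEADDR.
    	 */
    	return bind(sd2, (struct sockaddr *)&sa, sizeof(sa));
    }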
diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c
index 67bb29b44d1b..0ee6ded18f3a 100644
--- a/net/tipc/eth_media.c
+++ b/net/tipc/eth_media.c
@@ -120,16 +120,18 @@ static int recv_msg(struct sk_buff *buf, struct net_device *dev,
 
 static int enable_bearer(struct tipc_bearer *tb_ptr)
 {
-	struct net_device *dev = dev_base;
+	struct net_device *dev, *pdev;
 	struct eth_bearer *eb_ptr = &eth_bearers[0];
 	struct eth_bearer *stop = &eth_bearers[MAX_ETH_BEARERS];
 	char *driver_name = strchr((const char *)tb_ptr->name, ':') + 1;
 
 	/* Find device with specified name */
-
-	while (dev && dev->name && strncmp(dev->name, driver_name, IFNAMSIZ)) {
-		dev = dev->next;
-	}
+	dev = NULL;
+	for_each_netdev(pdev)
+		if (!strncmp(pdev->name, driver_name, IFNAMSIZ)) {
+			dev = pdev;
+			break;
+		}
 	if (!dev)
 		return -ENODEV;
 
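This is part of the tree-wide move away from walking the dev_base->next chain by hand; the same conversion appears in the 8021q and sctp hunks of this diff. Assuming the list_head-based device list this series introduces, the iterator is roughly:

    /* Rough shape of the new iterator; the real definition lives in
     * include/linux/netdevice.h alongside the dev_base_head list.
     */
    #define for_each_netdev(d) \
    		list_for_each_entry(d, &dev_base_head, dev_list)

For a one-off lookup by name, dev_get_by_name() would be the usual choice; the open-coded loop keeps the bearer code's existing behaviour of not taking a device reference.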
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 263e34e45265..95271e8426a1 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -579,7 +579,7 @@ static inline int xfrm_byidx_should_resize(int total)
 	return 0;
 }
 
-void xfrm_spd_getinfo(struct xfrm_spdinfo *si)
+void xfrm_spd_getinfo(struct xfrmk_spdinfo *si)
 {
 	read_lock_bh(&xfrm_policy_lock);
 	si->incnt = xfrm_policy_count[XFRM_POLICY_IN];
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index f3a61ebd8d65..9955ff4da0a2 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -421,7 +421,7 @@ restart:
 }
 EXPORT_SYMBOL(xfrm_state_flush);
 
-void xfrm_sad_getinfo(struct xfrm_sadinfo *si)
+void xfrm_sad_getinfo(struct xfrmk_sadinfo *si)
 {
 	spin_lock_bh(&xfrm_state_lock);
 	si->sadcnt = xfrm_state_num;
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 4210d91624cd..b14c7e590c31 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -674,7 +674,9 @@ static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb,
 
 static int build_spdinfo(struct sk_buff *skb, u32 pid, u32 seq, u32 flags)
 {
-	struct xfrm_spdinfo si;
+	struct xfrmk_spdinfo si;
+	struct xfrmu_spdinfo spc;
+	struct xfrmu_spdhinfo sph;
 	struct nlmsghdr *nlh;
 	u32 *f;
 
@@ -685,23 +687,17 @@ static int build_spdinfo(struct sk_buff *skb, u32 pid, u32 seq, u32 flags)
 	f = nlmsg_data(nlh);
 	*f = flags;
 	xfrm_spd_getinfo(&si);
-
-	if (flags & XFRM_SPD_HMASK)
-		NLA_PUT_U32(skb, XFRMA_SPDHMASK, si.spdhcnt);
-	if (flags & XFRM_SPD_HMAX)
-		NLA_PUT_U32(skb, XFRMA_SPDHMAX, si.spdhmcnt);
-	if (flags & XFRM_SPD_ICNT)
-		NLA_PUT_U32(skb, XFRMA_SPDICNT, si.incnt);
-	if (flags & XFRM_SPD_OCNT)
-		NLA_PUT_U32(skb, XFRMA_SPDOCNT, si.outcnt);
-	if (flags & XFRM_SPD_FCNT)
-		NLA_PUT_U32(skb, XFRMA_SPDFCNT, si.fwdcnt);
-	if (flags & XFRM_SPD_ISCNT)
-		NLA_PUT_U32(skb, XFRMA_SPDISCNT, si.inscnt);
-	if (flags & XFRM_SPD_OSCNT)
-		NLA_PUT_U32(skb, XFRMA_SPDOSCNT, si.inscnt);
-	if (flags & XFRM_SPD_FSCNT)
-		NLA_PUT_U32(skb, XFRMA_SPDFSCNT, si.inscnt);
+	spc.incnt = si.incnt;
+	spc.outcnt = si.outcnt;
+	spc.fwdcnt = si.fwdcnt;
+	spc.inscnt = si.inscnt;
+	spc.outscnt = si.outscnt;
+	spc.fwdscnt = si.fwdscnt;
+	sph.spdhcnt = si.spdhcnt;
+	sph.spdhmcnt = si.spdhmcnt;
+
+	NLA_PUT(skb, XFRMA_SPD_INFO, sizeof(spc), &spc);
+	NLA_PUT(skb, XFRMA_SPD_HINFO, sizeof(sph), &sph);
 
 	return nlmsg_end(skb, nlh);
 
@@ -719,23 +715,8 @@ static int xfrm_get_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
 	u32 seq = nlh->nlmsg_seq;
 	int len = NLMSG_LENGTH(sizeof(u32));
 
-
-	if (*flags & XFRM_SPD_HMASK)
-		len += RTA_SPACE(sizeof(u32));
-	if (*flags & XFRM_SPD_HMAX)
-		len += RTA_SPACE(sizeof(u32));
-	if (*flags & XFRM_SPD_ICNT)
-		len += RTA_SPACE(sizeof(u32));
-	if (*flags & XFRM_SPD_OCNT)
-		len += RTA_SPACE(sizeof(u32));
-	if (*flags & XFRM_SPD_FCNT)
-		len += RTA_SPACE(sizeof(u32));
-	if (*flags & XFRM_SPD_ISCNT)
-		len += RTA_SPACE(sizeof(u32));
-	if (*flags & XFRM_SPD_OSCNT)
-		len += RTA_SPACE(sizeof(u32));
-	if (*flags & XFRM_SPD_FSCNT)
-		len += RTA_SPACE(sizeof(u32));
+	len += RTA_SPACE(sizeof(struct xfrmu_spdinfo));
+	len += RTA_SPACE(sizeof(struct xfrmu_spdhinfo));
 
 	r_skb = alloc_skb(len, GFP_ATOMIC);
 	if (r_skb == NULL)
@@ -749,7 +730,8 @@ static int xfrm_get_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
 
 static int build_sadinfo(struct sk_buff *skb, u32 pid, u32 seq, u32 flags)
 {
-	struct xfrm_sadinfo si;
+	struct xfrmk_sadinfo si;
+	struct xfrmu_sadhinfo sh;
 	struct nlmsghdr *nlh;
 	u32 *f;
 
@@ -761,12 +743,11 @@ static int build_sadinfo(struct sk_buff *skb, u32 pid, u32 seq, u32 flags)
 	*f = flags;
 	xfrm_sad_getinfo(&si);
 
-	if (flags & XFRM_SAD_HMASK)
-		NLA_PUT_U32(skb, XFRMA_SADHMASK, si.sadhcnt);
-	if (flags & XFRM_SAD_HMAX)
-		NLA_PUT_U32(skb, XFRMA_SADHMAX, si.sadhmcnt);
-	if (flags & XFRM_SAD_CNT)
-		NLA_PUT_U32(skb, XFRMA_SADCNT, si.sadcnt);
+	sh.sadhmcnt = si.sadhmcnt;
+	sh.sadhcnt = si.sadhcnt;
+
+	NLA_PUT_U32(skb, XFRMA_SAD_CNT, si.sadcnt);
+	NLA_PUT(skb, XFRMA_SAD_HINFO, sizeof(sh), &sh);
 
 	return nlmsg_end(skb, nlh);
 
@@ -784,12 +765,8 @@ static int xfrm_get_sadinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
 	u32 seq = nlh->nlmsg_seq;
 	int len = NLMSG_LENGTH(sizeof(u32));
 
-	if (*flags & XFRM_SAD_HMASK)
-		len += RTA_SPACE(sizeof(u32));
-	if (*flags & XFRM_SAD_HMAX)
-		len += RTA_SPACE(sizeof(u32));
-	if (*flags & XFRM_SAD_CNT)
-		len += RTA_SPACE(sizeof(u32));
+	len += RTA_SPACE(sizeof(struct xfrmu_sadhinfo));
+	len += RTA_SPACE(sizeof(u32));
 
 	r_skb = alloc_skb(len, GFP_ATOMIC);
 
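The per-flag u32 attributes are replaced by two fixed-layout structs per dump, so the reply size no longer depends on which request flags were set. The userspace-visible payloads, reconstructed field-for-field from the assignments in build_spdinfo() and build_sadinfo() above (the authoritative definitions live in include/linux/xfrm.h):

    /* Sketch of the attribute payloads implied by the code above. */
    struct xfrmu_spdinfo {		/* XFRMA_SPD_INFO */
    	__u32 incnt;
    	__u32 outcnt;
    	__u32 fwdcnt;
    	__u32 inscnt;
    	__u32 outscnt;
    	__u32 fwdscnt;
    };
    
    struct xfrmu_spdhinfo {		/* XFRMA_SPD_HINFO */
    	__u32 spdhcnt;
    	__u32 spdhmcnt;
    };
    
    struct xfrmu_sadhinfo {		/* XFRMA_SAD_HINFO */
    	__u32 sadhcnt;
    	__u32 sadhmcnt;
    };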