author      Paul Mackerras <paulus@samba.org>  2007-09-19 20:09:27 -0400
committer   Paul Mackerras <paulus@samba.org>  2007-09-19 20:09:27 -0400
commit      0ce49a3945474fc942ec37c0c0efece60f592f80 (patch)
tree        f42b821b2d9e2d8775bc22f56d444c2cc7b7b7dd /net
parent      9e4859ef5462193643fd2b3c8ffb298e5a4a4319 (diff)
parent      a88a8eff1e6e32d3288986a9d36c6a449c032d3a (diff)
Merge branch 'linux-2.6'
Diffstat (limited to 'net')
-rw-r--r--  net/8021q/vlan.c          |    2
-rw-r--r--  net/bridge/br_device.c    |    4
-rw-r--r--  net/bridge/br_forward.c   |   21
-rw-r--r--  net/bridge/br_input.c     |   48
-rw-r--r--  net/bridge/br_netfilter.c |    2
-rw-r--r--  net/bridge/br_private.h   |    8
-rw-r--r--  net/core/pktgen.c         |   10
-rw-r--r--  net/core/sock.c           |  106
-rw-r--r--  net/ipv4/udp.c            |    6
-rw-r--r--  net/ipv6/addrconf.c       |    2
-rw-r--r--  net/ipv6/raw.c            |    3
-rw-r--r--  net/ipv6/udp.c            |    6
-rw-r--r--  net/sched/act_api.c       |    8
-rw-r--r--  net/sched/act_police.c    |    4
-rw-r--r--  net/sched/sch_cbq.c       |    2
-rw-r--r--  net/sctp/associola.c      |   14
-rw-r--r--  net/sctp/bind_addr.c      |   70
-rw-r--r--  net/sctp/endpointola.c    |   27
-rw-r--r--  net/sctp/ipv6.c           |   46
-rw-r--r--  net/sctp/protocol.c       |   79
-rw-r--r--  net/sctp/sm_make_chunk.c  |   18
-rw-r--r--  net/sctp/socket.c         |  134
-rw-r--r--  net/sunrpc/svcsock.c      |    2
23 files changed, 308 insertions, 314 deletions
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 1583c5ef963f..2a546919d6fb 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -562,8 +562,6 @@ static int register_vlan_device(struct net_device *real_dev,
         if (err < 0)
                 goto out_free_newdev;
 
-        /* Account for reference in struct vlan_dev_info */
-        dev_hold(real_dev);
 #ifdef VLAN_DEBUG
         printk(VLAN_DBG "Allocated new device successfully, returning.\n");
 #endif
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 0eded176ce99..99292e8e1d0f 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -41,11 +41,11 @@ int br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
         skb_pull(skb, ETH_HLEN);
 
         if (dest[0] & 1)
-                br_flood_deliver(br, skb, 0);
+                br_flood_deliver(br, skb);
         else if ((dst = __br_fdb_get(br, dest)) != NULL)
                 br_deliver(dst->dst, skb);
         else
-                br_flood_deliver(br, skb, 0);
+                br_flood_deliver(br, skb);
 
         return 0;
 }
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index ada7f495445c..bdd7c35c3c7b 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -100,24 +100,13 @@ void br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
 }
 
 /* called under bridge lock */
-static void br_flood(struct net_bridge *br, struct sk_buff *skb, int clone,
+static void br_flood(struct net_bridge *br, struct sk_buff *skb,
         void (*__packet_hook)(const struct net_bridge_port *p,
                               struct sk_buff *skb))
 {
         struct net_bridge_port *p;
         struct net_bridge_port *prev;
 
-        if (clone) {
-                struct sk_buff *skb2;
-
-                if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL) {
-                        br->statistics.tx_dropped++;
-                        return;
-                }
-
-                skb = skb2;
-        }
-
         prev = NULL;
 
         list_for_each_entry_rcu(p, &br->port_list, list) {
@@ -148,13 +137,13 @@ static void br_flood(struct net_bridge *br, struct sk_buff *skb, int clone,
 
 
 /* called with rcu_read_lock */
-void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb, int clone)
+void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb)
 {
-        br_flood(br, skb, clone, __br_deliver);
+        br_flood(br, skb, __br_deliver);
 }
 
 /* called under bridge lock */
-void br_flood_forward(struct net_bridge *br, struct sk_buff *skb, int clone)
+void br_flood_forward(struct net_bridge *br, struct sk_buff *skb)
 {
-        br_flood(br, skb, clone, __br_forward);
+        br_flood(br, skb, __br_forward);
 }
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 6f468fc3357a..3a8a015c92e0 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -43,7 +43,7 @@ int br_handle_frame_finish(struct sk_buff *skb)
         struct net_bridge_port *p = rcu_dereference(skb->dev->br_port);
         struct net_bridge *br;
         struct net_bridge_fdb_entry *dst;
-        int passedup = 0;
+        struct sk_buff *skb2;
 
         if (!p || p->state == BR_STATE_DISABLED)
                 goto drop;
@@ -55,39 +55,35 @@ int br_handle_frame_finish(struct sk_buff *skb)
         if (p->state == BR_STATE_LEARNING)
                 goto drop;
 
-        if (br->dev->flags & IFF_PROMISC) {
-                struct sk_buff *skb2;
-
-                skb2 = skb_clone(skb, GFP_ATOMIC);
-                if (skb2 != NULL) {
-                        passedup = 1;
-                        br_pass_frame_up(br, skb2);
-                }
-        }
+        /* The packet skb2 goes to the local host (NULL to skip). */
+        skb2 = NULL;
+
+        if (br->dev->flags & IFF_PROMISC)
+                skb2 = skb;
+
+        dst = NULL;
 
         if (is_multicast_ether_addr(dest)) {
                 br->statistics.multicast++;
-                br_flood_forward(br, skb, !passedup);
-                if (!passedup)
-                        br_pass_frame_up(br, skb);
-                goto out;
+                skb2 = skb;
+        } else if ((dst = __br_fdb_get(br, dest)) && dst->is_local) {
+                skb2 = skb;
+                /* Do not forward the packet since it's local. */
+                skb = NULL;
         }
 
-        dst = __br_fdb_get(br, dest);
-        if (dst != NULL && dst->is_local) {
-                if (!passedup)
-                        br_pass_frame_up(br, skb);
-                else
-                        kfree_skb(skb);
-                goto out;
-        }
+        if (skb2 == skb)
+                skb2 = skb_clone(skb, GFP_ATOMIC);
 
-        if (dst != NULL) {
-                br_forward(dst->dst, skb);
-                goto out;
-        }
+        if (skb2)
+                br_pass_frame_up(br, skb2);
 
-        br_flood_forward(br, skb, 0);
+        if (skb) {
+                if (dst)
+                        br_forward(dst->dst, skb);
+                else
+                        br_flood_forward(br, skb);
+        }
 
 out:
         return 0;
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 3ee2022928e3..fc13130035e7 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -183,7 +183,7 @@ int nf_bridge_copy_header(struct sk_buff *skb)
         int err;
         int header_size = ETH_HLEN + nf_bridge_encap_header_len(skb);
 
-        err = skb_cow(skb, header_size);
+        err = skb_cow_head(skb, header_size);
         if (err)
                 return err;
 
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 21bf3a9a03fd..e6dc6f52990d 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -170,12 +170,8 @@ extern int br_dev_queue_push_xmit(struct sk_buff *skb);
 extern void br_forward(const struct net_bridge_port *to,
                 struct sk_buff *skb);
 extern int br_forward_finish(struct sk_buff *skb);
-extern void br_flood_deliver(struct net_bridge *br,
-                             struct sk_buff *skb,
-                             int clone);
-extern void br_flood_forward(struct net_bridge *br,
-                             struct sk_buff *skb,
-                             int clone);
+extern void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb);
+extern void br_flood_forward(struct net_bridge *br, struct sk_buff *skb);
 
 /* br_if.c */
 extern void br_port_carrier_check(struct net_bridge_port *p);
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 36fdea71d742..803d0c8826af 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -111,6 +111,9 @@
  *
  * 802.1Q/Q-in-Q support by Francesco Fondelli (FF) <francesco.fondelli@gmail.com>
  *
+ * Fixed src_mac command to set source mac of packet to value specified in
+ * command by Adit Ranadive <adit.262@gmail.com>
+ *
  */
 #include <linux/sys.h>
 #include <linux/types.h>
@@ -1451,8 +1454,11 @@ static ssize_t pktgen_if_write(struct file *file,
         }
         if (!strcmp(name, "src_mac")) {
                 char *v = valstr;
+                unsigned char old_smac[ETH_ALEN];
                 unsigned char *m = pkt_dev->src_mac;
 
+                memcpy(old_smac, pkt_dev->src_mac, ETH_ALEN);
+
                 len = strn_len(&user_buffer[i], sizeof(valstr) - 1);
                 if (len < 0) {
                         return len;
@@ -1481,6 +1487,10 @@ static ssize_t pktgen_if_write(struct file *file,
                         }
                 }
 
+                /* Set up Src MAC */
+                if (compare_ether_addr(old_smac, pkt_dev->src_mac))
+                        memcpy(&(pkt_dev->hh[6]), pkt_dev->src_mac, ETH_ALEN);
+
                 sprintf(pg_result, "OK: srcmac");
                 return count;
         }
diff --git a/net/core/sock.c b/net/core/sock.c
index cfed7d42c485..190de61cd648 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -362,6 +362,61 @@ struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
 }
 EXPORT_SYMBOL(sk_dst_check);
 
+static int sock_bindtodevice(struct sock *sk, char __user *optval, int optlen)
+{
+        int ret = -ENOPROTOOPT;
+#ifdef CONFIG_NETDEVICES
+        char devname[IFNAMSIZ];
+        int index;
+
+        /* Sorry... */
+        ret = -EPERM;
+        if (!capable(CAP_NET_RAW))
+                goto out;
+
+        ret = -EINVAL;
+        if (optlen < 0)
+                goto out;
+
+        /* Bind this socket to a particular device like "eth0",
+         * as specified in the passed interface name. If the
+         * name is "" or the option length is zero the socket
+         * is not bound.
+         */
+        if (optlen > IFNAMSIZ - 1)
+                optlen = IFNAMSIZ - 1;
+        memset(devname, 0, sizeof(devname));
+
+        ret = -EFAULT;
+        if (copy_from_user(devname, optval, optlen))
+                goto out;
+
+        if (devname[0] == '\0') {
+                index = 0;
+        } else {
+                struct net_device *dev = dev_get_by_name(devname);
+
+                ret = -ENODEV;
+                if (!dev)
+                        goto out;
+
+                index = dev->ifindex;
+                dev_put(dev);
+        }
+
+        lock_sock(sk);
+        sk->sk_bound_dev_if = index;
+        sk_dst_reset(sk);
+        release_sock(sk);
+
+        ret = 0;
+
+out:
+#endif
+
+        return ret;
+}
+
 /*
  * This is meant for all protocols to use and covers goings on
  * at the socket level. Everything here is generic.
@@ -390,6 +445,9 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
         }
 #endif
 
+        if (optname == SO_BINDTODEVICE)
+                return sock_bindtodevice(sk, optval, optlen);
+
         if (optlen < sizeof(int))
                 return -EINVAL;
 
@@ -578,54 +636,6 @@ set_rcvbuf:
                 ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
                 break;
 
-#ifdef CONFIG_NETDEVICES
-        case SO_BINDTODEVICE:
-        {
-                char devname[IFNAMSIZ];
-
-                /* Sorry... */
-                if (!capable(CAP_NET_RAW)) {
-                        ret = -EPERM;
-                        break;
-                }
-
-                /* Bind this socket to a particular device like "eth0",
-                 * as specified in the passed interface name. If the
-                 * name is "" or the option length is zero the socket
-                 * is not bound.
-                 */
-
-                if (!valbool) {
-                        sk->sk_bound_dev_if = 0;
-                } else {
-                        if (optlen > IFNAMSIZ - 1)
-                                optlen = IFNAMSIZ - 1;
-                        memset(devname, 0, sizeof(devname));
-                        if (copy_from_user(devname, optval, optlen)) {
-                                ret = -EFAULT;
-                                break;
-                        }
-
-                        /* Remove any cached route for this socket. */
-                        sk_dst_reset(sk);
-
-                        if (devname[0] == '\0') {
-                                sk->sk_bound_dev_if = 0;
-                        } else {
-                                struct net_device *dev = dev_get_by_name(devname);
-                                if (!dev) {
-                                        ret = -ENODEV;
-                                        break;
-                                }
-                                sk->sk_bound_dev_if = dev->ifindex;
-                                dev_put(dev);
-                        }
-                }
-                break;
-        }
-#endif
-
-
         case SO_ATTACH_FILTER:
                 ret = -EINVAL;
                 if (optlen == sizeof(struct sock_fprog)) {
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 28355350fb62..69d4bd10f9c6 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -505,6 +505,8 @@ send:
 out:
         up->len = 0;
         up->pending = 0;
+        if (!err)
+                UDP_INC_STATS_USER(UDP_MIB_OUTDATAGRAMS, up->pcflag);
         return err;
 }
 
@@ -693,10 +695,8 @@ out:
         ip_rt_put(rt);
         if (free)
                 kfree(ipc.opt);
-        if (!err) {
-                UDP_INC_STATS_USER(UDP_MIB_OUTDATAGRAMS, is_udplite);
+        if (!err)
                 return len;
-        }
         /*
          * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space. Reporting
          * ENOBUFS might not be good (it's not tunable per se), but otherwise
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 91ef3be5abad..45b4c82148a0 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1021,7 +1021,7 @@ int ipv6_dev_get_saddr(struct net_device *daddr_dev,
                                 hiscore.rule++;
                         }
                         if (ipv6_saddr_preferred(score.addr_type) ||
-                            (((ifa_result->flags &
+                            (((ifa->flags &
                               (IFA_F_DEPRECATED|IFA_F_OPTIMISTIC)) == 0))) {
                                 score.attrs |= IPV6_SADDR_SCORE_PREFERRED;
                                 if (!(hiscore.attrs & IPV6_SADDR_SCORE_PREFERRED)) {
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index e27383d855de..77167afa3455 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -882,11 +882,10 @@ back_from_confirm:
                         ip6_flush_pending_frames(sk);
                 else if (!(msg->msg_flags & MSG_MORE))
                         err = rawv6_push_pending_frames(sk, &fl, rp);
+                release_sock(sk);
         }
 done:
         dst_release(dst);
-        if (!inet->hdrincl)
-                release_sock(sk);
 out:
         fl6_sock_release(flowlabel);
         return err<0?err:len;
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 4210951edb6e..c347f3e30e2e 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -555,6 +555,8 @@ static int udp_v6_push_pending_frames(struct sock *sk)
 out:
         up->len = 0;
         up->pending = 0;
+        if (!err)
+                UDP6_INC_STATS_USER(UDP_MIB_OUTDATAGRAMS, up->pcflag);
         return err;
 }
 
@@ -823,10 +825,8 @@ do_append_data:
         release_sock(sk);
 out:
         fl6_sock_release(flowlabel);
-        if (!err) {
-                UDP6_INC_STATS_USER(UDP_MIB_OUTDATAGRAMS, is_udplite);
+        if (!err)
                 return len;
-        }
         /*
          * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space. Reporting
          * ENOBUFS might not be good (it's not tunable per se), but otherwise
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index feef366cad5d..72cdb0fade20 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -68,7 +68,7 @@ static int tcf_dump_walker(struct sk_buff *skb, struct netlink_callback *cb,
         int err = 0, index = -1,i = 0, s_i = 0, n_i = 0;
         struct rtattr *r ;
 
-        read_lock(hinfo->lock);
+        read_lock_bh(hinfo->lock);
 
         s_i = cb->args[0];
 
@@ -96,7 +96,7 @@ static int tcf_dump_walker(struct sk_buff *skb, struct netlink_callback *cb,
                 }
         }
 done:
-        read_unlock(hinfo->lock);
+        read_unlock_bh(hinfo->lock);
         if (n_i)
                 cb->args[0] += n_i;
         return n_i;
@@ -156,13 +156,13 @@ struct tcf_common *tcf_hash_lookup(u32 index, struct tcf_hashinfo *hinfo)
 {
         struct tcf_common *p;
 
-        read_lock(hinfo->lock);
+        read_lock_bh(hinfo->lock);
         for (p = hinfo->htab[tcf_hash(index, hinfo->hmask)]; p;
              p = p->tcfc_next) {
                 if (p->tcfc_index == index)
                         break;
         }
-        read_unlock(hinfo->lock);
+        read_unlock_bh(hinfo->lock);
 
         return p;
 }
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 6085be578459..17f6f27e28a2 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -56,7 +56,7 @@ static int tcf_act_police_walker(struct sk_buff *skb, struct netlink_callback *c
         int err = 0, index = -1, i = 0, s_i = 0, n_i = 0;
         struct rtattr *r;
 
-        read_lock(&police_lock);
+        read_lock_bh(&police_lock);
 
         s_i = cb->args[0];
 
@@ -85,7 +85,7 @@ static int tcf_act_police_walker(struct sk_buff *skb, struct netlink_callback *c
                 }
         }
 done:
-        read_unlock(&police_lock);
+        read_unlock_bh(&police_lock);
         if (n_i)
                 cb->args[0] += n_i;
         return n_i;
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index e38c2839b25c..cbef3bbfc20f 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -380,7 +380,7 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
         struct cbq_sched_data *q = qdisc_priv(sch);
         int len = skb->len;
-        int ret;
+        int uninitialized_var(ret);
         struct cbq_class *cl = cbq_classify(skb, sch, &ret);
 
 #ifdef CONFIG_NET_CLS_ACT
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 2ad1caf1ea42..9bad8ba0feda 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -99,7 +99,6 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
 
         /* Initialize the bind addr area. */
         sctp_bind_addr_init(&asoc->base.bind_addr, ep->base.bind_addr.port);
-        rwlock_init(&asoc->base.addr_lock);
 
         asoc->state = SCTP_STATE_CLOSED;
 
@@ -937,8 +936,6 @@ struct sctp_transport *sctp_assoc_is_match(struct sctp_association *asoc,
 {
         struct sctp_transport *transport;
 
-        sctp_read_lock(&asoc->base.addr_lock);
-
         if ((htons(asoc->base.bind_addr.port) == laddr->v4.sin_port) &&
             (htons(asoc->peer.port) == paddr->v4.sin_port)) {
                 transport = sctp_assoc_lookup_paddr(asoc, paddr);
@@ -952,7 +949,6 @@ struct sctp_transport *sctp_assoc_is_match(struct sctp_association *asoc,
                 transport = NULL;
 
 out:
-        sctp_read_unlock(&asoc->base.addr_lock);
         return transport;
 }
 
@@ -1376,19 +1372,13 @@ int sctp_assoc_set_bind_addr_from_cookie(struct sctp_association *asoc,
 int sctp_assoc_lookup_laddr(struct sctp_association *asoc,
                             const union sctp_addr *laddr)
 {
-        int found;
+        int found = 0;
 
-        sctp_read_lock(&asoc->base.addr_lock);
         if ((asoc->base.bind_addr.port == ntohs(laddr->v4.sin_port)) &&
             sctp_bind_addr_match(&asoc->base.bind_addr, laddr,
-                                 sctp_sk(asoc->base.sk))) {
+                                 sctp_sk(asoc->base.sk)))
                 found = 1;
-                goto out;
-        }
 
-        found = 0;
-out:
-        sctp_read_unlock(&asoc->base.addr_lock);
         return found;
 }
 
diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c
index fdb287a9e2e2..d35cbf5aae33 100644
--- a/net/sctp/bind_addr.c
+++ b/net/sctp/bind_addr.c
@@ -163,9 +163,15 @@ int sctp_add_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *new,
         addr->a.v4.sin_port = htons(bp->port);
 
         addr->use_as_src = use_as_src;
+        addr->valid = 1;
 
         INIT_LIST_HEAD(&addr->list);
-        list_add_tail(&addr->list, &bp->address_list);
+        INIT_RCU_HEAD(&addr->rcu);
+
+        /* We always hold a socket lock when calling this function,
+         * and that acts as a writer synchronizing lock.
+         */
+        list_add_tail_rcu(&addr->list, &bp->address_list);
         SCTP_DBG_OBJCNT_INC(addr);
 
         return 0;
@@ -174,23 +180,35 @@ int sctp_add_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *new,
 /* Delete an address from the bind address list in the SCTP_bind_addr
  * structure.
  */
-int sctp_del_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *del_addr)
+int sctp_del_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *del_addr,
+                       void (*rcu_call)(struct rcu_head *head,
+                                        void (*func)(struct rcu_head *head)))
 {
-        struct list_head *pos, *temp;
-        struct sctp_sockaddr_entry *addr;
+        struct sctp_sockaddr_entry *addr, *temp;
 
-        list_for_each_safe(pos, temp, &bp->address_list) {
-                addr = list_entry(pos, struct sctp_sockaddr_entry, list);
+        /* We hold the socket lock when calling this function,
+         * and that acts as a writer synchronizing lock.
+         */
+        list_for_each_entry_safe(addr, temp, &bp->address_list, list) {
                 if (sctp_cmp_addr_exact(&addr->a, del_addr)) {
                         /* Found the exact match. */
-                        list_del(pos);
-                        kfree(addr);
-                        SCTP_DBG_OBJCNT_DEC(addr);
-
-                        return 0;
+                        addr->valid = 0;
+                        list_del_rcu(&addr->list);
+                        break;
                 }
         }
 
+        /* Call the rcu callback provided in the args. This function is
+         * called by both BH packet processing and user side socket option
+         * processing, but it works on different lists in those 2 contexts.
+         * Each context provides it's own callback, whether call_rcu_bh()
+         * or call_rcu(), to make sure that we wait for an appropriate time.
+         */
+        if (addr && !addr->valid) {
+                rcu_call(&addr->rcu, sctp_local_addr_free);
+                SCTP_DBG_OBJCNT_DEC(addr);
+        }
+
         return -EINVAL;
 }
 
@@ -300,15 +318,20 @@ int sctp_bind_addr_match(struct sctp_bind_addr *bp,
                          struct sctp_sock *opt)
 {
         struct sctp_sockaddr_entry *laddr;
-        struct list_head *pos;
+        int match = 0;
 
-        list_for_each(pos, &bp->address_list) {
-                laddr = list_entry(pos, struct sctp_sockaddr_entry, list);
-                if (opt->pf->cmp_addr(&laddr->a, addr, opt))
-                        return 1;
+        rcu_read_lock();
+        list_for_each_entry_rcu(laddr, &bp->address_list, list) {
+                if (!laddr->valid)
+                        continue;
+                if (opt->pf->cmp_addr(&laddr->a, addr, opt)) {
+                        match = 1;
+                        break;
+                }
         }
+        rcu_read_unlock();
 
-        return 0;
+        return match;
 }
 
 /* Find the first address in the bind address list that is not present in
@@ -323,18 +346,19 @@ union sctp_addr *sctp_find_unmatch_addr(struct sctp_bind_addr *bp,
         union sctp_addr *addr;
         void *addr_buf;
         struct sctp_af *af;
-        struct list_head *pos;
         int i;
 
-        list_for_each(pos, &bp->address_list) {
-                laddr = list_entry(pos, struct sctp_sockaddr_entry, list);
-
+        /* This is only called sctp_send_asconf_del_ip() and we hold
+         * the socket lock in that code patch, so that address list
+         * can't change.
+         */
+        list_for_each_entry(laddr, &bp->address_list, list) {
                 addr_buf = (union sctp_addr *)addrs;
                 for (i = 0; i < addrcnt; i++) {
                         addr = (union sctp_addr *)addr_buf;
                         af = sctp_get_af_specific(addr->v4.sin_family);
                         if (!af)
-                                return NULL;
+                                break;
 
                         if (opt->pf->cmp_addr(&laddr->a, addr, opt))
                                 break;
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 1404a9e2e78f..8f485a0d14bd 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -92,7 +92,6 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
 
         /* Initialize the bind addr area */
         sctp_bind_addr_init(&ep->base.bind_addr, 0);
-        rwlock_init(&ep->base.addr_lock);
 
         /* Remember who we are attached to. */
         ep->base.sk = sk;
@@ -225,21 +224,14 @@ void sctp_endpoint_put(struct sctp_endpoint *ep)
 struct sctp_endpoint *sctp_endpoint_is_match(struct sctp_endpoint *ep,
                                              const union sctp_addr *laddr)
 {
-        struct sctp_endpoint *retval;
+        struct sctp_endpoint *retval = NULL;
 
-        sctp_read_lock(&ep->base.addr_lock);
         if (htons(ep->base.bind_addr.port) == laddr->v4.sin_port) {
                 if (sctp_bind_addr_match(&ep->base.bind_addr, laddr,
-                                         sctp_sk(ep->base.sk))) {
+                                         sctp_sk(ep->base.sk)))
                         retval = ep;
-                        goto out;
-                }
         }
 
-        retval = NULL;
-
-out:
-        sctp_read_unlock(&ep->base.addr_lock);
         return retval;
 }
 
@@ -261,9 +253,7 @@ static struct sctp_association *__sctp_endpoint_lookup_assoc(
         list_for_each(pos, &ep->asocs) {
                 asoc = list_entry(pos, struct sctp_association, asocs);
                 if (rport == asoc->peer.port) {
-                        sctp_read_lock(&asoc->base.addr_lock);
                         *transport = sctp_assoc_lookup_paddr(asoc, paddr);
-                        sctp_read_unlock(&asoc->base.addr_lock);
 
                         if (*transport)
                                 return asoc;
@@ -295,20 +285,17 @@ struct sctp_association *sctp_endpoint_lookup_assoc(
 int sctp_endpoint_is_peeled_off(struct sctp_endpoint *ep,
                                 const union sctp_addr *paddr)
 {
-        struct list_head *pos;
         struct sctp_sockaddr_entry *addr;
         struct sctp_bind_addr *bp;
 
-        sctp_read_lock(&ep->base.addr_lock);
         bp = &ep->base.bind_addr;
-        list_for_each(pos, &bp->address_list) {
-                addr = list_entry(pos, struct sctp_sockaddr_entry, list);
-                if (sctp_has_association(&addr->a, paddr)) {
-                        sctp_read_unlock(&ep->base.addr_lock);
+        /* This function is called with the socket lock held,
+         * so the address_list can not change.
+         */
+        list_for_each_entry(addr, &bp->address_list, list) {
+                if (sctp_has_association(&addr->a, paddr))
                         return 1;
-                }
         }
-        sctp_read_unlock(&ep->base.addr_lock);
 
         return 0;
 }
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index f8aa23dda1c1..670fd2740b89 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -77,13 +77,18 @@
 
 #include <asm/uaccess.h>
 
-/* Event handler for inet6 address addition/deletion events. */
+/* Event handler for inet6 address addition/deletion events.
+ * The sctp_local_addr_list needs to be protocted by a spin lock since
+ * multiple notifiers (say IPv4 and IPv6) may be running at the same
+ * time and thus corrupt the list.
+ * The reader side is protected with RCU.
+ */
 static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,
                                 void *ptr)
 {
         struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
-        struct sctp_sockaddr_entry *addr;
-        struct list_head *pos, *temp;
+        struct sctp_sockaddr_entry *addr = NULL;
+        struct sctp_sockaddr_entry *temp;
 
         switch (ev) {
         case NETDEV_UP:
@@ -94,19 +99,26 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,
                         memcpy(&addr->a.v6.sin6_addr, &ifa->addr,
                                 sizeof(struct in6_addr));
                         addr->a.v6.sin6_scope_id = ifa->idev->dev->ifindex;
-                        list_add_tail(&addr->list, &sctp_local_addr_list);
+                        addr->valid = 1;
+                        spin_lock_bh(&sctp_local_addr_lock);
+                        list_add_tail_rcu(&addr->list, &sctp_local_addr_list);
+                        spin_unlock_bh(&sctp_local_addr_lock);
                 }
                 break;
         case NETDEV_DOWN:
-                list_for_each_safe(pos, temp, &sctp_local_addr_list) {
-                        addr = list_entry(pos, struct sctp_sockaddr_entry, list);
-                        if (ipv6_addr_equal(&addr->a.v6.sin6_addr, &ifa->addr)) {
-                                list_del(pos);
-                                kfree(addr);
+                spin_lock_bh(&sctp_local_addr_lock);
+                list_for_each_entry_safe(addr, temp,
+                                        &sctp_local_addr_list, list) {
+                        if (ipv6_addr_equal(&addr->a.v6.sin6_addr,
+                                            &ifa->addr)) {
+                                addr->valid = 0;
+                                list_del_rcu(&addr->list);
                                 break;
                         }
                 }
-
+                spin_unlock_bh(&sctp_local_addr_lock);
+                if (addr && !addr->valid)
+                        call_rcu(&addr->rcu, sctp_local_addr_free);
                 break;
         }
 
@@ -290,9 +302,7 @@ static void sctp_v6_get_saddr(struct sctp_association *asoc,
                                union sctp_addr *saddr)
 {
         struct sctp_bind_addr *bp;
-        rwlock_t *addr_lock;
         struct sctp_sockaddr_entry *laddr;
-        struct list_head *pos;
         sctp_scope_t scope;
         union sctp_addr *baddr = NULL;
         __u8 matchlen = 0;
@@ -312,14 +322,14 @@ static void sctp_v6_get_saddr(struct sctp_association *asoc,
         scope = sctp_scope(daddr);
 
         bp = &asoc->base.bind_addr;
-        addr_lock = &asoc->base.addr_lock;
 
         /* Go through the bind address list and find the best source address
          * that matches the scope of the destination address.
          */
-        sctp_read_lock(addr_lock);
-        list_for_each(pos, &bp->address_list) {
-                laddr = list_entry(pos, struct sctp_sockaddr_entry, list);
+        rcu_read_lock();
+        list_for_each_entry_rcu(laddr, &bp->address_list, list) {
+                if (!laddr->valid)
+                        continue;
                 if ((laddr->use_as_src) &&
                     (laddr->a.sa.sa_family == AF_INET6) &&
                     (scope <= sctp_scope(&laddr->a))) {
@@ -341,7 +351,7 @@ static void sctp_v6_get_saddr(struct sctp_association *asoc,
                           __FUNCTION__, asoc, NIP6(daddr->v6.sin6_addr));
         }
 
-        sctp_read_unlock(addr_lock);
+        rcu_read_unlock();
 }
 
 /* Make a copy of all potential local addresses. */
@@ -367,7 +377,9 @@ static void sctp_v6_copy_addrlist(struct list_head *addrlist,
                         addr->a.v6.sin6_port = 0;
                         addr->a.v6.sin6_addr = ifp->addr;
                         addr->a.v6.sin6_scope_id = dev->ifindex;
+                        addr->valid = 1;
                         INIT_LIST_HEAD(&addr->list);
+                        INIT_RCU_HEAD(&addr->rcu);
                         list_add_tail(&addr->list, addrlist);
                 }
         }
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index e98579b788b8..3d036cdfae41 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -153,6 +153,9 @@ static void sctp_v4_copy_addrlist(struct list_head *addrlist,
                         addr->a.v4.sin_family = AF_INET;
                         addr->a.v4.sin_port = 0;
                         addr->a.v4.sin_addr.s_addr = ifa->ifa_local;
+                        addr->valid = 1;
+                        INIT_LIST_HEAD(&addr->list);
+                        INIT_RCU_HEAD(&addr->rcu);
                         list_add_tail(&addr->list, addrlist);
                 }
         }
@@ -192,16 +195,24 @@ static void sctp_free_local_addr_list(void)
         }
 }
 
+void sctp_local_addr_free(struct rcu_head *head)
+{
+        struct sctp_sockaddr_entry *e = container_of(head,
+                                struct sctp_sockaddr_entry, rcu);
+        kfree(e);
+}
+
 /* Copy the local addresses which are valid for 'scope' into 'bp'. */
 int sctp_copy_local_addr_list(struct sctp_bind_addr *bp, sctp_scope_t scope,
                               gfp_t gfp, int copy_flags)
 {
         struct sctp_sockaddr_entry *addr;
         int error = 0;
-        struct list_head *pos, *temp;
 
-        list_for_each_safe(pos, temp, &sctp_local_addr_list) {
-                addr = list_entry(pos, struct sctp_sockaddr_entry, list);
+        rcu_read_lock();
+        list_for_each_entry_rcu(addr, &sctp_local_addr_list, list) {
+                if (!addr->valid)
+                        continue;
                 if (sctp_in_scope(&addr->a, scope)) {
                         /* Now that the address is in scope, check to see if
                          * the address type is really supported by the local
@@ -213,7 +224,7 @@ int sctp_copy_local_addr_list(struct sctp_bind_addr *bp, sctp_scope_t scope,
                              (copy_flags & SCTP_ADDR6_ALLOWED) &&
                              (copy_flags & SCTP_ADDR6_PEERSUPP)))) {
                                 error = sctp_add_bind_addr(bp, &addr->a, 1,
-                                                GFP_ATOMIC);
+                                                           GFP_ATOMIC);
                                 if (error)
                                         goto end_copy;
                         }
@@ -221,6 +232,7 @@ int sctp_copy_local_addr_list(struct sctp_bind_addr *bp, sctp_scope_t scope,
         }
 
 end_copy:
+        rcu_read_unlock();
         return error;
 }
 
@@ -416,9 +428,7 @@ static struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc,
         struct rtable *rt;
         struct flowi fl;
         struct sctp_bind_addr *bp;
-        rwlock_t *addr_lock;
         struct sctp_sockaddr_entry *laddr;
-        struct list_head *pos;
         struct dst_entry *dst = NULL;
         union sctp_addr dst_saddr;
 
@@ -447,23 +457,20 @@ static struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc,
                 goto out;
 
         bp = &asoc->base.bind_addr;
-        addr_lock = &asoc->base.addr_lock;
 
         if (dst) {
                 /* Walk through the bind address list and look for a bind
                  * address that matches the source address of the returned dst.
                  */
-                sctp_read_lock(addr_lock);
-                list_for_each(pos, &bp->address_list) {
-                        laddr = list_entry(pos, struct sctp_sockaddr_entry,
-                                           list);
-                        if (!laddr->use_as_src)
+                rcu_read_lock();
+                list_for_each_entry_rcu(laddr, &bp->address_list, list) {
+                        if (!laddr->valid || !laddr->use_as_src)
                                 continue;
                         sctp_v4_dst_saddr(&dst_saddr, dst, htons(bp->port));
                         if (sctp_v4_cmp_addr(&dst_saddr, &laddr->a))
                                 goto out_unlock;
                 }
-                sctp_read_unlock(addr_lock);
+                rcu_read_unlock();
 
                 /* None of the bound addresses match the source address of the
                  * dst. So release it.
@@ -475,10 +482,10 @@ static struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc,
         /* Walk through the bind address list and try to get a dst that
          * matches a bind address as the source address.
          */
-        sctp_read_lock(addr_lock);
-        list_for_each(pos, &bp->address_list) {
-                laddr = list_entry(pos, struct sctp_sockaddr_entry, list);
-
+        rcu_read_lock();
+        list_for_each_entry_rcu(laddr, &bp->address_list, list) {
+                if (!laddr->valid)
+                        continue;
                 if ((laddr->use_as_src) &&
                     (AF_INET == laddr->a.sa.sa_family)) {
                         fl.fl4_src = laddr->a.v4.sin_addr.s_addr;
@@ -490,7 +497,7 @@ static struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc,
         }
 
 out_unlock:
-        sctp_read_unlock(addr_lock);
+        rcu_read_unlock();
 out:
         if (dst)
                 SCTP_DEBUG_PRINTK("rt_dst:%u.%u.%u.%u, rt_src:%u.%u.%u.%u\n",
@@ -600,13 +607,18 @@ static void sctp_v4_seq_dump_addr(struct seq_file *seq, union sctp_addr *addr)
         seq_printf(seq, "%d.%d.%d.%d ", NIPQUAD(addr->v4.sin_addr));
 }
 
-/* Event handler for inet address addition/deletion events. */
+/* Event handler for inet address addition/deletion events.
+ * The sctp_local_addr_list needs to be protocted by a spin lock since
+ * multiple notifiers (say IPv4 and IPv6) may be running at the same
+ * time and thus corrupt the list.
+ * The reader side is protected with RCU.
+ */
 static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev,
                                void *ptr)
 {
         struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
-        struct sctp_sockaddr_entry *addr;
-        struct list_head *pos, *temp;
+        struct sctp_sockaddr_entry *addr = NULL;
+        struct sctp_sockaddr_entry *temp;
 
         switch (ev) {
         case NETDEV_UP:
@@ -615,19 +627,25 @@ static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev,
                         addr->a.v4.sin_family = AF_INET;
                         addr->a.v4.sin_port = 0;
                         addr->a.v4.sin_addr.s_addr = ifa->ifa_local;
-                        list_add_tail(&addr->list, &sctp_local_addr_list);
+                        addr->valid = 1;
+                        spin_lock_bh(&sctp_local_addr_lock);
+                        list_add_tail_rcu(&addr->list, &sctp_local_addr_list);
+                        spin_unlock_bh(&sctp_local_addr_lock);
                 }
                 break;
         case NETDEV_DOWN:
-                list_for_each_safe(pos, temp, &sctp_local_addr_list) {
-                        addr = list_entry(pos, struct sctp_sockaddr_entry, list);
+                spin_lock_bh(&sctp_local_addr_lock);
+                list_for_each_entry_safe(addr, temp,
+                                        &sctp_local_addr_list, list) {
                         if (addr->a.v4.sin_addr.s_addr == ifa->ifa_local) {
-                                list_del(pos);
-                                kfree(addr);
+                                addr->valid = 0;
+                                list_del_rcu(&addr->list);
                                 break;
                         }
                 }
-
+                spin_unlock_bh(&sctp_local_addr_lock);
+                if (addr && !addr->valid)
+                        call_rcu(&addr->rcu, sctp_local_addr_free);
                 break;
         }
 
@@ -1160,6 +1178,7 @@ SCTP_STATIC __init int sctp_init(void)
 
         /* Initialize the local address list. */
         INIT_LIST_HEAD(&sctp_local_addr_list);
+        spin_lock_init(&sctp_local_addr_lock);
         sctp_get_local_addr_list();
 
         /* Register notifier for inet address additions/deletions. */
@@ -1227,6 +1246,9 @@ SCTP_STATIC __exit void sctp_exit(void)
         sctp_v6_del_protocol();
         inet_del_protocol(&sctp_protocol, IPPROTO_SCTP);
 
+        /* Unregister notifier for inet address additions/deletions. */
+        unregister_inetaddr_notifier(&sctp_inetaddr_notifier);
+
         /* Free the local address list. */
         sctp_free_local_addr_list();
 
@@ -1240,9 +1262,6 @@ SCTP_STATIC __exit void sctp_exit(void)
         inet_unregister_protosw(&sctp_stream_protosw);
         inet_unregister_protosw(&sctp_seqpacket_protosw);
 
-        /* Unregister notifier for inet address additions/deletions. */
-        unregister_inetaddr_notifier(&sctp_inetaddr_notifier);
-
         sctp_sysctl_unregister();
         list_del(&sctp_ipv4_specific.list);
 
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 79856c924525..2e34220d94cd 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -1531,7 +1531,7 @@ no_hmac:
         /* Also, add the destination address. */
         if (list_empty(&retval->base.bind_addr.address_list)) {
                 sctp_add_bind_addr(&retval->base.bind_addr, &chunk->dest, 1,
-                                GFP_ATOMIC);
+                                   GFP_ATOMIC);
         }
 
         retval->next_tsn = retval->c.initial_tsn;
@@ -2613,22 +2613,16 @@ static int sctp_asconf_param_success(struct sctp_association *asoc,
 
         switch (asconf_param->param_hdr.type) {
         case SCTP_PARAM_ADD_IP:
-                sctp_local_bh_disable();
-                sctp_write_lock(&asoc->base.addr_lock);
-                list_for_each(pos, &bp->address_list) {
-                        saddr = list_entry(pos, struct sctp_sockaddr_entry, list);
+                /* This is always done in BH context with a socket lock
+                 * held, so the list can not change.
+                 */
+                list_for_each_entry(saddr, &bp->address_list, list) {
                         if (sctp_cmp_addr_exact(&saddr->a, &addr))
                                 saddr->use_as_src = 1;
                 }
-                sctp_write_unlock(&asoc->base.addr_lock);
-                sctp_local_bh_enable();
                 break;
         case SCTP_PARAM_DEL_IP:
-                sctp_local_bh_disable();
-                sctp_write_lock(&asoc->base.addr_lock);
-                retval = sctp_del_bind_addr(bp, &addr);
-                sctp_write_unlock(&asoc->base.addr_lock);
-                sctp_local_bh_enable();
+                retval = sctp_del_bind_addr(bp, &addr, call_rcu_bh);
                 list_for_each(pos, &asoc->peer.transport_addr_list) {
                         transport = list_entry(pos, struct sctp_transport,
                                         transports);
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 33354602ae86..772fbfb4bfda 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -367,14 +367,10 @@ SCTP_STATIC int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
         if (!bp->port)
                 bp->port = inet_sk(sk)->num;
 
-        /* Add the address to the bind address list. */
-        sctp_local_bh_disable();
-        sctp_write_lock(&ep->base.addr_lock);
-
-        /* Use GFP_ATOMIC since BHs are disabled. */
+        /* Add the address to the bind address list.
+         * Use GFP_ATOMIC since BHs will be disabled.
+         */
         ret = sctp_add_bind_addr(bp, addr, 1, GFP_ATOMIC);
-        sctp_write_unlock(&ep->base.addr_lock);
-        sctp_local_bh_enable();
 
         /* Copy back into socket for getsockname() use. */
         if (!ret) {
@@ -544,15 +540,12 @@ static int sctp_send_asconf_add_ip(struct sock *sk,
                 if (i < addrcnt)
                         continue;
 
-                /* Use the first address in bind addr list of association as
-                 * Address Parameter of ASCONF CHUNK.
+                /* Use the first valid address in bind addr list of
+                 * association as Address Parameter of ASCONF CHUNK.
                  */
-                sctp_read_lock(&asoc->base.addr_lock);
                 bp = &asoc->base.bind_addr;
                 p = bp->address_list.next;
                 laddr = list_entry(p, struct sctp_sockaddr_entry, list);
-                sctp_read_unlock(&asoc->base.addr_lock);
-
                 chunk = sctp_make_asconf_update_ip(asoc, &laddr->a, addrs,
                                                    addrcnt, SCTP_PARAM_ADD_IP);
                 if (!chunk) {
@@ -567,8 +560,6 @@ static int sctp_send_asconf_add_ip(struct sock *sk,
                 /* Add the new addresses to the bind address list with
                  * use_as_src set to 0.
                  */
-                sctp_local_bh_disable();
-                sctp_write_lock(&asoc->base.addr_lock);
                 addr_buf = addrs;
                 for (i = 0; i < addrcnt; i++) {
                         addr = (union sctp_addr *)addr_buf;
@@ -578,8 +569,6 @@ static int sctp_send_asconf_add_ip(struct sock *sk,
                                             GFP_ATOMIC);
                         addr_buf += af->sockaddr_len;
                 }
-                sctp_write_unlock(&asoc->base.addr_lock);
-                sctp_local_bh_enable();
         }
 
 out:
@@ -651,13 +640,7 @@ static int sctp_bindx_rem(struct sock *sk, struct sockaddr *addrs, int addrcnt)
                  * socket routing and failover schemes. Refer to comments in
                  * sctp_do_bind(). -daisy
                  */
-                sctp_local_bh_disable();
-                sctp_write_lock(&ep->base.addr_lock);
-
-                retval = sctp_del_bind_addr(bp, sa_addr);
-
-                sctp_write_unlock(&ep->base.addr_lock);
-                sctp_local_bh_enable();
+                retval = sctp_del_bind_addr(bp, sa_addr, call_rcu);
 
                 addr_buf += af->sockaddr_len;
 err_bindx_rem:
@@ -748,14 +731,16 @@ static int sctp_send_asconf_del_ip(struct sock *sk,
                  * make sure that we do not delete all the addresses in the
                  * association.
                  */
-                sctp_read_lock(&asoc->base.addr_lock);
                 bp = &asoc->base.bind_addr;
                 laddr = sctp_find_unmatch_addr(bp, (union sctp_addr *)addrs,
                                                addrcnt, sp);
-                sctp_read_unlock(&asoc->base.addr_lock);
                 if (!laddr)
                         continue;
 
+                /* We do not need RCU protection throughout this loop
+                 * because this is done under a socket lock from the
+                 * setsockopt call.
+                 */
                 chunk = sctp_make_asconf_update_ip(asoc, laddr, addrs, addrcnt,
                                                    SCTP_PARAM_DEL_IP);
                 if (!chunk) {
@@ -766,23 +751,16 @@ static int sctp_send_asconf_del_ip(struct sock *sk,
                 /* Reset use_as_src flag for the addresses in the bind address
                  * list that are to be deleted.
                  */
-                sctp_local_bh_disable();
-                sctp_write_lock(&asoc->base.addr_lock);
                 addr_buf = addrs;
                 for (i = 0; i < addrcnt; i++) {
                         laddr = (union sctp_addr *)addr_buf;
                         af = sctp_get_af_specific(laddr->v4.sin_family);
-                        list_for_each(pos1, &bp->address_list) {
-                                saddr = list_entry(pos1,
-                                                   struct sctp_sockaddr_entry,
-                                                   list);
+                        list_for_each_entry(saddr, &bp->address_list, list) {
                                 if (sctp_cmp_addr_exact(&saddr->a, laddr))
                                         saddr->use_as_src = 0;
                         }
                         addr_buf += af->sockaddr_len;
                 }
-                sctp_write_unlock(&asoc->base.addr_lock);
-                sctp_local_bh_enable();
 
                 /* Update the route and saddr entries for all the transports
                  * as some of the addresses in the bind address list are
@@ -4059,9 +4037,7 @@ static int sctp_getsockopt_local_addrs_num_old(struct sock *sk, int len,
         sctp_assoc_t id;
         struct sctp_bind_addr *bp;
         struct sctp_association *asoc;
-        struct list_head *pos, *temp;
         struct sctp_sockaddr_entry *addr;
-        rwlock_t *addr_lock;
         int cnt = 0;
 
         if (len < sizeof(sctp_assoc_t))
@@ -4078,17 +4054,13 @@ static int sctp_getsockopt_local_addrs_num_old(struct sock *sk, int len,
          */
         if (0 == id) {
                 bp = &sctp_sk(sk)->ep->base.bind_addr;
-                addr_lock = &sctp_sk(sk)->ep->base.addr_lock;
         } else {
                 asoc = sctp_id2assoc(sk, id);
                 if (!asoc)
                         return -EINVAL;
                 bp = &asoc->base.bind_addr;
-                addr_lock = &asoc->base.addr_lock;
         }
 
-        sctp_read_lock(addr_lock);
-
         /* If the endpoint is bound to 0.0.0.0 or ::0, count the valid
          * addresses from the global local address list.
          */
@@ -4096,27 +4068,33 @@ static int sctp_getsockopt_local_addrs_num_old(struct sock *sk, int len,
                 addr = list_entry(bp->address_list.next,
                                   struct sctp_sockaddr_entry, list);
                 if (sctp_is_any(&addr->a)) {
-                        list_for_each_safe(pos, temp, &sctp_local_addr_list) {
-                                addr = list_entry(pos,
-                                                  struct sctp_sockaddr_entry,
-                                                  list);
+                        rcu_read_lock();
+                        list_for_each_entry_rcu(addr,
+                                                &sctp_local_addr_list, list) {
+                                if (!addr->valid)
+                                        continue;
+
                                 if ((PF_INET == sk->sk_family) &&
                                     (AF_INET6 == addr->a.sa.sa_family))
                                         continue;
+
                                 cnt++;
                         }
+                        rcu_read_unlock();
                 } else {
                         cnt = 1;
                 }
                 goto done;
         }
 
-        list_for_each(pos, &bp->address_list) {
+        /* Protection on the bound address list is not needed,
+         * since in the socket option context we hold the socket lock,
+         * so there is no way that the bound address list can change.
+         */
+        list_for_each_entry(addr, &bp->address_list, list) {
                 cnt ++;
         }
-
 done:
-        sctp_read_unlock(addr_lock);
         return cnt;
 }
 
@@ -4127,14 +4105,16 @@ static int sctp_copy_laddrs_old(struct sock *sk, __u16 port,
                                       int max_addrs, void *to,
                                       int *bytes_copied)
 {
-        struct list_head *pos, *next;
         struct sctp_sockaddr_entry *addr;
         union sctp_addr temp;
         int cnt = 0;
         int addrlen;
 
-        list_for_each_safe(pos, next, &sctp_local_addr_list) {
-                addr = list_entry(pos, struct sctp_sockaddr_entry, list);
+        rcu_read_lock();
+        list_for_each_entry_rcu(addr, &sctp_local_addr_list, list) {
+                if (!addr->valid)
+                        continue;
+
                 if ((PF_INET == sk->sk_family) &&
                     (AF_INET6 == addr->a.sa.sa_family))
                         continue;
@@ -4149,6 +4129,7 @@ static int sctp_copy_laddrs_old(struct sock *sk, __u16 port,
                 cnt ++;
                 if (cnt >= max_addrs) break;
         }
+        rcu_read_unlock();
 
         return cnt;
 }
@@ -4156,14 +4137,16 @@ static int sctp_copy_laddrs_old(struct sock *sk, __u16 port,
 static int sctp_copy_laddrs(struct sock *sk, __u16 port, void *to,
                             size_t space_left, int *bytes_copied)
 {
-        struct list_head *pos, *next;
         struct sctp_sockaddr_entry *addr;
         union sctp_addr temp;
         int cnt = 0;
         int addrlen;
 
-        list_for_each_safe(pos, next, &sctp_local_addr_list) {
-                addr = list_entry(pos, struct sctp_sockaddr_entry, list);
+        rcu_read_lock();
+        list_for_each_entry_rcu(addr, &sctp_local_addr_list, list) {
+                if (!addr->valid)
+                        continue;
+
                 if ((PF_INET == sk->sk_family) &&
                     (AF_INET6 == addr->a.sa.sa_family))
                         continue;
@@ -4171,8 +4154,10 @@ static int sctp_copy_laddrs(struct sock *sk, __u16 port, void *to,
4171 sctp_get_pf_specific(sk->sk_family)->addr_v4map(sctp_sk(sk), 4154 sctp_get_pf_specific(sk->sk_family)->addr_v4map(sctp_sk(sk),
4172 &temp); 4155 &temp);
4173 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len; 4156 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
4174 if (space_left < addrlen) 4157 if (space_left < addrlen) {
4175 return -ENOMEM; 4158 cnt = -ENOMEM;
4159 break;
4160 }
4176 memcpy(to, &temp, addrlen); 4161 memcpy(to, &temp, addrlen);
4177 4162
4178 to += addrlen; 4163 to += addrlen;
@@ -4180,6 +4165,7 @@ static int sctp_copy_laddrs(struct sock *sk, __u16 port, void *to,
4180 space_left -= addrlen; 4165 space_left -= addrlen;
4181 *bytes_copied += addrlen; 4166 *bytes_copied += addrlen;
4182 } 4167 }
4168 rcu_read_unlock();
4183 4169
4184 return cnt; 4170 return cnt;
4185} 4171}
@@ -4192,7 +4178,6 @@ static int sctp_getsockopt_local_addrs_old(struct sock *sk, int len,
4192{ 4178{
4193 struct sctp_bind_addr *bp; 4179 struct sctp_bind_addr *bp;
4194 struct sctp_association *asoc; 4180 struct sctp_association *asoc;
4195 struct list_head *pos;
4196 int cnt = 0; 4181 int cnt = 0;
4197 struct sctp_getaddrs_old getaddrs; 4182 struct sctp_getaddrs_old getaddrs;
4198 struct sctp_sockaddr_entry *addr; 4183 struct sctp_sockaddr_entry *addr;
@@ -4200,7 +4185,6 @@ static int sctp_getsockopt_local_addrs_old(struct sock *sk, int len,
4200 union sctp_addr temp; 4185 union sctp_addr temp;
4201 struct sctp_sock *sp = sctp_sk(sk); 4186 struct sctp_sock *sp = sctp_sk(sk);
4202 int addrlen; 4187 int addrlen;
4203 rwlock_t *addr_lock;
4204 int err = 0; 4188 int err = 0;
4205 void *addrs; 4189 void *addrs;
4206 void *buf; 4190 void *buf;
@@ -4222,13 +4206,11 @@ static int sctp_getsockopt_local_addrs_old(struct sock *sk, int len,
4222 */ 4206 */
4223 if (0 == getaddrs.assoc_id) { 4207 if (0 == getaddrs.assoc_id) {
4224 bp = &sctp_sk(sk)->ep->base.bind_addr; 4208 bp = &sctp_sk(sk)->ep->base.bind_addr;
4225 addr_lock = &sctp_sk(sk)->ep->base.addr_lock;
4226 } else { 4209 } else {
4227 asoc = sctp_id2assoc(sk, getaddrs.assoc_id); 4210 asoc = sctp_id2assoc(sk, getaddrs.assoc_id);
4228 if (!asoc) 4211 if (!asoc)
4229 return -EINVAL; 4212 return -EINVAL;
4230 bp = &asoc->base.bind_addr; 4213 bp = &asoc->base.bind_addr;
4231 addr_lock = &asoc->base.addr_lock;
4232 } 4214 }
4233 4215
4234 to = getaddrs.addrs; 4216 to = getaddrs.addrs;
@@ -4242,8 +4224,6 @@ static int sctp_getsockopt_local_addrs_old(struct sock *sk, int len,
4242 if (!addrs) 4224 if (!addrs)
4243 return -ENOMEM; 4225 return -ENOMEM;
4244 4226
4245 sctp_read_lock(addr_lock);
4246
4247 /* If the endpoint is bound to 0.0.0.0 or ::0, get the valid 4227 /* If the endpoint is bound to 0.0.0.0 or ::0, get the valid
4248 * addresses from the global local address list. 4228 * addresses from the global local address list.
4249 */ 4229 */
@@ -4259,8 +4239,11 @@ static int sctp_getsockopt_local_addrs_old(struct sock *sk, int len,
4259 } 4239 }
4260 4240
4261 buf = addrs; 4241 buf = addrs;
4262 list_for_each(pos, &bp->address_list) { 4242 /* Protection on the bound address list is not needed since
4263 addr = list_entry(pos, struct sctp_sockaddr_entry, list); 4243 * in the socket option context we hold a socket lock and
4244 * thus the bound address list can't change.
4245 */
4246 list_for_each_entry(addr, &bp->address_list, list) {
4264 memcpy(&temp, &addr->a, sizeof(temp)); 4247 memcpy(&temp, &addr->a, sizeof(temp));
4265 sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp); 4248 sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp);
4266 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len; 4249 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
@@ -4272,8 +4255,6 @@ static int sctp_getsockopt_local_addrs_old(struct sock *sk, int len,
4272 } 4255 }
4273 4256
4274copy_getaddrs: 4257copy_getaddrs:
4275 sctp_read_unlock(addr_lock);
4276
4277 /* copy the entire address list into the user provided space */ 4258 /* copy the entire address list into the user provided space */
4278 if (copy_to_user(to, addrs, bytes_copied)) { 4259 if (copy_to_user(to, addrs, bytes_copied)) {
4279 err = -EFAULT; 4260 err = -EFAULT;
@@ -4295,7 +4276,6 @@ static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
4295{ 4276{
4296 struct sctp_bind_addr *bp; 4277 struct sctp_bind_addr *bp;
4297 struct sctp_association *asoc; 4278 struct sctp_association *asoc;
4298 struct list_head *pos;
4299 int cnt = 0; 4279 int cnt = 0;
4300 struct sctp_getaddrs getaddrs; 4280 struct sctp_getaddrs getaddrs;
4301 struct sctp_sockaddr_entry *addr; 4281 struct sctp_sockaddr_entry *addr;
@@ -4303,7 +4283,6 @@ static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
4303 union sctp_addr temp; 4283 union sctp_addr temp;
4304 struct sctp_sock *sp = sctp_sk(sk); 4284 struct sctp_sock *sp = sctp_sk(sk);
4305 int addrlen; 4285 int addrlen;
4306 rwlock_t *addr_lock;
4307 int err = 0; 4286 int err = 0;
4308 size_t space_left; 4287 size_t space_left;
4309 int bytes_copied = 0; 4288 int bytes_copied = 0;
@@ -4324,13 +4303,11 @@ static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
4324 */ 4303 */
4325 if (0 == getaddrs.assoc_id) { 4304 if (0 == getaddrs.assoc_id) {
4326 bp = &sctp_sk(sk)->ep->base.bind_addr; 4305 bp = &sctp_sk(sk)->ep->base.bind_addr;
4327 addr_lock = &sctp_sk(sk)->ep->base.addr_lock;
4328 } else { 4306 } else {
4329 asoc = sctp_id2assoc(sk, getaddrs.assoc_id); 4307 asoc = sctp_id2assoc(sk, getaddrs.assoc_id);
4330 if (!asoc) 4308 if (!asoc)
4331 return -EINVAL; 4309 return -EINVAL;
4332 bp = &asoc->base.bind_addr; 4310 bp = &asoc->base.bind_addr;
4333 addr_lock = &asoc->base.addr_lock;
4334 } 4311 }
4335 4312
4336 to = optval + offsetof(struct sctp_getaddrs,addrs); 4313 to = optval + offsetof(struct sctp_getaddrs,addrs);
@@ -4340,8 +4317,6 @@ static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
4340 if (!addrs) 4317 if (!addrs)
4341 return -ENOMEM; 4318 return -ENOMEM;
4342 4319
4343 sctp_read_lock(addr_lock);
4344
4345 /* If the endpoint is bound to 0.0.0.0 or ::0, get the valid 4320 /* If the endpoint is bound to 0.0.0.0 or ::0, get the valid
4346 * addresses from the global local address list. 4321 * addresses from the global local address list.
4347 */ 4322 */
@@ -4353,21 +4328,24 @@ static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
4353 space_left, &bytes_copied); 4328 space_left, &bytes_copied);
4354 if (cnt < 0) { 4329 if (cnt < 0) {
4355 err = cnt; 4330 err = cnt;
4356 goto error_lock; 4331 goto out;
4357 } 4332 }
4358 goto copy_getaddrs; 4333 goto copy_getaddrs;
4359 } 4334 }
4360 } 4335 }
4361 4336
4362 buf = addrs; 4337 buf = addrs;
4363 list_for_each(pos, &bp->address_list) { 4338 /* Protection on the bound address list is not needed since
4364 addr = list_entry(pos, struct sctp_sockaddr_entry, list); 4339 * in the socket option context we hold a socket lock and
4340 * thus the bound address list can't change.
4341 */
4342 list_for_each_entry(addr, &bp->address_list, list) {
4365 memcpy(&temp, &addr->a, sizeof(temp)); 4343 memcpy(&temp, &addr->a, sizeof(temp));
4366 sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp); 4344 sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp);
4367 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len; 4345 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
4368 if (space_left < addrlen) { 4346 if (space_left < addrlen) {
4369 err = -ENOMEM; /*fixme: right error?*/ 4347 err = -ENOMEM; /*fixme: right error?*/
4370 goto error_lock; 4348 goto out;
4371 } 4349 }
4372 memcpy(buf, &temp, addrlen); 4350 memcpy(buf, &temp, addrlen);
4373 buf += addrlen; 4351 buf += addrlen;
@@ -4377,8 +4355,6 @@ static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
4377 } 4355 }
4378 4356
4379copy_getaddrs: 4357copy_getaddrs:
4380 sctp_read_unlock(addr_lock);
4381
4382 if (copy_to_user(to, addrs, bytes_copied)) { 4358 if (copy_to_user(to, addrs, bytes_copied)) {
4383 err = -EFAULT; 4359 err = -EFAULT;
4384 goto out; 4360 goto out;
@@ -4389,12 +4365,6 @@ copy_getaddrs:
4389 } 4365 }
4390 if (put_user(bytes_copied, optlen)) 4366 if (put_user(bytes_copied, optlen))
4391 err = -EFAULT; 4367 err = -EFAULT;
4392
4393 goto out;
4394
4395error_lock:
4396 sctp_read_unlock(addr_lock);
4397
4398out: 4368out:
4399 kfree(addrs); 4369 kfree(addrs);
4400 return err; 4370 return err;
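
Note: the net/sctp/socket.c hunks above all follow one pattern. The per-endpoint and per-association addr_lock rwlock is dropped, walks of the global sctp_local_addr_list now happen under rcu_read_lock() with list_for_each_entry_rcu() and skip entries whose ->valid flag has been cleared, and walks of the socket's own bound address list rely on the socket lock alone. Below is a minimal reader-side sketch of that RCU pattern; the struct and function names are illustrative only, not the actual SCTP declarations.

/* Schematic RCU list reader, assuming <linux/rcupdate.h> and <linux/rculist.h>. */
struct my_addr_entry {
	struct list_head list;
	int valid;		/* cleared by the writer before the entry is unlinked */
};

static int count_valid_addrs(struct list_head *head)
{
	struct my_addr_entry *entry;
	int cnt = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(entry, head, list) {
		if (!entry->valid)	/* entry is being torn down, skip it */
			continue;
		cnt++;
	}
	rcu_read_unlock();		/* must pair with rcu_read_lock() on every exit path */

	return cnt;
}

This pairing requirement is also why sctp_copy_laddrs() now does "cnt = -ENOMEM; break;" instead of returning from inside the loop: every exit path has to reach rcu_read_unlock().
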
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 12ff5da8160e..1a899924023f 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -1592,7 +1592,7 @@ svc_age_temp_sockets(unsigned long closure)
1592 1592
1593 if (!test_and_set_bit(SK_OLD, &svsk->sk_flags)) 1593 if (!test_and_set_bit(SK_OLD, &svsk->sk_flags))
1594 continue; 1594 continue;
1595 if (atomic_read(&svsk->sk_inuse) || test_bit(SK_BUSY, &svsk->sk_flags)) 1595 if (atomic_read(&svsk->sk_inuse) > 1 || test_bit(SK_BUSY, &svsk->sk_flags))
1596 continue; 1596 continue;
1597 atomic_inc(&svsk->sk_inuse); 1597 atomic_inc(&svsk->sk_inuse);
1598 list_move(le, &to_be_aged); 1598 list_move(le, &to_be_aged);
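
Note: the net/sunrpc/svcsock.c hunk tightens the idle test in svc_age_temp_sockets(). A svc_sock still on the server's list presumably always carries one baseline reference, so the old check on atomic_read(&svsk->sk_inuse) treated every temporary socket as busy; comparing against "> 1" skips a socket only when some other user actually still holds it. The sketch below illustrates that refcount-threshold idiom under the same assumption; the names are hypothetical and not the sunrpc code itself.

/* Illustrative aging check: the owning list holds one baseline reference,
 * so a count of 1 means "idle"; the reaper pins the object before moving it.
 */
static bool conn_should_age(atomic_t *refcnt, unsigned long *flags, int busy_bit)
{
	if (atomic_read(refcnt) > 1)		/* another user still holds it */
		return false;
	if (test_bit(busy_bit, flags))		/* a worker owns it right now */
		return false;
	atomic_inc(refcnt);			/* take our own reference, as the aging
						 * loop does before its list_move() */
	return true;
}
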