path: root/net/sctp/endpointola.c
author		Vlad Yasevich <vladislav.yasevich@hp.com>	2007-09-16 19:03:28 -0400
committer	David S. Miller <davem@davemloft.net>	2007-09-16 19:03:28 -0400
commit		559cf710b07c5e2cfa3fb8d8f4a1320fd84c53f9 (patch)
tree		deb74aea811a7d7c7e203f3743fd15372f8a6589 /net/sctp/endpointola.c
parent		293035479942400a7fe8e4f72465d4e4e466b91a (diff)
[SCTP]: Convert bind_addr_list locking to RCU
Since the sctp_sockaddr_entry is now RCU enabled as part of the patch to
synchronize sctp_localaddr_list, it makes sense to change all handling of
these entries to RCU.  This includes the sctp_bind_addrs structure and its
list of bound addresses.

This list is currently protected by an external rw_lock, and that looks
like overkill.  There are only two writers to the list: bind()/bindx()
calls and BH processing of ASCONF-ACK chunks.  These are already
serialized via the socket lock, so they will not step on each other.
They are also relatively rare, so we should be good with RCU.  The
readers are varied and are easily converted to RCU.

Signed-off-by: Vlad Yasevich <vladislav.yasevich@hp.com>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Acked-by: Sridhar Samudrala <sri@us.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
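A rough sketch of the reader side described above, for illustration only (it is
not part of this patch): readers walk bp->address_list inside an RCU read-side
critical section with list_for_each_entry_rcu().  The helper name
sketch_bind_addr_any_match() is hypothetical; sctp_cmp_addr_exact() is an
existing SCTP address comparison helper used here purely for illustration.

/* Hedged sketch of an RCU reader on the bound-address list; not code
 * from this patch.  sketch_bind_addr_any_match() is a made-up helper.
 */
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <net/sctp/sctp.h>
#include <net/sctp/structs.h>

static int sketch_bind_addr_any_match(struct sctp_bind_addr *bp,
				      const union sctp_addr *addr)
{
	struct sctp_sockaddr_entry *laddr;
	int match = 0;

	rcu_read_lock();	/* readers need only an RCU read-side section */
	list_for_each_entry_rcu(laddr, &bp->address_list, list) {
		if (sctp_cmp_addr_exact(&laddr->a, addr)) {
			match = 1;
			break;
		}
	}
	rcu_read_unlock();

	return match;
}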
Diffstat (limited to 'net/sctp/endpointola.c')
-rw-r--r--	net/sctp/endpointola.c	27
1 file changed, 7 insertions(+), 20 deletions(-)
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 1404a9e2e78..8f485a0d14b 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -92,7 +92,6 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
 
 	/* Initialize the bind addr area */
 	sctp_bind_addr_init(&ep->base.bind_addr, 0);
-	rwlock_init(&ep->base.addr_lock);
 
 	/* Remember who we are attached to.  */
 	ep->base.sk = sk;
@@ -225,21 +224,14 @@ void sctp_endpoint_put(struct sctp_endpoint *ep)
 struct sctp_endpoint *sctp_endpoint_is_match(struct sctp_endpoint *ep,
 					     const union sctp_addr *laddr)
 {
-	struct sctp_endpoint *retval;
+	struct sctp_endpoint *retval = NULL;
 
-	sctp_read_lock(&ep->base.addr_lock);
 	if (htons(ep->base.bind_addr.port) == laddr->v4.sin_port) {
 		if (sctp_bind_addr_match(&ep->base.bind_addr, laddr,
-					 sctp_sk(ep->base.sk))) {
+					 sctp_sk(ep->base.sk)))
 			retval = ep;
-			goto out;
-		}
 	}
 
-	retval = NULL;
-
-out:
-	sctp_read_unlock(&ep->base.addr_lock);
 	return retval;
 }
 
@@ -261,9 +253,7 @@ static struct sctp_association *__sctp_endpoint_lookup_assoc(
 	list_for_each(pos, &ep->asocs) {
 		asoc = list_entry(pos, struct sctp_association, asocs);
 		if (rport == asoc->peer.port) {
-			sctp_read_lock(&asoc->base.addr_lock);
 			*transport = sctp_assoc_lookup_paddr(asoc, paddr);
-			sctp_read_unlock(&asoc->base.addr_lock);
 
 			if (*transport)
 				return asoc;
@@ -295,20 +285,17 @@ struct sctp_association *sctp_endpoint_lookup_assoc(
 int sctp_endpoint_is_peeled_off(struct sctp_endpoint *ep,
 				const union sctp_addr *paddr)
 {
-	struct list_head *pos;
 	struct sctp_sockaddr_entry *addr;
 	struct sctp_bind_addr *bp;
 
-	sctp_read_lock(&ep->base.addr_lock);
 	bp = &ep->base.bind_addr;
-	list_for_each(pos, &bp->address_list) {
-		addr = list_entry(pos, struct sctp_sockaddr_entry, list);
-		if (sctp_has_association(&addr->a, paddr)) {
-			sctp_read_unlock(&ep->base.addr_lock);
+	/* This function is called with the socket lock held,
+	 * so the address_list can not change.
+	 */
+	list_for_each_entry(addr, &bp->address_list, list) {
+		if (sctp_has_association(&addr->a, paddr))
 			return 1;
-		}
 	}
-	sctp_read_unlock(&ep->base.addr_lock);
 
 	return 0;
 }
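For completeness, a hedged sketch of the writer side the changelog relies on:
bind()/bindx() and ASCONF-ACK processing already run under the socket lock, so
list updates need only the _rcu list helpers, and removed entries are freed
after a grace period via call_rcu().  This is not code from this patch: the
helpers sketch_add_bind_addr(), sketch_del_bind_addr() and
sketch_addr_free_rcu() are hypothetical, and the rcu member of struct
sctp_sockaddr_entry is assumed from the companion sctp_localaddr_list patch.

/* Hedged sketch of writers serialized by the socket lock; not code from
 * this patch.  The rcu member of struct sctp_sockaddr_entry is assumed
 * to exist (added by the companion sctp_localaddr_list patch).
 */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <net/sctp/structs.h>

static void sketch_addr_free_rcu(struct rcu_head *head)
{
	struct sctp_sockaddr_entry *laddr =
		container_of(head, struct sctp_sockaddr_entry, rcu);

	kfree(laddr);
}

/* Caller holds the socket lock, which serializes all writers. */
static int sketch_add_bind_addr(struct sctp_bind_addr *bp,
				const union sctp_addr *addr, gfp_t gfp)
{
	struct sctp_sockaddr_entry *laddr;

	laddr = kzalloc(sizeof(*laddr), gfp);
	if (!laddr)
		return -ENOMEM;

	laddr->a = *addr;
	list_add_tail_rcu(&laddr->list, &bp->address_list);
	return 0;
}

/* Caller holds the socket lock; concurrent readers may still see the
 * entry, so it is only freed after an RCU grace period.
 */
static void sketch_del_bind_addr(struct sctp_sockaddr_entry *laddr)
{
	list_del_rcu(&laddr->list);
	call_rcu(&laddr->rcu, sketch_addr_free_rcu);
}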