path: root/net/sctp/sm_make_chunk.c
author	Vlad Yasevich <vladislav.yasevich@hp.com>	2007-09-16 19:03:28 -0400
committer	David S. Miller <davem@davemloft.net>	2007-09-16 19:03:28 -0400
commit	559cf710b07c5e2cfa3fb8d8f4a1320fd84c53f9 (patch)
tree	deb74aea811a7d7c7e203f3743fd15372f8a6589 /net/sctp/sm_make_chunk.c
parent	293035479942400a7fe8e4f72465d4e4e466b91a (diff)
[SCTP]: Convert bind_addr_list locking to RCU
Since the sctp_sockaddr_entry is now RCU-enabled as part of the patch to
synchronize sctp_localaddr_list, it makes sense to convert all handling of
these entries to RCU.  This includes the sctp_bind_addrs structure and its
list of bound addresses.

This list is currently protected by an external rw_lock, and that looks
like overkill.  There are only two writers to the list: bind()/bindx()
calls and BH processing of ASCONF-ACK chunks.  These are already
serialized via the socket lock, so they will not step on each other.
They are also relatively rare, so we should be good with RCU.  The
readers are varied and are easily converted to RCU.

Signed-off-by: Vlad Yasevich <vladislav.yasevich@hp.com>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Acked-by: Sridhar Samudrala <sri@us.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
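As a rough illustration of the reader side this message describes (a
sketch only, not code from this patch: the helper name sctp_bind_addr_has
is invented here, while sctp_bind_addr, sctp_sockaddr_entry and
sctp_cmp_addr_exact are the real kernel identifiers), a lookup that
previously took the rw_lock can now run under rcu_read_lock():

	/* Hypothetical RCU reader of the bound-address list.  The
	 * function name is made up for illustration; the structures and
	 * the list_for_each_entry_rcu() pattern match what this patch
	 * enables.
	 */
	static int sctp_bind_addr_has(struct sctp_bind_addr *bp,
				      const union sctp_addr *addr)
	{
		struct sctp_sockaddr_entry *saddr;
		int found = 0;

		rcu_read_lock();
		list_for_each_entry_rcu(saddr, &bp->address_list, list) {
			if (sctp_cmp_addr_exact(&saddr->a, addr)) {
				found = 1;
				break;
			}
		}
		rcu_read_unlock();
		return found;
	}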
Diffstat (limited to 'net/sctp/sm_make_chunk.c')
-rw-r--r--	net/sctp/sm_make_chunk.c	18
1 file changed, 6 insertions(+), 12 deletions(-)
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 79856c924525..2e34220d94cd 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -1531,7 +1531,7 @@ no_hmac:
 	/* Also, add the destination address. */
 	if (list_empty(&retval->base.bind_addr.address_list)) {
 		sctp_add_bind_addr(&retval->base.bind_addr, &chunk->dest, 1,
-				   GFP_ATOMIC);
+				GFP_ATOMIC);
 	}
 
 	retval->next_tsn = retval->c.initial_tsn;
@@ -2613,22 +2613,16 @@ static int sctp_asconf_param_success(struct sctp_association *asoc,
 
 	switch (asconf_param->param_hdr.type) {
 	case SCTP_PARAM_ADD_IP:
-		sctp_local_bh_disable();
-		sctp_write_lock(&asoc->base.addr_lock);
-		list_for_each(pos, &bp->address_list) {
-			saddr = list_entry(pos, struct sctp_sockaddr_entry, list);
+		/* This is always done in BH context with a socket lock
+		 * held, so the list can not change.
+		 */
+		list_for_each_entry(saddr, &bp->address_list, list) {
 			if (sctp_cmp_addr_exact(&saddr->a, &addr))
 				saddr->use_as_src = 1;
 		}
-		sctp_write_unlock(&asoc->base.addr_lock);
-		sctp_local_bh_enable();
 		break;
 	case SCTP_PARAM_DEL_IP:
-		sctp_local_bh_disable();
-		sctp_write_lock(&asoc->base.addr_lock);
-		retval = sctp_del_bind_addr(bp, &addr);
-		sctp_write_unlock(&asoc->base.addr_lock);
-		sctp_local_bh_enable();
+		retval = sctp_del_bind_addr(bp, &addr, call_rcu_bh);
 		list_for_each(pos, &asoc->peer.transport_addr_list) {
 			transport = list_entry(pos, struct sctp_transport,
 					transports);
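In the SCTP_PARAM_DEL_IP hunk above, sctp_del_bind_addr() now takes the
RCU callback (call_rcu_bh, since the readers run in BH context) that
frees the entry only after a grace period.  A sketch of the assumed
shape of that deletion follows: the real helper lives outside this file,
the free-callback name is invented here, and the ->rcu member is assumed
to come from the earlier sctp_localaddr_list patch.

	/* Assumed shape of the RCU-aware deletion: unlink with
	 * list_del_rcu() and defer the kfree() past a BH grace period
	 * via the caller-supplied rcu_call (call_rcu_bh above).
	 */
	static void sctp_addr_entry_free(struct rcu_head *head)
	{
		struct sctp_sockaddr_entry *e;

		e = container_of(head, struct sctp_sockaddr_entry, rcu);
		kfree(e);
	}

	/* ... inside sctp_del_bind_addr(bp, del_addr, rcu_call),
	 * once the matching entry 'addr' has been found ...
	 */
	list_del_rcu(&addr->list);
	rcu_call(&addr->rcu, sctp_addr_entry_free);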