path: root/net/sctp/bind_addr.c
author     Vlad Yasevich <vladislav.yasevich@hp.com>    2007-09-16 19:03:28 -0400
committer  David S. Miller <davem@davemloft.net>        2007-09-16 19:03:28 -0400
commit     559cf710b07c5e2cfa3fb8d8f4a1320fd84c53f9 (patch)
tree       deb74aea811a7d7c7e203f3743fd15372f8a6589 /net/sctp/bind_addr.c
parent     293035479942400a7fe8e4f72465d4e4e466b91a (diff)
[SCTP]: Convert bind_addr_list locking to RCU
Since the sctp_sockaddr_entry is now RCU enabled as part of the patch to synchronize sctp_localaddr_list, it makes sense to change all handling of these entries to RCU. This includes the sctp_bind_addrs structure and its list of bound addresses.

This list is currently protected by an external rw_lock, which looks like overkill. There are only two writers to the list: bind()/bindx() calls and BH processing of ASCONF-ACK chunks. These are already serialized via the socket lock, so they will not step on each other; they are also relatively rare, so RCU is a good fit. The readers are varied and are easily converted to RCU.

Signed-off-by: Vlad Yasevich <vladislav.yasevich@hp.com>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Acked-by: Sridhar Samudrala <sri@us.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
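For readers unfamiliar with RCU-protected lists, a quick illustration of the idiom this patch applies may help. Writers, already serialized by the socket lock, publish entries with list_add_tail_rcu(), retract them with list_del_rcu(), and defer the actual kfree() to an RCU callback; readers walk the list inside rcu_read_lock()/rcu_read_unlock() and skip entries whose valid flag has been cleared, since an unlinked entry can remain visible to them until the grace period ends. The sketch below is a minimal stand-alone example of that pattern; the entry type and function names are hypothetical and are not the SCTP structures touched by this patch.

/* Illustrative only: example_entry, example_add, example_del and
 * example_match are made-up names, not part of the SCTP code.
 */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/types.h>

struct example_entry {
	struct list_head list;
	struct rcu_head rcu;
	int valid;
	unsigned long key;
};

static LIST_HEAD(example_list);	/* writers serialized by an outer lock */

/* Writer: caller is assumed to hold the serializing lock (the socket
 * lock in the SCTP case), so no extra list lock is needed.
 */
static int example_add(unsigned long key)
{
	struct example_entry *e = kzalloc(sizeof(*e), GFP_KERNEL);

	if (!e)
		return -ENOMEM;
	e->key = key;
	e->valid = 1;
	/* Publish: concurrent readers may see the entry immediately. */
	list_add_tail_rcu(&e->list, &example_list);
	return 0;
}

static void example_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct example_entry, rcu));
}

/* Writer: unlink now, free only after a grace period, because a
 * lockless reader may still be walking through this entry.
 */
static int example_del(unsigned long key)
{
	struct example_entry *e;

	list_for_each_entry(e, &example_list, list) {
		if (e->key == key) {
			e->valid = 0;
			list_del_rcu(&e->list);
			call_rcu(&e->rcu, example_free_rcu);
			return 0;
		}
	}
	return -EINVAL;
}

/* Reader: no lock taken, just an RCU read-side critical section;
 * entries already marked invalid by a writer are skipped.
 */
static bool example_match(unsigned long key)
{
	struct example_entry *e;
	bool match = false;

	rcu_read_lock();
	list_for_each_entry_rcu(e, &example_list, list) {
		if (!e->valid)
			continue;
		if (e->key == key) {
			match = true;
			break;
		}
	}
	rcu_read_unlock();
	return match;
}

This is also why sctp_del_bind_addr() gains an rcu_call function pointer in the diff below: BH-side ASCONF-ACK processing passes call_rcu_bh() while user-side socket option processing passes call_rcu(), so the deferred free waits for the grace period appropriate to each read-side context.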
Diffstat (limited to 'net/sctp/bind_addr.c')
-rw-r--r--  net/sctp/bind_addr.c  68
1 file changed, 45 insertions(+), 23 deletions(-)
diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c
index 7fc369f9035d..d35cbf5aae33 100644
--- a/net/sctp/bind_addr.c
+++ b/net/sctp/bind_addr.c
@@ -167,7 +167,11 @@ int sctp_add_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *new,
 
 	INIT_LIST_HEAD(&addr->list);
 	INIT_RCU_HEAD(&addr->rcu);
-	list_add_tail(&addr->list, &bp->address_list);
+
+	/* We always hold a socket lock when calling this function,
+	 * and that acts as a writer synchronizing lock.
+	 */
+	list_add_tail_rcu(&addr->list, &bp->address_list);
 	SCTP_DBG_OBJCNT_INC(addr);
 
 	return 0;
@@ -176,23 +180,35 @@ int sctp_add_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *new,
 /* Delete an address from the bind address list in the SCTP_bind_addr
  * structure.
  */
-int sctp_del_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *del_addr)
+int sctp_del_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *del_addr,
+		       void (*rcu_call)(struct rcu_head *head,
+					void (*func)(struct rcu_head *head)))
 {
-	struct list_head *pos, *temp;
-	struct sctp_sockaddr_entry *addr;
+	struct sctp_sockaddr_entry *addr, *temp;
 
-	list_for_each_safe(pos, temp, &bp->address_list) {
-		addr = list_entry(pos, struct sctp_sockaddr_entry, list);
+	/* We hold the socket lock when calling this function,
+	 * and that acts as a writer synchronizing lock.
+	 */
+	list_for_each_entry_safe(addr, temp, &bp->address_list, list) {
 		if (sctp_cmp_addr_exact(&addr->a, del_addr)) {
 			/* Found the exact match. */
-			list_del(pos);
-			kfree(addr);
-			SCTP_DBG_OBJCNT_DEC(addr);
-
-			return 0;
+			addr->valid = 0;
+			list_del_rcu(&addr->list);
+			break;
 		}
 	}
 
+	/* Call the rcu callback provided in the args.  This function is
+	 * called by both BH packet processing and user side socket option
+	 * processing, but it works on different lists in those 2 contexts.
+	 * Each context provides it's own callback, whether call_rcu_bh()
+	 * or call_rcu(), to make sure that we wait for an appropriate time.
+	 */
+	if (addr && !addr->valid) {
+		rcu_call(&addr->rcu, sctp_local_addr_free);
+		SCTP_DBG_OBJCNT_DEC(addr);
+	}
+
 	return -EINVAL;
 }
 
@@ -302,15 +318,20 @@ int sctp_bind_addr_match(struct sctp_bind_addr *bp,
 			 struct sctp_sock *opt)
 {
 	struct sctp_sockaddr_entry *laddr;
-	struct list_head *pos;
+	int match = 0;
 
-	list_for_each(pos, &bp->address_list) {
-		laddr = list_entry(pos, struct sctp_sockaddr_entry, list);
-		if (opt->pf->cmp_addr(&laddr->a, addr, opt))
-			return 1;
+	rcu_read_lock();
+	list_for_each_entry_rcu(laddr, &bp->address_list, list) {
+		if (!laddr->valid)
+			continue;
+		if (opt->pf->cmp_addr(&laddr->a, addr, opt)) {
+			match = 1;
+			break;
+		}
 	}
+	rcu_read_unlock();
 
-	return 0;
+	return match;
 }
 
 /* Find the first address in the bind address list that is not present in
@@ -325,18 +346,19 @@ union sctp_addr *sctp_find_unmatch_addr(struct sctp_bind_addr *bp,
 	union sctp_addr *addr;
 	void *addr_buf;
 	struct sctp_af *af;
-	struct list_head *pos;
 	int i;
 
-	list_for_each(pos, &bp->address_list) {
-		laddr = list_entry(pos, struct sctp_sockaddr_entry, list);
-
+	/* This is only called sctp_send_asconf_del_ip() and we hold
+	 * the socket lock in that code patch, so that address list
+	 * can't change.
+	 */
+	list_for_each_entry(laddr, &bp->address_list, list) {
 		addr_buf = (union sctp_addr *)addrs;
 		for (i = 0; i < addrcnt; i++) {
 			addr = (union sctp_addr *)addr_buf;
 			af = sctp_get_af_specific(addr->v4.sin_family);
 			if (!af)
-				return NULL;
+				break;
 
 			if (opt->pf->cmp_addr(&laddr->a, addr, opt))
 				break;