aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorChris Mason <chris.mason@oracle.com>2010-05-11 18:15:15 -0400
committerAndy Grover <andy.grover@oracle.com>2010-09-08 21:15:12 -0400
commitbcf50ef2ce3c5d8f2fe995259da16677898cb300 (patch)
tree4a90d8f1b77b027977ee2e5229a239faa9848177
parentabf454398c2ebafc629ebb8b149f5a752c79e919 (diff)
rds: use RCU to protect the connection hash
The connection hash was almost entirely RCU ready; this just makes the final couple of changes to use RCU instead of spinlocks for everything. Signed-off-by: Chris Mason <chris.mason@oracle.com>
-rw-r--r--net/rds/connection.c44
1 file changed, 22 insertions, 22 deletions
diff --git a/net/rds/connection.c b/net/rds/connection.c
index 87df15b9f8e4..180b83ab2607 100644
--- a/net/rds/connection.c
+++ b/net/rds/connection.c
@@ -62,6 +62,7 @@ static struct hlist_head *rds_conn_bucket(__be32 laddr, __be32 faddr)
62 var |= RDS_INFO_CONNECTION_FLAG_##suffix; \ 62 var |= RDS_INFO_CONNECTION_FLAG_##suffix; \
63} while (0) 63} while (0)
64 64
65/* rcu read lock must be held or the connection spinlock */
65static struct rds_connection *rds_conn_lookup(struct hlist_head *head, 66static struct rds_connection *rds_conn_lookup(struct hlist_head *head,
66 __be32 laddr, __be32 faddr, 67 __be32 laddr, __be32 faddr,
67 struct rds_transport *trans) 68 struct rds_transport *trans)
@@ -69,7 +70,7 @@ static struct rds_connection *rds_conn_lookup(struct hlist_head *head,
69 struct rds_connection *conn, *ret = NULL; 70 struct rds_connection *conn, *ret = NULL;
70 struct hlist_node *pos; 71 struct hlist_node *pos;
71 72
72 hlist_for_each_entry(conn, pos, head, c_hash_node) { 73 hlist_for_each_entry_rcu(conn, pos, head, c_hash_node) {
73 if (conn->c_faddr == faddr && conn->c_laddr == laddr && 74 if (conn->c_faddr == faddr && conn->c_laddr == laddr &&
74 conn->c_trans == trans) { 75 conn->c_trans == trans) {
75 ret = conn; 76 ret = conn;
@@ -119,7 +120,8 @@ static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr,
119 unsigned long flags; 120 unsigned long flags;
120 int ret; 121 int ret;
121 122
122 spin_lock_irqsave(&rds_conn_lock, flags); 123
124 rcu_read_lock();
123 conn = rds_conn_lookup(head, laddr, faddr, trans); 125 conn = rds_conn_lookup(head, laddr, faddr, trans);
124 if (conn && conn->c_loopback && conn->c_trans != &rds_loop_transport && 126 if (conn && conn->c_loopback && conn->c_trans != &rds_loop_transport &&
125 !is_outgoing) { 127 !is_outgoing) {
@@ -130,7 +132,7 @@ static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr,
130 parent = conn; 132 parent = conn;
131 conn = parent->c_passive; 133 conn = parent->c_passive;
132 } 134 }
133 spin_unlock_irqrestore(&rds_conn_lock, flags); 135 rcu_read_unlock();
134 if (conn) 136 if (conn)
135 goto out; 137 goto out;
136 138
@@ -227,7 +229,7 @@ static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr,
227 kmem_cache_free(rds_conn_slab, conn); 229 kmem_cache_free(rds_conn_slab, conn);
228 conn = found; 230 conn = found;
229 } else { 231 } else {
230 hlist_add_head(&conn->c_hash_node, head); 232 hlist_add_head_rcu(&conn->c_hash_node, head);
231 rds_cong_add_conn(conn); 233 rds_cong_add_conn(conn);
232 rds_conn_count++; 234 rds_conn_count++;
233 } 235 }
@@ -306,8 +308,13 @@ void rds_conn_shutdown(struct rds_connection *conn)
306 * to the conn hash, so we never trigger a reconnect on this 308 * to the conn hash, so we never trigger a reconnect on this
307 * conn - the reconnect is always triggered by the active peer. */ 309 * conn - the reconnect is always triggered by the active peer. */
308 cancel_delayed_work_sync(&conn->c_conn_w); 310 cancel_delayed_work_sync(&conn->c_conn_w);
309 if (!hlist_unhashed(&conn->c_hash_node)) 311 rcu_read_lock();
312 if (!hlist_unhashed(&conn->c_hash_node)) {
313 rcu_read_unlock();
310 rds_queue_reconnect(conn); 314 rds_queue_reconnect(conn);
315 } else {
316 rcu_read_unlock();
317 }
311} 318}
312 319
313/* 320/*
@@ -323,14 +330,12 @@ void rds_conn_destroy(struct rds_connection *conn)
323 330
324 /* Ensure conn will not be scheduled for reconnect */ 331 /* Ensure conn will not be scheduled for reconnect */
325 spin_lock_irq(&rds_conn_lock); 332 spin_lock_irq(&rds_conn_lock);
326 hlist_del_init(&conn->c_hash_node); 333 hlist_del_init_rcu(&conn->c_hash_node);
327 spin_unlock_irq(&rds_conn_lock); 334 spin_unlock_irq(&rds_conn_lock);
328 335
329 /* wait for the rds thread to shut it down */ 336 synchronize_rcu();
330 atomic_set(&conn->c_state, RDS_CONN_ERROR); 337
331 cancel_delayed_work(&conn->c_conn_w); 338 rds_conn_shutdown(conn);
332 queue_work(rds_wq, &conn->c_down_w);
333 flush_workqueue(rds_wq);
334 339
335 /* tear down queued messages */ 340 /* tear down queued messages */
336 list_for_each_entry_safe(rm, rtmp, 341 list_for_each_entry_safe(rm, rtmp,
@@ -369,17 +374,16 @@ static void rds_conn_message_info(struct socket *sock, unsigned int len,
369 struct list_head *list; 374 struct list_head *list;
370 struct rds_connection *conn; 375 struct rds_connection *conn;
371 struct rds_message *rm; 376 struct rds_message *rm;
372 unsigned long flags;
373 unsigned int total = 0; 377 unsigned int total = 0;
374 size_t i; 378 size_t i;
375 379
376 len /= sizeof(struct rds_info_message); 380 len /= sizeof(struct rds_info_message);
377 381
378 spin_lock_irqsave(&rds_conn_lock, flags); 382 rcu_read_lock();
379 383
380 for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash); 384 for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
381 i++, head++) { 385 i++, head++) {
382 hlist_for_each_entry(conn, pos, head, c_hash_node) { 386 hlist_for_each_entry_rcu(conn, pos, head, c_hash_node) {
383 if (want_send) 387 if (want_send)
384 list = &conn->c_send_queue; 388 list = &conn->c_send_queue;
385 else 389 else
@@ -399,8 +403,7 @@ static void rds_conn_message_info(struct socket *sock, unsigned int len,
399 spin_unlock(&conn->c_lock); 403 spin_unlock(&conn->c_lock);
400 } 404 }
401 } 405 }
402 406 rcu_read_unlock();
403 spin_unlock_irqrestore(&rds_conn_lock, flags);
404 407
405 lens->nr = total; 408 lens->nr = total;
406 lens->each = sizeof(struct rds_info_message); 409 lens->each = sizeof(struct rds_info_message);
@@ -430,19 +433,17 @@ void rds_for_each_conn_info(struct socket *sock, unsigned int len,
430 uint64_t buffer[(item_len + 7) / 8]; 433 uint64_t buffer[(item_len + 7) / 8];
431 struct hlist_head *head; 434 struct hlist_head *head;
432 struct hlist_node *pos; 435 struct hlist_node *pos;
433 struct hlist_node *tmp;
434 struct rds_connection *conn; 436 struct rds_connection *conn;
435 unsigned long flags;
436 size_t i; 437 size_t i;
437 438
438 spin_lock_irqsave(&rds_conn_lock, flags); 439 rcu_read_lock();
439 440
440 lens->nr = 0; 441 lens->nr = 0;
441 lens->each = item_len; 442 lens->each = item_len;
442 443
443 for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash); 444 for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
444 i++, head++) { 445 i++, head++) {
445 hlist_for_each_entry_safe(conn, pos, tmp, head, c_hash_node) { 446 hlist_for_each_entry_rcu(conn, pos, head, c_hash_node) {
446 447
447 /* XXX no c_lock usage.. */ 448 /* XXX no c_lock usage.. */
448 if (!visitor(conn, buffer)) 449 if (!visitor(conn, buffer))
@@ -458,8 +459,7 @@ void rds_for_each_conn_info(struct socket *sock, unsigned int len,
458 lens->nr++; 459 lens->nr++;
459 } 460 }
460 } 461 }
461 462 rcu_read_unlock();
462 spin_unlock_irqrestore(&rds_conn_lock, flags);
463} 463}
464EXPORT_SYMBOL_GPL(rds_for_each_conn_info); 464EXPORT_SYMBOL_GPL(rds_for_each_conn_info);
465 465