author    Andy Grover <andy.grover@oracle.com>    2010-06-11 16:49:13 -0400
committer Andy Grover <andy.grover@oracle.com>    2010-09-08 21:10:13 -0400
commit    2dc393573430f853e56e25bf4b41c34ba2aa8fd6
tree      f7a3ddb99aab472aa5054a10043419d4b22bb312
parent    9de0864cf55927a7383b5ba6e48834ff3ef053de
RDS: move rds_shutdown_worker impl. to rds_conn_shutdown
This fits better in connection.c than in threads.c. While moving the
code, the connect worker is now cancelled with cancel_delayed_work_sync()
instead of cancel_delayed_work().
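The worker callback left behind in threads.c becomes a thin wrapper
around the new function, as the last threads.c hunk below shows:

	void rds_shutdown_worker(struct work_struct *work)
	{
		struct rds_connection *conn = container_of(work, struct rds_connection, c_down_w);

		rds_conn_shutdown(conn);
	}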
Signed-off-by: Andy Grover <andy.grover@oracle.com>
-rw-r--r--  net/rds/connection.c | 53
-rw-r--r--  net/rds/rds.h        |  2
-rw-r--r--  net/rds/threads.c    | 61
3 files changed, 63 insertions(+), 53 deletions(-)
diff --git a/net/rds/connection.c b/net/rds/connection.c
index 7619b671ca28..895e39cdc6a6 100644
--- a/net/rds/connection.c
+++ b/net/rds/connection.c
@@ -263,6 +263,59 @@ struct rds_connection *rds_conn_create_outgoing(__be32 laddr, __be32 faddr,
 }
 EXPORT_SYMBOL_GPL(rds_conn_create_outgoing);
 
+void rds_conn_shutdown(struct rds_connection *conn)
+{
+	/* shut it down unless it's down already */
+	if (!rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_DOWN)) {
+		/*
+		 * Quiesce the connection mgmt handlers before we start tearing
+		 * things down. We don't hold the mutex for the entire
+		 * duration of the shutdown operation, else we may be
+		 * deadlocking with the CM handler. Instead, the CM event
+		 * handler is supposed to check for state DISCONNECTING
+		 */
+		mutex_lock(&conn->c_cm_lock);
+		if (!rds_conn_transition(conn, RDS_CONN_UP, RDS_CONN_DISCONNECTING)
+		 && !rds_conn_transition(conn, RDS_CONN_ERROR, RDS_CONN_DISCONNECTING)) {
+			rds_conn_error(conn, "shutdown called in state %d\n",
+				       atomic_read(&conn->c_state));
+			mutex_unlock(&conn->c_cm_lock);
+			return;
+		}
+		mutex_unlock(&conn->c_cm_lock);
+
+		mutex_lock(&conn->c_send_lock);
+		conn->c_trans->conn_shutdown(conn);
+		rds_conn_reset(conn);
+		mutex_unlock(&conn->c_send_lock);
+
+		if (!rds_conn_transition(conn, RDS_CONN_DISCONNECTING, RDS_CONN_DOWN)) {
+			/* This can happen - eg when we're in the middle of tearing
+			 * down the connection, and someone unloads the rds module.
+			 * Quite reproduceable with loopback connections.
+			 * Mostly harmless.
+			 */
+			rds_conn_error(conn,
+				       "%s: failed to transition to state DOWN, "
+				       "current state is %d\n",
+				       __func__,
+				       atomic_read(&conn->c_state));
+			return;
+		}
+	}
+
+	/* Then reconnect if it's still live.
+	 * The passive side of an IB loopback connection is never added
+	 * to the conn hash, so we never trigger a reconnect on this
+	 * conn - the reconnect is always triggered by the active peer. */
+	cancel_delayed_work_sync(&conn->c_conn_w);
+	if (!hlist_unhashed(&conn->c_hash_node))
+		rds_queue_reconnect(conn);
+}
+
+/*
+ * Stop and free a connection.
+ */
 void rds_conn_destroy(struct rds_connection *conn)
 {
 	struct rds_message *rm, *rtmp;
diff --git a/net/rds/rds.h b/net/rds/rds.h
index c224b5bb3ba9..1d3eef67137f 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -527,6 +527,7 @@ struct rds_connection *rds_conn_create(__be32 laddr, __be32 faddr,
 			       struct rds_transport *trans, gfp_t gfp);
 struct rds_connection *rds_conn_create_outgoing(__be32 laddr, __be32 faddr,
 			       struct rds_transport *trans, gfp_t gfp);
+void rds_conn_shutdown(struct rds_connection *conn);
 void rds_conn_destroy(struct rds_connection *conn);
 void rds_conn_reset(struct rds_connection *conn);
 void rds_conn_drop(struct rds_connection *conn);
@@ -681,6 +682,7 @@ extern unsigned int rds_sysctl_trace_level;
 int __init rds_threads_init(void);
 void rds_threads_exit(void);
 extern struct workqueue_struct *rds_wq;
+void rds_queue_reconnect(struct rds_connection *conn);
 void rds_connect_worker(struct work_struct *);
 void rds_shutdown_worker(struct work_struct *);
 void rds_send_worker(struct work_struct *);
diff --git a/net/rds/threads.c b/net/rds/threads.c
index 786c20eaaf5e..6e2e43d5f576 100644
--- a/net/rds/threads.c
+++ b/net/rds/threads.c
@@ -110,7 +110,7 @@ EXPORT_SYMBOL_GPL(rds_connect_complete);
  * We should *always* start with a random backoff; otherwise a broken connection
  * will always take several iterations to be re-established.
  */
-static void rds_queue_reconnect(struct rds_connection *conn)
+void rds_queue_reconnect(struct rds_connection *conn)
 {
 	unsigned long rand;
 
@@ -156,58 +156,6 @@ void rds_connect_worker(struct work_struct *work)
 	}
 }
 
-void rds_shutdown_worker(struct work_struct *work)
-{
-	struct rds_connection *conn = container_of(work, struct rds_connection, c_down_w);
-
-	/* shut it down unless it's down already */
-	if (!rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_DOWN)) {
-		/*
-		 * Quiesce the connection mgmt handlers before we start tearing
-		 * things down. We don't hold the mutex for the entire
-		 * duration of the shutdown operation, else we may be
-		 * deadlocking with the CM handler. Instead, the CM event
-		 * handler is supposed to check for state DISCONNECTING
-		 */
-		mutex_lock(&conn->c_cm_lock);
-		if (!rds_conn_transition(conn, RDS_CONN_UP, RDS_CONN_DISCONNECTING) &&
-		    !rds_conn_transition(conn, RDS_CONN_ERROR, RDS_CONN_DISCONNECTING)) {
-			rds_conn_error(conn, "shutdown called in state %d\n",
-				       atomic_read(&conn->c_state));
-			mutex_unlock(&conn->c_cm_lock);
-			return;
-		}
-		mutex_unlock(&conn->c_cm_lock);
-
-		mutex_lock(&conn->c_send_lock);
-		conn->c_trans->conn_shutdown(conn);
-		rds_conn_reset(conn);
-		mutex_unlock(&conn->c_send_lock);
-
-		if (!rds_conn_transition(conn, RDS_CONN_DISCONNECTING, RDS_CONN_DOWN)) {
-			/* This can happen - eg when we're in the middle of tearing
-			 * down the connection, and someone unloads the rds module.
-			 * Quite reproduceable with loopback connections.
-			 * Mostly harmless.
-			 */
-			rds_conn_error(conn,
-				       "%s: failed to transition to state DOWN, "
-				       "current state is %d\n",
-				       __func__,
-				       atomic_read(&conn->c_state));
-			return;
-		}
-	}
-
-	/* Then reconnect if it's still live.
-	 * The passive side of an IB loopback connection is never added
-	 * to the conn hash, so we never trigger a reconnect on this
-	 * conn - the reconnect is always triggered by the active peer. */
-	cancel_delayed_work(&conn->c_conn_w);
-	if (!hlist_unhashed(&conn->c_hash_node))
-		rds_queue_reconnect(conn);
-}
-
 void rds_send_worker(struct work_struct *work)
 {
 	struct rds_connection *conn = container_of(work, struct rds_connection, c_send_w.work);
@@ -252,6 +200,13 @@ void rds_recv_worker(struct work_struct *work)
 	}
 }
 
+void rds_shutdown_worker(struct work_struct *work)
+{
+	struct rds_connection *conn = container_of(work, struct rds_connection, c_down_w);
+
+	rds_conn_shutdown(conn);
+}
+
 void rds_threads_exit(void)
 {
 	destroy_workqueue(rds_wq);
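
For context, this patch does not change how the shutdown work gets
queued: c_down_w is still what drives the teardown, and it now lands in
rds_conn_shutdown() via the rds_shutdown_worker() wrapper. A minimal
sketch of the usual caller, assuming rds_conn_drop() as it existed
around this commit (not part of this patch):

	/* Sketch only -- assumes the era-appropriate rds_conn_drop().
	 * Marks the connection as errored and schedules c_down_w on
	 * rds_wq, which runs rds_shutdown_worker() above.
	 */
	void rds_conn_drop(struct rds_connection *conn)
	{
		atomic_set(&conn->c_state, RDS_CONN_ERROR);
		queue_work(rds_wq, &conn->c_down_w);
	}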