Diffstat (limited to 'net/rds/threads.c')
-rw-r--r--   net/rds/threads.c   61
1 file changed, 8 insertions(+), 53 deletions(-)
diff --git a/net/rds/threads.c b/net/rds/threads.c
index 786c20eaaf5e..6e2e43d5f576 100644
--- a/net/rds/threads.c
+++ b/net/rds/threads.c
@@ -110,7 +110,7 @@ EXPORT_SYMBOL_GPL(rds_connect_complete);
  * We should *always* start with a random backoff; otherwise a broken connection
  * will always take several iterations to be re-established.
  */
-static void rds_queue_reconnect(struct rds_connection *conn)
+void rds_queue_reconnect(struct rds_connection *conn)
 {
 	unsigned long rand;
 
@@ -156,58 +156,6 @@ void rds_connect_worker(struct work_struct *work)
 	}
 }
 
-void rds_shutdown_worker(struct work_struct *work)
-{
-	struct rds_connection *conn = container_of(work, struct rds_connection, c_down_w);
-
-	/* shut it down unless it's down already */
-	if (!rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_DOWN)) {
-		/*
-		 * Quiesce the connection mgmt handlers before we start tearing
-		 * things down. We don't hold the mutex for the entire
-		 * duration of the shutdown operation, else we may be
-		 * deadlocking with the CM handler. Instead, the CM event
-		 * handler is supposed to check for state DISCONNECTING
-		 */
-		mutex_lock(&conn->c_cm_lock);
-		if (!rds_conn_transition(conn, RDS_CONN_UP, RDS_CONN_DISCONNECTING) &&
-		    !rds_conn_transition(conn, RDS_CONN_ERROR, RDS_CONN_DISCONNECTING)) {
-			rds_conn_error(conn, "shutdown called in state %d\n",
-				       atomic_read(&conn->c_state));
-			mutex_unlock(&conn->c_cm_lock);
-			return;
-		}
-		mutex_unlock(&conn->c_cm_lock);
-
-		mutex_lock(&conn->c_send_lock);
-		conn->c_trans->conn_shutdown(conn);
-		rds_conn_reset(conn);
-		mutex_unlock(&conn->c_send_lock);
-
-		if (!rds_conn_transition(conn, RDS_CONN_DISCONNECTING, RDS_CONN_DOWN)) {
-			/* This can happen - eg when we're in the middle of tearing
-			 * down the connection, and someone unloads the rds module.
-			 * Quite reproduceable with loopback connections.
-			 * Mostly harmless.
-			 */
-			rds_conn_error(conn,
-				"%s: failed to transition to state DOWN, "
-				"current state is %d\n",
-				__func__,
-				atomic_read(&conn->c_state));
-			return;
-		}
-	}
-
-	/* Then reconnect if it's still live.
-	 * The passive side of an IB loopback connection is never added
-	 * to the conn hash, so we never trigger a reconnect on this
-	 * conn - the reconnect is always triggered by the active peer. */
-	cancel_delayed_work(&conn->c_conn_w);
-	if (!hlist_unhashed(&conn->c_hash_node))
-		rds_queue_reconnect(conn);
-}
-
 void rds_send_worker(struct work_struct *work)
 {
 	struct rds_connection *conn = container_of(work, struct rds_connection, c_send_w.work);
@@ -252,6 +200,13 @@ void rds_recv_worker(struct work_struct *work)
 	}
 }
 
+void rds_shutdown_worker(struct work_struct *work)
+{
+	struct rds_connection *conn = container_of(work, struct rds_connection, c_down_w);
+
+	rds_conn_shutdown(conn);
+}
+
 void rds_threads_exit(void)
 {
 	destroy_workqueue(rds_wq);
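Note: the new rds_shutdown_worker() body calls rds_conn_shutdown(conn), which is not defined anywhere in this file; since the diffstat is limited to net/rds/threads.c, the rest of the change is not shown. The natural reading is that the deleted worker body has been relocated into that helper (presumably in net/rds/connection.c, next to the other rds_conn_* state management), and that rds_queue_reconnect() loses its static qualifier in the first hunk precisely so the relocated code can still call back into threads.c. A sketch of the moved helper under that assumption, assembled from the lines removed here (only the function name and signature are inferred from the new call site):

/* Sketch (assumption): the removed rds_shutdown_worker() body, relocated
 * into rds_conn_shutdown() in net/rds/connection.c. */
void rds_conn_shutdown(struct rds_connection *conn)
{
	/* shut it down unless it's down already */
	if (!rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_DOWN)) {
		/*
		 * Quiesce the connection mgmt handlers before we start tearing
		 * things down. We don't hold the mutex for the entire
		 * duration of the shutdown operation, else we may be
		 * deadlocking with the CM handler. Instead, the CM event
		 * handler is supposed to check for state DISCONNECTING
		 */
		mutex_lock(&conn->c_cm_lock);
		if (!rds_conn_transition(conn, RDS_CONN_UP, RDS_CONN_DISCONNECTING) &&
		    !rds_conn_transition(conn, RDS_CONN_ERROR, RDS_CONN_DISCONNECTING)) {
			rds_conn_error(conn, "shutdown called in state %d\n",
				       atomic_read(&conn->c_state));
			mutex_unlock(&conn->c_cm_lock);
			return;
		}
		mutex_unlock(&conn->c_cm_lock);

		/* tear down the transport while holding off new sends */
		mutex_lock(&conn->c_send_lock);
		conn->c_trans->conn_shutdown(conn);
		rds_conn_reset(conn);
		mutex_unlock(&conn->c_send_lock);

		if (!rds_conn_transition(conn, RDS_CONN_DISCONNECTING, RDS_CONN_DOWN)) {
			/* This can happen - eg when we're in the middle of tearing
			 * down the connection, and someone unloads the rds module.
			 * Quite reproduceable with loopback connections.
			 * Mostly harmless.
			 */
			rds_conn_error(conn,
				"%s: failed to transition to state DOWN, "
				"current state is %d\n",
				__func__,
				atomic_read(&conn->c_state));
			return;
		}
	}

	/* Then reconnect if it's still live.
	 * The passive side of an IB loopback connection is never added
	 * to the conn hash, so we never trigger a reconnect on this
	 * conn - the reconnect is always triggered by the active peer. */
	cancel_delayed_work(&conn->c_conn_w);
	if (!hlist_unhashed(&conn->c_hash_node))
		rds_queue_reconnect(conn);
}

With the body out of threads.c, rds_queue_reconnect() has to be callable from another compilation unit, which is why the static qualifier is dropped; a matching extern declaration (and one for rds_conn_shutdown()) would be expected in net/rds/rds.h, though that file is outside this diff.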