author    Tejun Heo <tj@kernel.org>	2008-09-24 17:22:23 -0400
committer Eric Van Hensbergen <ericvh@ericvh-desktop.austin.ibm.com>	2008-09-24 17:22:23 -0400
commit    7dc5d24be06a5ed874af035d52a083a7b61ef1bd (patch)
tree      74f8e59f6a32ffc099dca3eb1eed9d6e6b58b616 /net/9p/trans_fd.c
parent    72029fe85d8d060b3f966f2dbc36b3c75b5a6532 (diff)
9p-trans_fd: fix trans_fd::p9_conn_destroy()
p9_conn_destroy() first kills all current requests by calling
p9_conn_cancel(), then waits for the request list to be cleared by
waiting on p9_conn->equeue.  After that, polling is stopped and the
trans is destroyed.  This sequence has a few problems.

* Read and write works were never cancelled, so the p9_conn can be
  destroyed while the works are still running; the r/w works remove
  requests from the list and dereference the p9_conn from them.

* The list-emptiness wait on p9_conn->equeue could never trigger,
  because p9_conn_cancel() always clears all the lists.  The only way
  the wait could fire is for another task to issue a request in the
  slim window between p9_conn_cancel() and the wait, which isn't safe
  under the current implementation with or without the wait.

This patch fixes the problem by first stopping poll, which is what
schedules the r/w works, then cancelling the r/w works, which
guarantees that no r/w work is running or will run from that point
on, and only then calling p9_conn_cancel() and doing the rest of the
destruction.

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Eric Van Hensbergen <ericvh@gmail.com>
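For reference, the teardown order that results from this patch,
reassembled from the hunks below (the numbered ordering comments are
editorial annotations, not part of the committed code):

	static void p9_conn_destroy(struct p9_conn *m)
	{
		P9_DPRINTK(P9_DEBUG_MUX, "mux %p prev %p next %p\n", m,
			m->mux_list.prev, m->mux_list.next);

		/* 1. stop poll, so no further r/w work can be scheduled */
		p9_mux_poll_stop(m);
		/* 2. flush the r/w works; after this none is running */
		cancel_work_sync(&m->rq);
		cancel_work_sync(&m->wq);

		/* 3. only now is it safe to fail all outstanding requests */
		p9_conn_cancel(m, -ECONNRESET);

		m->trans = NULL;
		p9_idpool_destroy(m->tagpool);
		kfree(m);
	}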
Diffstat (limited to 'net/9p/trans_fd.c')
-rw-r--r--	net/9p/trans_fd.c	24
1 file changed, 5 insertions, 19 deletions
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index 6a32ffdb9429..ee0d151da31a 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -151,7 +151,6 @@ struct p9_mux_poll_task {
  * @trans: reference to transport instance for this connection
  * @tagpool: id accounting for transactions
  * @err: error state
- * @equeue: event wait_q (?)
  * @req_list: accounting for requests which have been sent
  * @unsent_req_list: accounting for requests that haven't been sent
  * @rcall: current response &p9_fcall structure
@@ -178,7 +177,6 @@ struct p9_conn {
 	struct p9_trans *trans;
 	struct p9_idpool *tagpool;
 	int err;
-	wait_queue_head_t equeue;
 	struct list_head req_list;
 	struct list_head unsent_req_list;
 	struct p9_fcall *rcall;
@@ -430,7 +428,6 @@ static struct p9_conn *p9_conn_create(struct p9_trans *trans)
 	}
 
 	m->err = 0;
-	init_waitqueue_head(&m->equeue);
 	INIT_LIST_HEAD(&m->req_list);
 	INIT_LIST_HEAD(&m->unsent_req_list);
 	m->rcall = NULL;
@@ -483,18 +480,13 @@ static void p9_conn_destroy(struct p9_conn *m)
 {
 	P9_DPRINTK(P9_DEBUG_MUX, "mux %p prev %p next %p\n", m,
 		m->mux_list.prev, m->mux_list.next);
-	p9_conn_cancel(m, -ECONNRESET);
-
-	if (!list_empty(&m->req_list)) {
-		/* wait until all processes waiting on this session exit */
-		P9_DPRINTK(P9_DEBUG_MUX,
-			"mux %p waiting for empty request queue\n", m);
-		wait_event_timeout(m->equeue, (list_empty(&m->req_list)), 5000);
-		P9_DPRINTK(P9_DEBUG_MUX, "mux %p request queue empty: %d\n", m,
-			list_empty(&m->req_list));
-	}
 
 	p9_mux_poll_stop(m);
+	cancel_work_sync(&m->rq);
+	cancel_work_sync(&m->wq);
+
+	p9_conn_cancel(m, -ECONNRESET);
+
 	m->trans = NULL;
 	p9_idpool_destroy(m->tagpool);
 	kfree(m);
@@ -840,8 +832,6 @@ static void p9_read_work(struct work_struct *work)
 				(*req->cb) (req, req->cba);
 			else
 				kfree(req->rcall);
-
-			wake_up(&m->equeue);
 		}
 	} else {
 		if (err >= 0 && rcall->id != P9_RFLUSH)
@@ -984,8 +974,6 @@ static void p9_mux_flush_cb(struct p9_req *freq, void *a)
 			(*req->cb) (req, req->cba);
 		else
 			kfree(req->rcall);
-
-		wake_up(&m->equeue);
 	}
 
 	kfree(freq->tcall);
@@ -1191,8 +1179,6 @@ void p9_conn_cancel(struct p9_conn *m, int err)
 		else
 			kfree(req->rcall);
 	}
-
-	wake_up(&m->equeue);
 }
 
 /**