Diffstat (limited to 'net/9p/trans_fd.c')
-rw-r--r-- | net/9p/trans_fd.c | 1431 |
1 file changed, 464 insertions, 967 deletions
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index 6dabbdb66651..be65d8242fd2 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -39,12 +39,11 @@ | |||
39 | #include <linux/file.h> | 39 | #include <linux/file.h> |
40 | #include <linux/parser.h> | 40 | #include <linux/parser.h> |
41 | #include <net/9p/9p.h> | 41 | #include <net/9p/9p.h> |
42 | #include <net/9p/client.h> | ||
42 | #include <net/9p/transport.h> | 43 | #include <net/9p/transport.h> |
43 | 44 | ||
44 | #define P9_PORT 564 | 45 | #define P9_PORT 564 |
45 | #define MAX_SOCK_BUF (64*1024) | 46 | #define MAX_SOCK_BUF (64*1024) |
46 | #define ERREQFLUSH 1 | ||
47 | #define SCHED_TIMEOUT 10 | ||
48 | #define MAXPOLLWADDR 2 | 47 | #define MAXPOLLWADDR 2 |
49 | 48 | ||
50 | /** | 49 | /** |
@@ -61,7 +60,6 @@ struct p9_fd_opts { | |||
61 | u16 port; | 60 | u16 port; |
62 | }; | 61 | }; |
63 | 62 | ||
64 | |||
65 | /** | 63 | /** |
66 | * struct p9_trans_fd - transport state | 64 | * struct p9_trans_fd - transport state |
67 | * @rd: reference to file to read from | 65 | * @rd: reference to file to read from |
@@ -100,60 +98,22 @@ enum { | |||
100 | Wpending = 8, /* can write */ | 98 | Wpending = 8, /* can write */ |
101 | }; | 99 | }; |
102 | 100 | ||
103 | enum { | 101 | struct p9_poll_wait { |
104 | None, | 102 | struct p9_conn *conn; |
105 | Flushing, | 103 | wait_queue_t wait; |
106 | Flushed, | 104 | wait_queue_head_t *wait_addr; |
107 | }; | ||
108 | |||
109 | struct p9_req; | ||
110 | typedef void (*p9_conn_req_callback)(struct p9_req *req, void *a); | ||
111 | |||
112 | /** | ||
113 | * struct p9_req - fd mux encoding of an rpc transaction | ||
114 | * @lock: protects req_list | ||
115 | * @tag: numeric tag for rpc transaction | ||
116 | * @tcall: request &p9_fcall structure | ||
117 | * @rcall: response &p9_fcall structure | ||
118 | * @err: error state | ||
119 | * @cb: callback for when response is received | ||
120 | * @cba: argument to pass to callback | ||
121 | * @flush: flag to indicate RPC has been flushed | ||
122 | * @req_list: list link for higher level objects to chain requests | ||
123 | * | ||
124 | */ | ||
125 | |||
126 | struct p9_req { | ||
127 | spinlock_t lock; | ||
128 | int tag; | ||
129 | struct p9_fcall *tcall; | ||
130 | struct p9_fcall *rcall; | ||
131 | int err; | ||
132 | p9_conn_req_callback cb; | ||
133 | void *cba; | ||
134 | int flush; | ||
135 | struct list_head req_list; | ||
136 | }; | ||
137 | |||
138 | struct p9_mux_poll_task { | ||
139 | struct task_struct *task; | ||
140 | struct list_head mux_list; | ||
141 | int muxnum; | ||
142 | }; | 105 | }; |
143 | 106 | ||
144 | /** | 107 | /** |
145 | * struct p9_conn - fd mux connection state information | 108 | * struct p9_conn - fd mux connection state information |
146 | * @lock: protects mux_list (?) | ||
147 | * @mux_list: list link for mux to manage multiple connections (?) | 109 | * @mux_list: list link for mux to manage multiple connections (?) |
148 | * @poll_task: task polling on this connection | 110 | * @client: reference to client instance for this connection |
149 | * @msize: maximum size for connection (dup) | ||
150 | * @extended: 9p2000.u flag (dup) | ||
151 | * @trans: reference to transport instance for this connection | ||
152 | * @tagpool: id accounting for transactions | ||
153 | * @err: error state | 111 | * @err: error state |
154 | * @req_list: accounting for requests which have been sent | 112 | * @req_list: accounting for requests which have been sent |
155 | * @unsent_req_list: accounting for requests that haven't been sent | 113 | * @unsent_req_list: accounting for requests that haven't been sent |
156 | * @rcall: current response &p9_fcall structure | 114 | * @req: current request being processed (if any) |
115 | * @tmp_buf: temporary buffer to read in header | ||
116 | * @rsize: amount to read for current frame | ||
157 | * @rpos: read position in current frame | 117 | * @rpos: read position in current frame |
158 | * @rbuf: current read buffer | 118 | * @rbuf: current read buffer |
159 | * @wpos: write position for current frame | 119 | * @wpos: write position for current frame |
@@ -169,409 +129,300 @@ struct p9_mux_poll_task { | |||
169 | */ | 129 | */ |
170 | 130 | ||
171 | struct p9_conn { | 131 | struct p9_conn { |
172 | spinlock_t lock; /* protect lock structure */ | ||
173 | struct list_head mux_list; | 132 | struct list_head mux_list; |
174 | struct p9_mux_poll_task *poll_task; | 133 | struct p9_client *client; |
175 | int msize; | ||
176 | unsigned char extended; | ||
177 | struct p9_trans *trans; | ||
178 | struct p9_idpool *tagpool; | ||
179 | int err; | 134 | int err; |
180 | struct list_head req_list; | 135 | struct list_head req_list; |
181 | struct list_head unsent_req_list; | 136 | struct list_head unsent_req_list; |
182 | struct p9_fcall *rcall; | 137 | struct p9_req_t *req; |
138 | char tmp_buf[7]; | ||
139 | int rsize; | ||
183 | int rpos; | 140 | int rpos; |
184 | char *rbuf; | 141 | char *rbuf; |
185 | int wpos; | 142 | int wpos; |
186 | int wsize; | 143 | int wsize; |
187 | char *wbuf; | 144 | char *wbuf; |
188 | wait_queue_t poll_wait[MAXPOLLWADDR]; | 145 | struct list_head poll_pending_link; |
189 | wait_queue_head_t *poll_waddr[MAXPOLLWADDR]; | 146 | struct p9_poll_wait poll_wait[MAXPOLLWADDR]; |
190 | poll_table pt; | 147 | poll_table pt; |
191 | struct work_struct rq; | 148 | struct work_struct rq; |
192 | struct work_struct wq; | 149 | struct work_struct wq; |
193 | unsigned long wsched; | 150 | unsigned long wsched; |
194 | }; | 151 | }; |
195 | 152 | ||
196 | /** | 153 | static DEFINE_SPINLOCK(p9_poll_lock); |
197 | * struct p9_mux_rpc - fd mux rpc accounting structure | 154 | static LIST_HEAD(p9_poll_pending_list); |
198 | * @m: connection this request was issued on | ||
199 | * @err: error state | ||
200 | * @tcall: request &p9_fcall | ||
201 | * @rcall: response &p9_fcall | ||
202 | * @wqueue: wait queue that client is blocked on for this rpc | ||
203 | * | ||
204 | * Bug: isn't this information duplicated elsewhere like &p9_req | ||
205 | */ | ||
206 | |||
207 | struct p9_mux_rpc { | ||
208 | struct p9_conn *m; | ||
209 | int err; | ||
210 | struct p9_fcall *tcall; | ||
211 | struct p9_fcall *rcall; | ||
212 | wait_queue_head_t wqueue; | ||
213 | }; | ||
214 | |||
215 | static int p9_poll_proc(void *); | ||
216 | static void p9_read_work(struct work_struct *work); | ||
217 | static void p9_write_work(struct work_struct *work); | ||
218 | static void p9_pollwait(struct file *filp, wait_queue_head_t *wait_address, | ||
219 | poll_table *p); | ||
220 | static int p9_fd_write(struct p9_trans *trans, void *v, int len); | ||
221 | static int p9_fd_read(struct p9_trans *trans, void *v, int len); | ||
222 | |||
223 | static DEFINE_MUTEX(p9_mux_task_lock); | ||
224 | static struct workqueue_struct *p9_mux_wq; | 155 | static struct workqueue_struct *p9_mux_wq; |
156 | static struct task_struct *p9_poll_task; | ||
225 | 157 | ||
226 | static int p9_mux_num; | 158 | static void p9_mux_poll_stop(struct p9_conn *m) |
227 | static int p9_mux_poll_task_num; | ||
228 | static struct p9_mux_poll_task p9_mux_poll_tasks[100]; | ||
229 | |||
230 | static void p9_conn_destroy(struct p9_conn *); | ||
231 | static unsigned int p9_fd_poll(struct p9_trans *trans, | ||
232 | struct poll_table_struct *pt); | ||
233 | |||
234 | #ifdef P9_NONBLOCK | ||
235 | static int p9_conn_rpcnb(struct p9_conn *m, struct p9_fcall *tc, | ||
236 | p9_conn_req_callback cb, void *a); | ||
237 | #endif /* P9_NONBLOCK */ | ||
238 | |||
239 | static void p9_conn_cancel(struct p9_conn *m, int err); | ||
240 | |||
241 | static u16 p9_mux_get_tag(struct p9_conn *m) | ||
242 | { | 159 | { |
243 | int tag; | 160 | unsigned long flags; |
161 | int i; | ||
244 | 162 | ||
245 | tag = p9_idpool_get(m->tagpool); | 163 | for (i = 0; i < ARRAY_SIZE(m->poll_wait); i++) { |
246 | if (tag < 0) | 164 | struct p9_poll_wait *pwait = &m->poll_wait[i]; |
247 | return P9_NOTAG; | ||
248 | else | ||
249 | return (u16) tag; | ||
250 | } | ||
251 | 165 | ||
252 | static void p9_mux_put_tag(struct p9_conn *m, u16 tag) | 166 | if (pwait->wait_addr) { |
253 | { | 167 | remove_wait_queue(pwait->wait_addr, &pwait->wait); |
254 | if (tag != P9_NOTAG && p9_idpool_check(tag, m->tagpool)) | 168 | pwait->wait_addr = NULL; |
255 | p9_idpool_put(tag, m->tagpool); | 169 | } |
170 | } | ||
171 | |||
172 | spin_lock_irqsave(&p9_poll_lock, flags); | ||
173 | list_del_init(&m->poll_pending_link); | ||
174 | spin_unlock_irqrestore(&p9_poll_lock, flags); | ||
256 | } | 175 | } |
257 | 176 | ||
258 | /** | 177 | /** |
259 | * p9_mux_calc_poll_procs - calculates the number of polling procs | 178 | * p9_conn_cancel - cancel all pending requests with error |
260 | * @muxnum: number of mounts | 179 | * @m: mux data |
180 | * @err: error code | ||
261 | * | 181 | * |
262 | * Calculation is based on the number of mounted v9fs filesystems. | ||
263 | * The current implementation returns sqrt of the number of mounts. | ||
264 | */ | 182 | */ |
265 | 183 | ||
266 | static int p9_mux_calc_poll_procs(int muxnum) | 184 | static void p9_conn_cancel(struct p9_conn *m, int err) |
267 | { | 185 | { |
268 | int n; | 186 | struct p9_req_t *req, *rtmp; |
269 | 187 | unsigned long flags; | |
270 | if (p9_mux_poll_task_num) | 188 | LIST_HEAD(cancel_list); |
271 | n = muxnum / p9_mux_poll_task_num + | ||
272 | (muxnum % p9_mux_poll_task_num ? 1 : 0); | ||
273 | else | ||
274 | n = 1; | ||
275 | |||
276 | if (n > ARRAY_SIZE(p9_mux_poll_tasks)) | ||
277 | n = ARRAY_SIZE(p9_mux_poll_tasks); | ||
278 | |||
279 | return n; | ||
280 | } | ||
281 | 189 | ||
282 | static int p9_mux_poll_start(struct p9_conn *m) | 190 | P9_DPRINTK(P9_DEBUG_ERROR, "mux %p err %d\n", m, err); |
283 | { | ||
284 | int i, n; | ||
285 | struct p9_mux_poll_task *vpt, *vptlast; | ||
286 | struct task_struct *pproc; | ||
287 | |||
288 | P9_DPRINTK(P9_DEBUG_MUX, "mux %p muxnum %d procnum %d\n", m, p9_mux_num, | ||
289 | p9_mux_poll_task_num); | ||
290 | mutex_lock(&p9_mux_task_lock); | ||
291 | |||
292 | n = p9_mux_calc_poll_procs(p9_mux_num + 1); | ||
293 | if (n > p9_mux_poll_task_num) { | ||
294 | for (i = 0; i < ARRAY_SIZE(p9_mux_poll_tasks); i++) { | ||
295 | if (p9_mux_poll_tasks[i].task == NULL) { | ||
296 | vpt = &p9_mux_poll_tasks[i]; | ||
297 | P9_DPRINTK(P9_DEBUG_MUX, "create proc %p\n", | ||
298 | vpt); | ||
299 | pproc = kthread_create(p9_poll_proc, vpt, | ||
300 | "v9fs-poll"); | ||
301 | |||
302 | if (!IS_ERR(pproc)) { | ||
303 | vpt->task = pproc; | ||
304 | INIT_LIST_HEAD(&vpt->mux_list); | ||
305 | vpt->muxnum = 0; | ||
306 | p9_mux_poll_task_num++; | ||
307 | wake_up_process(vpt->task); | ||
308 | } | ||
309 | break; | ||
310 | } | ||
311 | } | ||
312 | 191 | ||
313 | if (i >= ARRAY_SIZE(p9_mux_poll_tasks)) | 192 | spin_lock_irqsave(&m->client->lock, flags); |
314 | P9_DPRINTK(P9_DEBUG_ERROR, | ||
315 | "warning: no free poll slots\n"); | ||
316 | } | ||
317 | 193 | ||
318 | n = (p9_mux_num + 1) / p9_mux_poll_task_num + | 194 | if (m->err) { |
319 | ((p9_mux_num + 1) % p9_mux_poll_task_num ? 1 : 0); | 195 | spin_unlock_irqrestore(&m->client->lock, flags); |
320 | 196 | return; | |
321 | vptlast = NULL; | ||
322 | for (i = 0; i < ARRAY_SIZE(p9_mux_poll_tasks); i++) { | ||
323 | vpt = &p9_mux_poll_tasks[i]; | ||
324 | if (vpt->task != NULL) { | ||
325 | vptlast = vpt; | ||
326 | if (vpt->muxnum < n) { | ||
327 | P9_DPRINTK(P9_DEBUG_MUX, "put in proc %d\n", i); | ||
328 | list_add(&m->mux_list, &vpt->mux_list); | ||
329 | vpt->muxnum++; | ||
330 | m->poll_task = vpt; | ||
331 | memset(&m->poll_waddr, 0, | ||
332 | sizeof(m->poll_waddr)); | ||
333 | init_poll_funcptr(&m->pt, p9_pollwait); | ||
334 | break; | ||
335 | } | ||
336 | } | ||
337 | } | 197 | } |
338 | 198 | ||
339 | if (i >= ARRAY_SIZE(p9_mux_poll_tasks)) { | 199 | m->err = err; |
340 | if (vptlast == NULL) { | ||
341 | mutex_unlock(&p9_mux_task_lock); | ||
342 | return -ENOMEM; | ||
343 | } | ||
344 | 200 | ||
345 | P9_DPRINTK(P9_DEBUG_MUX, "put in proc %d\n", i); | 201 | list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) { |
346 | list_add(&m->mux_list, &vptlast->mux_list); | 202 | req->status = REQ_STATUS_ERROR; |
347 | vptlast->muxnum++; | 203 | if (!req->t_err) |
348 | m->poll_task = vptlast; | 204 | req->t_err = err; |
349 | memset(&m->poll_waddr, 0, sizeof(m->poll_waddr)); | 205 | list_move(&req->req_list, &cancel_list); |
350 | init_poll_funcptr(&m->pt, p9_pollwait); | ||
351 | } | 206 | } |
352 | 207 | list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) { | |
353 | p9_mux_num++; | 208 | req->status = REQ_STATUS_ERROR; |
354 | mutex_unlock(&p9_mux_task_lock); | 209 | if (!req->t_err) |
355 | 210 | req->t_err = err; | |
356 | return 0; | 211 | list_move(&req->req_list, &cancel_list); |
357 | } | ||
358 | |||
359 | static void p9_mux_poll_stop(struct p9_conn *m) | ||
360 | { | ||
361 | int i; | ||
362 | struct p9_mux_poll_task *vpt; | ||
363 | |||
364 | mutex_lock(&p9_mux_task_lock); | ||
365 | vpt = m->poll_task; | ||
366 | list_del(&m->mux_list); | ||
367 | for (i = 0; i < ARRAY_SIZE(m->poll_waddr); i++) { | ||
368 | if (m->poll_waddr[i] != NULL) { | ||
369 | remove_wait_queue(m->poll_waddr[i], &m->poll_wait[i]); | ||
370 | m->poll_waddr[i] = NULL; | ||
371 | } | ||
372 | } | 212 | } |
373 | vpt->muxnum--; | 213 | spin_unlock_irqrestore(&m->client->lock, flags); |
374 | if (!vpt->muxnum) { | 214 | |
375 | P9_DPRINTK(P9_DEBUG_MUX, "destroy proc %p\n", vpt); | 215 | list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) { |
376 | kthread_stop(vpt->task); | 216 | list_del(&req->req_list); |
377 | vpt->task = NULL; | 217 | P9_DPRINTK(P9_DEBUG_ERROR, "call back req %p\n", req); |
378 | p9_mux_poll_task_num--; | 218 | p9_client_cb(m->client, req); |
379 | } | 219 | } |
380 | p9_mux_num--; | ||
381 | mutex_unlock(&p9_mux_task_lock); | ||
382 | } | 220 | } |
383 | 221 | ||
384 | /** | 222 | static unsigned int |
385 | * p9_conn_create - allocate and initialize the per-session mux data | 223 | p9_fd_poll(struct p9_client *client, struct poll_table_struct *pt) |
386 | * @trans: transport structure | ||
387 | * | ||
388 | * Note: Creates the polling task if this is the first session. | ||
389 | */ | ||
390 | |||
391 | static struct p9_conn *p9_conn_create(struct p9_trans *trans) | ||
392 | { | 224 | { |
393 | int i, n; | 225 | int ret, n; |
394 | struct p9_conn *m; | 226 | struct p9_trans_fd *ts = NULL; |
395 | 227 | ||
396 | P9_DPRINTK(P9_DEBUG_MUX, "transport %p msize %d\n", trans, | 228 | if (client && client->status == Connected) |
397 | trans->msize); | 229 | ts = client->trans; |
398 | m = kzalloc(sizeof(struct p9_conn), GFP_KERNEL); | ||
399 | if (!m) | ||
400 | return ERR_PTR(-ENOMEM); | ||
401 | 230 | ||
402 | spin_lock_init(&m->lock); | 231 | if (!ts) |
403 | INIT_LIST_HEAD(&m->mux_list); | 232 | return -EREMOTEIO; |
404 | m->msize = trans->msize; | ||
405 | m->extended = trans->extended; | ||
406 | m->trans = trans; | ||
407 | m->tagpool = p9_idpool_create(); | ||
408 | if (IS_ERR(m->tagpool)) { | ||
409 | kfree(m); | ||
410 | return ERR_PTR(-ENOMEM); | ||
411 | } | ||
412 | 233 | ||
413 | INIT_LIST_HEAD(&m->req_list); | 234 | if (!ts->rd->f_op || !ts->rd->f_op->poll) |
414 | INIT_LIST_HEAD(&m->unsent_req_list); | 235 | return -EIO; |
415 | INIT_WORK(&m->rq, p9_read_work); | ||
416 | INIT_WORK(&m->wq, p9_write_work); | ||
417 | n = p9_mux_poll_start(m); | ||
418 | if (n) { | ||
419 | kfree(m); | ||
420 | return ERR_PTR(n); | ||
421 | } | ||
422 | 236 | ||
423 | n = p9_fd_poll(trans, &m->pt); | 237 | if (!ts->wr->f_op || !ts->wr->f_op->poll) |
424 | if (n & POLLIN) { | 238 | return -EIO; |
425 | P9_DPRINTK(P9_DEBUG_MUX, "mux %p can read\n", m); | ||
426 | set_bit(Rpending, &m->wsched); | ||
427 | } | ||
428 | 239 | ||
429 | if (n & POLLOUT) { | 240 | ret = ts->rd->f_op->poll(ts->rd, pt); |
430 | P9_DPRINTK(P9_DEBUG_MUX, "mux %p can write\n", m); | 241 | if (ret < 0) |
431 | set_bit(Wpending, &m->wsched); | 242 | return ret; |
432 | } | ||
433 | 243 | ||
434 | for (i = 0; i < ARRAY_SIZE(m->poll_waddr); i++) { | 244 | if (ts->rd != ts->wr) { |
435 | if (IS_ERR(m->poll_waddr[i])) { | 245 | n = ts->wr->f_op->poll(ts->wr, pt); |
436 | p9_mux_poll_stop(m); | 246 | if (n < 0) |
437 | kfree(m); | 247 | return n; |
438 | return (void *)m->poll_waddr; /* the error code */ | 248 | ret = (ret & ~POLLOUT) | (n & ~POLLIN); |
439 | } | ||
440 | } | 249 | } |
441 | 250 | ||
442 | return m; | 251 | return ret; |
443 | } | 252 | } |
444 | 253 | ||
445 | /** | 254 | /** |
446 | * p9_mux_destroy - cancels all pending requests and frees mux resources | 255 | * p9_fd_read- read from a fd |
447 | * @m: mux to destroy | 256 | * @client: client instance |
257 | * @v: buffer to receive data into | ||
258 | * @len: size of receive buffer | ||
448 | * | 259 | * |
449 | */ | 260 | */ |
450 | 261 | ||
451 | static void p9_conn_destroy(struct p9_conn *m) | 262 | static int p9_fd_read(struct p9_client *client, void *v, int len) |
452 | { | 263 | { |
453 | P9_DPRINTK(P9_DEBUG_MUX, "mux %p prev %p next %p\n", m, | 264 | int ret; |
454 | m->mux_list.prev, m->mux_list.next); | 265 | struct p9_trans_fd *ts = NULL; |
455 | 266 | ||
456 | p9_mux_poll_stop(m); | 267 | if (client && client->status != Disconnected) |
457 | cancel_work_sync(&m->rq); | 268 | ts = client->trans; |
458 | cancel_work_sync(&m->wq); | ||
459 | 269 | ||
460 | p9_conn_cancel(m, -ECONNRESET); | 270 | if (!ts) |
271 | return -EREMOTEIO; | ||
461 | 272 | ||
462 | m->trans = NULL; | 273 | if (!(ts->rd->f_flags & O_NONBLOCK)) |
463 | p9_idpool_destroy(m->tagpool); | 274 | P9_DPRINTK(P9_DEBUG_ERROR, "blocking read ...\n"); |
464 | kfree(m); | 275 | |
276 | ret = kernel_read(ts->rd, ts->rd->f_pos, v, len); | ||
277 | if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN) | ||
278 | client->status = Disconnected; | ||
279 | return ret; | ||
465 | } | 280 | } |
466 | 281 | ||
467 | /** | 282 | /** |
468 | * p9_pollwait - add poll task to the wait queue | 283 | * p9_read_work - called when there is some data to be read from a transport |
469 | * @filp: file pointer being polled | 284 | * @work: container of work to be done |
470 | * @wait_address: wait_q to block on | ||
471 | * @p: poll state | ||
472 | * | 285 | * |
473 | * called by files poll operation to add v9fs-poll task to files wait queue | ||
474 | */ | 286 | */ |
475 | 287 | ||
476 | static void | 288 | static void p9_read_work(struct work_struct *work) |
477 | p9_pollwait(struct file *filp, wait_queue_head_t *wait_address, poll_table *p) | ||
478 | { | 289 | { |
479 | int i; | 290 | int n, err; |
480 | struct p9_conn *m; | 291 | struct p9_conn *m; |
481 | 292 | ||
482 | m = container_of(p, struct p9_conn, pt); | 293 | m = container_of(work, struct p9_conn, rq); |
483 | for (i = 0; i < ARRAY_SIZE(m->poll_waddr); i++) | ||
484 | if (m->poll_waddr[i] == NULL) | ||
485 | break; | ||
486 | 294 | ||
487 | if (i >= ARRAY_SIZE(m->poll_waddr)) { | 295 | if (m->err < 0) |
488 | P9_DPRINTK(P9_DEBUG_ERROR, "not enough wait_address slots\n"); | ||
489 | return; | 296 | return; |
490 | } | ||
491 | 297 | ||
492 | m->poll_waddr[i] = wait_address; | 298 | P9_DPRINTK(P9_DEBUG_TRANS, "start mux %p pos %d\n", m, m->rpos); |
493 | 299 | ||
494 | if (!wait_address) { | 300 | if (!m->rbuf) { |
495 | P9_DPRINTK(P9_DEBUG_ERROR, "no wait_address\n"); | 301 | m->rbuf = m->tmp_buf; |
496 | m->poll_waddr[i] = ERR_PTR(-EIO); | 302 | m->rpos = 0; |
303 | m->rsize = 7; /* start by reading header */ | ||
304 | } | ||
305 | |||
306 | clear_bit(Rpending, &m->wsched); | ||
307 | P9_DPRINTK(P9_DEBUG_TRANS, "read mux %p pos %d size: %d = %d\n", m, | ||
308 | m->rpos, m->rsize, m->rsize-m->rpos); | ||
309 | err = p9_fd_read(m->client, m->rbuf + m->rpos, | ||
310 | m->rsize - m->rpos); | ||
311 | P9_DPRINTK(P9_DEBUG_TRANS, "mux %p got %d bytes\n", m, err); | ||
312 | if (err == -EAGAIN) { | ||
313 | clear_bit(Rworksched, &m->wsched); | ||
497 | return; | 314 | return; |
498 | } | 315 | } |
499 | 316 | ||
500 | init_waitqueue_entry(&m->poll_wait[i], m->poll_task->task); | 317 | if (err <= 0) |
501 | add_wait_queue(wait_address, &m->poll_wait[i]); | 318 | goto error; |
502 | } | ||
503 | 319 | ||
504 | /** | 320 | m->rpos += err; |
505 | * p9_poll_mux - polls a mux and schedules read or write works if necessary | ||
506 | * @m: connection to poll | ||
507 | * | ||
508 | */ | ||
509 | 321 | ||
510 | static void p9_poll_mux(struct p9_conn *m) | 322 | if ((!m->req) && (m->rpos == m->rsize)) { /* header read in */ |
511 | { | 323 | u16 tag; |
512 | int n; | 324 | P9_DPRINTK(P9_DEBUG_TRANS, "got new header\n"); |
513 | 325 | ||
514 | if (m->err < 0) | 326 | n = le32_to_cpu(*(__le32 *) m->rbuf); /* read packet size */ |
515 | return; | 327 | if (n >= m->client->msize) { |
328 | P9_DPRINTK(P9_DEBUG_ERROR, | ||
329 | "requested packet size too big: %d\n", n); | ||
330 | err = -EIO; | ||
331 | goto error; | ||
332 | } | ||
516 | 333 | ||
517 | n = p9_fd_poll(m->trans, NULL); | 334 | tag = le16_to_cpu(*(__le16 *) (m->rbuf+5)); /* read tag */ |
518 | if (n < 0 || n & (POLLERR | POLLHUP | POLLNVAL)) { | 335 | P9_DPRINTK(P9_DEBUG_TRANS, |
519 | P9_DPRINTK(P9_DEBUG_MUX, "error mux %p err %d\n", m, n); | 336 | "mux %p pkt: size: %d bytes tag: %d\n", m, n, tag); |
520 | if (n >= 0) | ||
521 | n = -ECONNRESET; | ||
522 | p9_conn_cancel(m, n); | ||
523 | } | ||
524 | 337 | ||
525 | if (n & POLLIN) { | 338 | m->req = p9_tag_lookup(m->client, tag); |
526 | set_bit(Rpending, &m->wsched); | 339 | if (!m->req) { |
527 | P9_DPRINTK(P9_DEBUG_MUX, "mux %p can read\n", m); | 340 | P9_DPRINTK(P9_DEBUG_ERROR, "Unexpected packet tag %d\n", |
528 | if (!test_and_set_bit(Rworksched, &m->wsched)) { | 341 | tag); |
529 | P9_DPRINTK(P9_DEBUG_MUX, "schedule read work %p\n", m); | 342 | err = -EIO; |
530 | queue_work(p9_mux_wq, &m->rq); | 343 | goto error; |
531 | } | 344 | } |
532 | } | ||
533 | 345 | ||
534 | if (n & POLLOUT) { | 346 | if (m->req->rc == NULL) { |
535 | set_bit(Wpending, &m->wsched); | 347 | m->req->rc = kmalloc(sizeof(struct p9_fcall) + |
536 | P9_DPRINTK(P9_DEBUG_MUX, "mux %p can write\n", m); | 348 | m->client->msize, GFP_KERNEL); |
537 | if ((m->wsize || !list_empty(&m->unsent_req_list)) | 349 | if (!m->req->rc) { |
538 | && !test_and_set_bit(Wworksched, &m->wsched)) { | 350 | m->req = NULL; |
539 | P9_DPRINTK(P9_DEBUG_MUX, "schedule write work %p\n", m); | 351 | err = -ENOMEM; |
540 | queue_work(p9_mux_wq, &m->wq); | 352 | goto error; |
353 | } | ||
541 | } | 354 | } |
355 | m->rbuf = (char *)m->req->rc + sizeof(struct p9_fcall); | ||
356 | memcpy(m->rbuf, m->tmp_buf, m->rsize); | ||
357 | m->rsize = n; | ||
542 | } | 358 | } |
359 | |||
360 | /* not an else because some packets (like clunk) have no payload */ | ||
361 | if ((m->req) && (m->rpos == m->rsize)) { /* packet is read in */ | ||
362 | P9_DPRINTK(P9_DEBUG_TRANS, "got new packet\n"); | ||
363 | spin_lock(&m->client->lock); | ||
364 | list_del(&m->req->req_list); | ||
365 | spin_unlock(&m->client->lock); | ||
366 | p9_client_cb(m->client, m->req); | ||
367 | |||
368 | m->rbuf = NULL; | ||
369 | m->rpos = 0; | ||
370 | m->rsize = 0; | ||
371 | m->req = NULL; | ||
372 | } | ||
373 | |||
374 | if (!list_empty(&m->req_list)) { | ||
375 | if (test_and_clear_bit(Rpending, &m->wsched)) | ||
376 | n = POLLIN; | ||
377 | else | ||
378 | n = p9_fd_poll(m->client, NULL); | ||
379 | |||
380 | if (n & POLLIN) { | ||
381 | P9_DPRINTK(P9_DEBUG_TRANS, "sched read work %p\n", m); | ||
382 | queue_work(p9_mux_wq, &m->rq); | ||
383 | } else | ||
384 | clear_bit(Rworksched, &m->wsched); | ||
385 | } else | ||
386 | clear_bit(Rworksched, &m->wsched); | ||
387 | |||
388 | return; | ||
389 | error: | ||
390 | p9_conn_cancel(m, err); | ||
391 | clear_bit(Rworksched, &m->wsched); | ||
543 | } | 392 | } |
544 | 393 | ||
545 | /** | 394 | /** |
546 | * p9_poll_proc - poll worker thread | 395 | * p9_fd_write - write to a socket |
547 | * @a: thread state and arguments | 396 | * @client: client instance |
548 | * | 397 | * @v: buffer to send data from |
549 | * polls all v9fs transports for new events and queues the appropriate | 398 | * @len: size of send buffer |
550 | * work to the work queue | ||
551 | * | 399 | * |
552 | */ | 400 | */ |
553 | 401 | ||
554 | static int p9_poll_proc(void *a) | 402 | static int p9_fd_write(struct p9_client *client, void *v, int len) |
555 | { | 403 | { |
556 | struct p9_conn *m, *mtmp; | 404 | int ret; |
557 | struct p9_mux_poll_task *vpt; | 405 | mm_segment_t oldfs; |
406 | struct p9_trans_fd *ts = NULL; | ||
558 | 407 | ||
559 | vpt = a; | 408 | if (client && client->status != Disconnected) |
560 | P9_DPRINTK(P9_DEBUG_MUX, "start %p %p\n", current, vpt); | 409 | ts = client->trans; |
561 | while (!kthread_should_stop()) { | ||
562 | set_current_state(TASK_INTERRUPTIBLE); | ||
563 | 410 | ||
564 | list_for_each_entry_safe(m, mtmp, &vpt->mux_list, mux_list) { | 411 | if (!ts) |
565 | p9_poll_mux(m); | 412 | return -EREMOTEIO; |
566 | } | ||
567 | 413 | ||
568 | P9_DPRINTK(P9_DEBUG_MUX, "sleeping...\n"); | 414 | if (!(ts->wr->f_flags & O_NONBLOCK)) |
569 | schedule_timeout(SCHED_TIMEOUT * HZ); | 415 | P9_DPRINTK(P9_DEBUG_ERROR, "blocking write ...\n"); |
570 | } | ||
571 | 416 | ||
572 | __set_current_state(TASK_RUNNING); | 417 | oldfs = get_fs(); |
573 | P9_DPRINTK(P9_DEBUG_MUX, "finish\n"); | 418 | set_fs(get_ds()); |
574 | return 0; | 419 | /* The cast to a user pointer is valid due to the set_fs() */ |
420 | ret = vfs_write(ts->wr, (void __user *)v, len, &ts->wr->f_pos); | ||
421 | set_fs(oldfs); | ||
422 | |||
423 | if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN) | ||
424 | client->status = Disconnected; | ||
425 | return ret; | ||
575 | } | 426 | } |
576 | 427 | ||
577 | /** | 428 | /** |
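For context (not part of the patch): the 7-byte header that p9_read_work reads into tmp_buf above follows the standard 9P wire layout, size[4] type[1] tag[2], all little-endian. The code pulls the frame size from offset 0 and the tag from offset 5, then finds the outstanding request with p9_tag_lookup(). A minimal sketch of that layout, assuming the usual 9P framing (the driver itself reads the fields by offset rather than through a struct like this):

	/* illustrative only -- mirrors the offsets used in p9_read_work() */
	struct p9_frame_hdr {
		__le32 size;	/* total frame length, header included */
		u8 type;	/* message type (R-message id) */
		__le16 tag;	/* tag of the matching outstanding request */
	} __attribute__((packed));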
@@ -584,7 +435,7 @@ static void p9_write_work(struct work_struct *work) | |||
584 | { | 435 | { |
585 | int n, err; | 436 | int n, err; |
586 | struct p9_conn *m; | 437 | struct p9_conn *m; |
587 | struct p9_req *req; | 438 | struct p9_req_t *req; |
588 | 439 | ||
589 | m = container_of(work, struct p9_conn, wq); | 440 | m = container_of(work, struct p9_conn, wq); |
590 | 441 | ||
@@ -599,25 +450,23 @@ static void p9_write_work(struct work_struct *work) | |||
599 | return; | 450 | return; |
600 | } | 451 | } |
601 | 452 | ||
602 | spin_lock(&m->lock); | 453 | spin_lock(&m->client->lock); |
603 | again: | 454 | req = list_entry(m->unsent_req_list.next, struct p9_req_t, |
604 | req = list_entry(m->unsent_req_list.next, struct p9_req, | ||
605 | req_list); | 455 | req_list); |
456 | req->status = REQ_STATUS_SENT; | ||
606 | list_move_tail(&req->req_list, &m->req_list); | 457 | list_move_tail(&req->req_list, &m->req_list); |
607 | if (req->err == ERREQFLUSH) | ||
608 | goto again; | ||
609 | 458 | ||
610 | m->wbuf = req->tcall->sdata; | 459 | m->wbuf = req->tc->sdata; |
611 | m->wsize = req->tcall->size; | 460 | m->wsize = req->tc->size; |
612 | m->wpos = 0; | 461 | m->wpos = 0; |
613 | spin_unlock(&m->lock); | 462 | spin_unlock(&m->client->lock); |
614 | } | 463 | } |
615 | 464 | ||
616 | P9_DPRINTK(P9_DEBUG_MUX, "mux %p pos %d size %d\n", m, m->wpos, | 465 | P9_DPRINTK(P9_DEBUG_TRANS, "mux %p pos %d size %d\n", m, m->wpos, |
617 | m->wsize); | 466 | m->wsize); |
618 | clear_bit(Wpending, &m->wsched); | 467 | clear_bit(Wpending, &m->wsched); |
619 | err = p9_fd_write(m->trans, m->wbuf + m->wpos, m->wsize - m->wpos); | 468 | err = p9_fd_write(m->client, m->wbuf + m->wpos, m->wsize - m->wpos); |
620 | P9_DPRINTK(P9_DEBUG_MUX, "mux %p sent %d bytes\n", m, err); | 469 | P9_DPRINTK(P9_DEBUG_TRANS, "mux %p sent %d bytes\n", m, err); |
621 | if (err == -EAGAIN) { | 470 | if (err == -EAGAIN) { |
622 | clear_bit(Wworksched, &m->wsched); | 471 | clear_bit(Wworksched, &m->wsched); |
623 | return; | 472 | return; |
@@ -638,10 +487,10 @@ again: | |||
638 | if (test_and_clear_bit(Wpending, &m->wsched)) | 487 | if (test_and_clear_bit(Wpending, &m->wsched)) |
639 | n = POLLOUT; | 488 | n = POLLOUT; |
640 | else | 489 | else |
641 | n = p9_fd_poll(m->trans, NULL); | 490 | n = p9_fd_poll(m->client, NULL); |
642 | 491 | ||
643 | if (n & POLLOUT) { | 492 | if (n & POLLOUT) { |
644 | P9_DPRINTK(P9_DEBUG_MUX, "schedule write work %p\n", m); | 493 | P9_DPRINTK(P9_DEBUG_TRANS, "sched write work %p\n", m); |
645 | queue_work(p9_mux_wq, &m->wq); | 494 | queue_work(p9_mux_wq, &m->wq); |
646 | } else | 495 | } else |
647 | clear_bit(Wworksched, &m->wsched); | 496 | clear_bit(Wworksched, &m->wsched); |
@@ -655,504 +504,197 @@ error: | |||
655 | clear_bit(Wworksched, &m->wsched); | 504 | clear_bit(Wworksched, &m->wsched); |
656 | } | 505 | } |
657 | 506 | ||
658 | static void process_request(struct p9_conn *m, struct p9_req *req) | 507 | static int p9_pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key) |
659 | { | 508 | { |
660 | int ecode; | 509 | struct p9_poll_wait *pwait = |
661 | struct p9_str *ename; | 510 | container_of(wait, struct p9_poll_wait, wait); |
662 | 511 | struct p9_conn *m = pwait->conn; | |
663 | if (!req->err && req->rcall->id == P9_RERROR) { | 512 | unsigned long flags; |
664 | ecode = req->rcall->params.rerror.errno; | 513 | DECLARE_WAITQUEUE(dummy_wait, p9_poll_task); |
665 | ename = &req->rcall->params.rerror.error; | ||
666 | |||
667 | P9_DPRINTK(P9_DEBUG_MUX, "Rerror %.*s\n", ename->len, | ||
668 | ename->str); | ||
669 | |||
670 | if (m->extended) | ||
671 | req->err = -ecode; | ||
672 | 514 | ||
673 | if (!req->err) { | 515 | spin_lock_irqsave(&p9_poll_lock, flags); |
674 | req->err = p9_errstr2errno(ename->str, ename->len); | 516 | if (list_empty(&m->poll_pending_link)) |
517 | list_add_tail(&m->poll_pending_link, &p9_poll_pending_list); | ||
518 | spin_unlock_irqrestore(&p9_poll_lock, flags); | ||
675 | 519 | ||
676 | /* string match failed */ | 520 | /* perform the default wake up operation */ |
677 | if (!req->err) { | 521 | return default_wake_function(&dummy_wait, mode, sync, key); |
678 | PRINT_FCALL_ERROR("unknown error", req->rcall); | ||
679 | req->err = -ESERVERFAULT; | ||
680 | } | ||
681 | } | ||
682 | } else if (req->tcall && req->rcall->id != req->tcall->id + 1) { | ||
683 | P9_DPRINTK(P9_DEBUG_ERROR, | ||
684 | "fcall mismatch: expected %d, got %d\n", | ||
685 | req->tcall->id + 1, req->rcall->id); | ||
686 | if (!req->err) | ||
687 | req->err = -EIO; | ||
688 | } | ||
689 | } | 522 | } |
690 | 523 | ||
691 | /** | 524 | /** |
692 | * p9_read_work - called when there is some data to be read from a transport | 525 | * p9_pollwait - add poll task to the wait queue |
693 | * @work: container of work to be done | 526 | * @filp: file pointer being polled |
527 | * @wait_address: wait_q to block on | ||
528 | * @p: poll state | ||
694 | * | 529 | * |
530 | * called by files poll operation to add v9fs-poll task to files wait queue | ||
695 | */ | 531 | */ |
696 | 532 | ||
697 | static void p9_read_work(struct work_struct *work) | 533 | static void |
534 | p9_pollwait(struct file *filp, wait_queue_head_t *wait_address, poll_table *p) | ||
698 | { | 535 | { |
699 | int n, err; | 536 | struct p9_conn *m = container_of(p, struct p9_conn, pt); |
700 | struct p9_conn *m; | 537 | struct p9_poll_wait *pwait = NULL; |
701 | struct p9_req *req, *rptr, *rreq; | 538 | int i; |
702 | struct p9_fcall *rcall; | ||
703 | char *rbuf; | ||
704 | |||
705 | m = container_of(work, struct p9_conn, rq); | ||
706 | |||
707 | if (m->err < 0) | ||
708 | return; | ||
709 | |||
710 | rcall = NULL; | ||
711 | P9_DPRINTK(P9_DEBUG_MUX, "start mux %p pos %d\n", m, m->rpos); | ||
712 | 539 | ||
713 | if (!m->rcall) { | 540 | for (i = 0; i < ARRAY_SIZE(m->poll_wait); i++) { |
714 | m->rcall = | 541 | if (m->poll_wait[i].wait_addr == NULL) { |
715 | kmalloc(sizeof(struct p9_fcall) + m->msize, GFP_KERNEL); | 542 | pwait = &m->poll_wait[i]; |
716 | if (!m->rcall) { | 543 | break; |
717 | err = -ENOMEM; | ||
718 | goto error; | ||
719 | } | 544 | } |
720 | |||
721 | m->rbuf = (char *)m->rcall + sizeof(struct p9_fcall); | ||
722 | m->rpos = 0; | ||
723 | } | 545 | } |
724 | 546 | ||
725 | clear_bit(Rpending, &m->wsched); | 547 | if (!pwait) { |
726 | err = p9_fd_read(m->trans, m->rbuf + m->rpos, m->msize - m->rpos); | 548 | P9_DPRINTK(P9_DEBUG_ERROR, "not enough wait_address slots\n"); |
727 | P9_DPRINTK(P9_DEBUG_MUX, "mux %p got %d bytes\n", m, err); | ||
728 | if (err == -EAGAIN) { | ||
729 | clear_bit(Rworksched, &m->wsched); | ||
730 | return; | 549 | return; |
731 | } | 550 | } |
732 | 551 | ||
733 | if (err <= 0) | 552 | pwait->conn = m; |
734 | goto error; | 553 | pwait->wait_addr = wait_address; |
735 | 554 | init_waitqueue_func_entry(&pwait->wait, p9_pollwake); | |
736 | m->rpos += err; | 555 | add_wait_queue(wait_address, &pwait->wait); |
737 | while (m->rpos > 4) { | ||
738 | n = le32_to_cpu(*(__le32 *) m->rbuf); | ||
739 | if (n >= m->msize) { | ||
740 | P9_DPRINTK(P9_DEBUG_ERROR, | ||
741 | "requested packet size too big: %d\n", n); | ||
742 | err = -EIO; | ||
743 | goto error; | ||
744 | } | ||
745 | |||
746 | if (m->rpos < n) | ||
747 | break; | ||
748 | |||
749 | err = | ||
750 | p9_deserialize_fcall(m->rbuf, n, m->rcall, m->extended); | ||
751 | if (err < 0) | ||
752 | goto error; | ||
753 | |||
754 | #ifdef CONFIG_NET_9P_DEBUG | ||
755 | if ((p9_debug_level&P9_DEBUG_FCALL) == P9_DEBUG_FCALL) { | ||
756 | char buf[150]; | ||
757 | |||
758 | p9_printfcall(buf, sizeof(buf), m->rcall, | ||
759 | m->extended); | ||
760 | printk(KERN_NOTICE ">>> %p %s\n", m, buf); | ||
761 | } | ||
762 | #endif | ||
763 | |||
764 | rcall = m->rcall; | ||
765 | rbuf = m->rbuf; | ||
766 | if (m->rpos > n) { | ||
767 | m->rcall = kmalloc(sizeof(struct p9_fcall) + m->msize, | ||
768 | GFP_KERNEL); | ||
769 | if (!m->rcall) { | ||
770 | err = -ENOMEM; | ||
771 | goto error; | ||
772 | } | ||
773 | |||
774 | m->rbuf = (char *)m->rcall + sizeof(struct p9_fcall); | ||
775 | memmove(m->rbuf, rbuf + n, m->rpos - n); | ||
776 | m->rpos -= n; | ||
777 | } else { | ||
778 | m->rcall = NULL; | ||
779 | m->rbuf = NULL; | ||
780 | m->rpos = 0; | ||
781 | } | ||
782 | |||
783 | P9_DPRINTK(P9_DEBUG_MUX, "mux %p fcall id %d tag %d\n", m, | ||
784 | rcall->id, rcall->tag); | ||
785 | |||
786 | req = NULL; | ||
787 | spin_lock(&m->lock); | ||
788 | list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) { | ||
789 | if (rreq->tag == rcall->tag) { | ||
790 | req = rreq; | ||
791 | if (req->flush != Flushing) | ||
792 | list_del(&req->req_list); | ||
793 | break; | ||
794 | } | ||
795 | } | ||
796 | spin_unlock(&m->lock); | ||
797 | |||
798 | if (req) { | ||
799 | req->rcall = rcall; | ||
800 | process_request(m, req); | ||
801 | |||
802 | if (req->flush != Flushing) { | ||
803 | if (req->cb) | ||
804 | (*req->cb) (req, req->cba); | ||
805 | else | ||
806 | kfree(req->rcall); | ||
807 | } | ||
808 | } else { | ||
809 | if (err >= 0 && rcall->id != P9_RFLUSH) | ||
810 | P9_DPRINTK(P9_DEBUG_ERROR, | ||
811 | "unexpected response mux %p id %d tag %d\n", | ||
812 | m, rcall->id, rcall->tag); | ||
813 | kfree(rcall); | ||
814 | } | ||
815 | } | ||
816 | |||
817 | if (!list_empty(&m->req_list)) { | ||
818 | if (test_and_clear_bit(Rpending, &m->wsched)) | ||
819 | n = POLLIN; | ||
820 | else | ||
821 | n = p9_fd_poll(m->trans, NULL); | ||
822 | |||
823 | if (n & POLLIN) { | ||
824 | P9_DPRINTK(P9_DEBUG_MUX, "schedule read work %p\n", m); | ||
825 | queue_work(p9_mux_wq, &m->rq); | ||
826 | } else | ||
827 | clear_bit(Rworksched, &m->wsched); | ||
828 | } else | ||
829 | clear_bit(Rworksched, &m->wsched); | ||
830 | |||
831 | return; | ||
832 | |||
833 | error: | ||
834 | p9_conn_cancel(m, err); | ||
835 | clear_bit(Rworksched, &m->wsched); | ||
836 | } | 556 | } |
837 | 557 | ||
838 | /** | 558 | /** |
839 | * p9_send_request - send 9P request | 559 | * p9_conn_create - allocate and initialize the per-session mux data |
840 | * The function can sleep until the request is scheduled for sending. | 560 | * @client: client instance |
841 | * The function can be interrupted. Return from the function is not | ||
842 | * a guarantee that the request is sent successfully. Can return errors | ||
843 | * that can be retrieved by PTR_ERR macros. | ||
844 | * | ||
845 | * @m: mux data | ||
846 | * @tc: request to be sent | ||
847 | * @cb: callback function to call when response is received | ||
848 | * @cba: parameter to pass to the callback function | ||
849 | * | 561 | * |
562 | * Note: Creates the polling task if this is the first session. | ||
850 | */ | 563 | */ |
851 | 564 | ||
852 | static struct p9_req *p9_send_request(struct p9_conn *m, | 565 | static struct p9_conn *p9_conn_create(struct p9_client *client) |
853 | struct p9_fcall *tc, | ||
854 | p9_conn_req_callback cb, void *cba) | ||
855 | { | 566 | { |
856 | int n; | 567 | int n; |
857 | struct p9_req *req; | 568 | struct p9_conn *m; |
858 | |||
859 | P9_DPRINTK(P9_DEBUG_MUX, "mux %p task %p tcall %p id %d\n", m, current, | ||
860 | tc, tc->id); | ||
861 | if (m->err < 0) | ||
862 | return ERR_PTR(m->err); | ||
863 | |||
864 | req = kmalloc(sizeof(struct p9_req), GFP_KERNEL); | ||
865 | if (!req) | ||
866 | return ERR_PTR(-ENOMEM); | ||
867 | |||
868 | if (tc->id == P9_TVERSION) | ||
869 | n = P9_NOTAG; | ||
870 | else | ||
871 | n = p9_mux_get_tag(m); | ||
872 | 569 | ||
873 | if (n < 0) { | 570 | P9_DPRINTK(P9_DEBUG_TRANS, "client %p msize %d\n", client, |
874 | kfree(req); | 571 | client->msize); |
572 | m = kzalloc(sizeof(struct p9_conn), GFP_KERNEL); | ||
573 | if (!m) | ||
875 | return ERR_PTR(-ENOMEM); | 574 | return ERR_PTR(-ENOMEM); |
876 | } | ||
877 | 575 | ||
878 | p9_set_tag(tc, n); | 576 | INIT_LIST_HEAD(&m->mux_list); |
577 | m->client = client; | ||
879 | 578 | ||
880 | #ifdef CONFIG_NET_9P_DEBUG | 579 | INIT_LIST_HEAD(&m->req_list); |
881 | if ((p9_debug_level&P9_DEBUG_FCALL) == P9_DEBUG_FCALL) { | 580 | INIT_LIST_HEAD(&m->unsent_req_list); |
882 | char buf[150]; | 581 | INIT_WORK(&m->rq, p9_read_work); |
582 | INIT_WORK(&m->wq, p9_write_work); | ||
583 | INIT_LIST_HEAD(&m->poll_pending_link); | ||
584 | init_poll_funcptr(&m->pt, p9_pollwait); | ||
883 | 585 | ||
884 | p9_printfcall(buf, sizeof(buf), tc, m->extended); | 586 | n = p9_fd_poll(client, &m->pt); |
885 | printk(KERN_NOTICE "<<< %p %s\n", m, buf); | 587 | if (n & POLLIN) { |
588 | P9_DPRINTK(P9_DEBUG_TRANS, "mux %p can read\n", m); | ||
589 | set_bit(Rpending, &m->wsched); | ||
886 | } | 590 | } |
887 | #endif | ||
888 | |||
889 | spin_lock_init(&req->lock); | ||
890 | req->tag = n; | ||
891 | req->tcall = tc; | ||
892 | req->rcall = NULL; | ||
893 | req->err = 0; | ||
894 | req->cb = cb; | ||
895 | req->cba = cba; | ||
896 | req->flush = None; | ||
897 | |||
898 | spin_lock(&m->lock); | ||
899 | list_add_tail(&req->req_list, &m->unsent_req_list); | ||
900 | spin_unlock(&m->lock); | ||
901 | |||
902 | if (test_and_clear_bit(Wpending, &m->wsched)) | ||
903 | n = POLLOUT; | ||
904 | else | ||
905 | n = p9_fd_poll(m->trans, NULL); | ||
906 | 591 | ||
907 | if (n & POLLOUT && !test_and_set_bit(Wworksched, &m->wsched)) | 592 | if (n & POLLOUT) { |
908 | queue_work(p9_mux_wq, &m->wq); | 593 | P9_DPRINTK(P9_DEBUG_TRANS, "mux %p can write\n", m); |
594 | set_bit(Wpending, &m->wsched); | ||
595 | } | ||
909 | 596 | ||
910 | return req; | 597 | return m; |
911 | } | 598 | } |
912 | 599 | ||
913 | static void p9_mux_free_request(struct p9_conn *m, struct p9_req *req) | 600 | /** |
914 | { | 601 | * p9_poll_mux - polls a mux and schedules read or write works if necessary |
915 | p9_mux_put_tag(m, req->tag); | 602 | * @m: connection to poll |
916 | kfree(req); | 603 | * |
917 | } | 604 | */ |
918 | 605 | ||
919 | static void p9_mux_flush_cb(struct p9_req *freq, void *a) | 606 | static void p9_poll_mux(struct p9_conn *m) |
920 | { | 607 | { |
921 | int tag; | 608 | int n; |
922 | struct p9_conn *m; | ||
923 | struct p9_req *req, *rreq, *rptr; | ||
924 | |||
925 | m = a; | ||
926 | P9_DPRINTK(P9_DEBUG_MUX, "mux %p tc %p rc %p err %d oldtag %d\n", m, | ||
927 | freq->tcall, freq->rcall, freq->err, | ||
928 | freq->tcall->params.tflush.oldtag); | ||
929 | |||
930 | spin_lock(&m->lock); | ||
931 | tag = freq->tcall->params.tflush.oldtag; | ||
932 | req = NULL; | ||
933 | list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) { | ||
934 | if (rreq->tag == tag) { | ||
935 | req = rreq; | ||
936 | list_del(&req->req_list); | ||
937 | break; | ||
938 | } | ||
939 | } | ||
940 | spin_unlock(&m->lock); | ||
941 | 609 | ||
942 | if (req) { | 610 | if (m->err < 0) |
943 | spin_lock(&req->lock); | 611 | return; |
944 | req->flush = Flushed; | ||
945 | spin_unlock(&req->lock); | ||
946 | 612 | ||
947 | if (req->cb) | 613 | n = p9_fd_poll(m->client, NULL); |
948 | (*req->cb) (req, req->cba); | 614 | if (n < 0 || n & (POLLERR | POLLHUP | POLLNVAL)) { |
949 | else | 615 | P9_DPRINTK(P9_DEBUG_TRANS, "error mux %p err %d\n", m, n); |
950 | kfree(req->rcall); | 616 | if (n >= 0) |
617 | n = -ECONNRESET; | ||
618 | p9_conn_cancel(m, n); | ||
951 | } | 619 | } |
952 | 620 | ||
953 | kfree(freq->tcall); | 621 | if (n & POLLIN) { |
954 | kfree(freq->rcall); | 622 | set_bit(Rpending, &m->wsched); |
955 | p9_mux_free_request(m, freq); | 623 | P9_DPRINTK(P9_DEBUG_TRANS, "mux %p can read\n", m); |
956 | } | 624 | if (!test_and_set_bit(Rworksched, &m->wsched)) { |
957 | 625 | P9_DPRINTK(P9_DEBUG_TRANS, "sched read work %p\n", m); | |
958 | static int | 626 | queue_work(p9_mux_wq, &m->rq); |
959 | p9_mux_flush_request(struct p9_conn *m, struct p9_req *req) | 627 | } |
960 | { | ||
961 | struct p9_fcall *fc; | ||
962 | struct p9_req *rreq, *rptr; | ||
963 | |||
964 | P9_DPRINTK(P9_DEBUG_MUX, "mux %p req %p tag %d\n", m, req, req->tag); | ||
965 | |||
966 | /* if a response was received for a request, do nothing */ | ||
967 | spin_lock(&req->lock); | ||
968 | if (req->rcall || req->err) { | ||
969 | spin_unlock(&req->lock); | ||
970 | P9_DPRINTK(P9_DEBUG_MUX, | ||
971 | "mux %p req %p response already received\n", m, req); | ||
972 | return 0; | ||
973 | } | 628 | } |
974 | 629 | ||
975 | req->flush = Flushing; | 630 | if (n & POLLOUT) { |
976 | spin_unlock(&req->lock); | 631 | set_bit(Wpending, &m->wsched); |
977 | 632 | P9_DPRINTK(P9_DEBUG_TRANS, "mux %p can write\n", m); | |
978 | spin_lock(&m->lock); | 633 | if ((m->wsize || !list_empty(&m->unsent_req_list)) |
979 | /* if the request is not sent yet, just remove it from the list */ | 634 | && !test_and_set_bit(Wworksched, &m->wsched)) { |
980 | list_for_each_entry_safe(rreq, rptr, &m->unsent_req_list, req_list) { | 635 | P9_DPRINTK(P9_DEBUG_TRANS, "sched write work %p\n", m); |
981 | if (rreq->tag == req->tag) { | 636 | queue_work(p9_mux_wq, &m->wq); |
982 | P9_DPRINTK(P9_DEBUG_MUX, | ||
983 | "mux %p req %p request is not sent yet\n", m, req); | ||
984 | list_del(&rreq->req_list); | ||
985 | req->flush = Flushed; | ||
986 | spin_unlock(&m->lock); | ||
987 | if (req->cb) | ||
988 | (*req->cb) (req, req->cba); | ||
989 | return 0; | ||
990 | } | 637 | } |
991 | } | 638 | } |
992 | spin_unlock(&m->lock); | ||
993 | |||
994 | clear_thread_flag(TIF_SIGPENDING); | ||
995 | fc = p9_create_tflush(req->tag); | ||
996 | p9_send_request(m, fc, p9_mux_flush_cb, m); | ||
997 | return 1; | ||
998 | } | ||
999 | |||
1000 | static void | ||
1001 | p9_conn_rpc_cb(struct p9_req *req, void *a) | ||
1002 | { | ||
1003 | struct p9_mux_rpc *r; | ||
1004 | |||
1005 | P9_DPRINTK(P9_DEBUG_MUX, "req %p r %p\n", req, a); | ||
1006 | r = a; | ||
1007 | r->rcall = req->rcall; | ||
1008 | r->err = req->err; | ||
1009 | |||
1010 | if (req->flush != None && !req->err) | ||
1011 | r->err = -ERESTARTSYS; | ||
1012 | |||
1013 | wake_up(&r->wqueue); | ||
1014 | } | 639 | } |
1015 | 640 | ||
1016 | /** | 641 | /** |
1017 | * p9_fd_rpc- sends 9P request and waits until a response is available. | 642 | * p9_fd_request - send 9P request |
1018 | * The function can be interrupted. | 643 | * The function can sleep until the request is scheduled for sending. |
1019 | * @t: transport data | 644 | * The function can be interrupted. Return from the function is not |
1020 | * @tc: request to be sent | 645 | * a guarantee that the request is sent successfully. |
1021 | * @rc: pointer where a pointer to the response is stored | 646 | * |
647 | * @client: client instance | ||
648 | * @req: request to be sent | ||
1022 | * | 649 | * |
1023 | */ | 650 | */ |
1024 | 651 | ||
1025 | int | 652 | static int p9_fd_request(struct p9_client *client, struct p9_req_t *req) |
1026 | p9_fd_rpc(struct p9_trans *t, struct p9_fcall *tc, struct p9_fcall **rc) | ||
1027 | { | 653 | { |
1028 | struct p9_trans_fd *p = t->priv; | 654 | int n; |
1029 | struct p9_conn *m = p->conn; | 655 | struct p9_trans_fd *ts = client->trans; |
1030 | int err, sigpending; | 656 | struct p9_conn *m = ts->conn; |
1031 | unsigned long flags; | ||
1032 | struct p9_req *req; | ||
1033 | struct p9_mux_rpc r; | ||
1034 | |||
1035 | r.err = 0; | ||
1036 | r.tcall = tc; | ||
1037 | r.rcall = NULL; | ||
1038 | r.m = m; | ||
1039 | init_waitqueue_head(&r.wqueue); | ||
1040 | |||
1041 | if (rc) | ||
1042 | *rc = NULL; | ||
1043 | |||
1044 | sigpending = 0; | ||
1045 | if (signal_pending(current)) { | ||
1046 | sigpending = 1; | ||
1047 | clear_thread_flag(TIF_SIGPENDING); | ||
1048 | } | ||
1049 | |||
1050 | req = p9_send_request(m, tc, p9_conn_rpc_cb, &r); | ||
1051 | if (IS_ERR(req)) { | ||
1052 | err = PTR_ERR(req); | ||
1053 | P9_DPRINTK(P9_DEBUG_MUX, "error %d\n", err); | ||
1054 | return err; | ||
1055 | } | ||
1056 | 657 | ||
1057 | err = wait_event_interruptible(r.wqueue, r.rcall != NULL || r.err < 0); | 658 | P9_DPRINTK(P9_DEBUG_TRANS, "mux %p task %p tcall %p id %d\n", m, |
1058 | if (r.err < 0) | 659 | current, req->tc, req->tc->id); |
1059 | err = r.err; | 660 | if (m->err < 0) |
1060 | 661 | return m->err; | |
1061 | if (err == -ERESTARTSYS && m->trans->status == Connected | ||
1062 | && m->err == 0) { | ||
1063 | if (p9_mux_flush_request(m, req)) { | ||
1064 | /* wait until we get response of the flush message */ | ||
1065 | do { | ||
1066 | clear_thread_flag(TIF_SIGPENDING); | ||
1067 | err = wait_event_interruptible(r.wqueue, | ||
1068 | r.rcall || r.err); | ||
1069 | } while (!r.rcall && !r.err && err == -ERESTARTSYS && | ||
1070 | m->trans->status == Connected && !m->err); | ||
1071 | |||
1072 | err = -ERESTARTSYS; | ||
1073 | } | ||
1074 | sigpending = 1; | ||
1075 | } | ||
1076 | 662 | ||
1077 | if (sigpending) { | 663 | spin_lock(&client->lock); |
1078 | spin_lock_irqsave(¤t->sighand->siglock, flags); | 664 | req->status = REQ_STATUS_UNSENT; |
1079 | recalc_sigpending(); | 665 | list_add_tail(&req->req_list, &m->unsent_req_list); |
1080 | spin_unlock_irqrestore(¤t->sighand->siglock, flags); | 666 | spin_unlock(&client->lock); |
1081 | } | ||
1082 | 667 | ||
1083 | if (rc) | 668 | if (test_and_clear_bit(Wpending, &m->wsched)) |
1084 | *rc = r.rcall; | 669 | n = POLLOUT; |
1085 | else | 670 | else |
1086 | kfree(r.rcall); | 671 | n = p9_fd_poll(m->client, NULL); |
1087 | 672 | ||
1088 | p9_mux_free_request(m, req); | 673 | if (n & POLLOUT && !test_and_set_bit(Wworksched, &m->wsched)) |
1089 | if (err > 0) | 674 | queue_work(p9_mux_wq, &m->wq); |
1090 | err = -EIO; | ||
1091 | 675 | ||
1092 | return err; | 676 | return 0; |
1093 | } | 677 | } |
1094 | 678 | ||
1095 | #ifdef P9_NONBLOCK | 679 | static int p9_fd_cancel(struct p9_client *client, struct p9_req_t *req) |
1096 | /** | ||
1097 | * p9_conn_rpcnb - sends 9P request without waiting for response. | ||
1098 | * @m: mux data | ||
1099 | * @tc: request to be sent | ||
1100 | * @cb: callback function to be called when response arrives | ||
1101 | * @a: value to pass to the callback function | ||
1102 | * | ||
1103 | */ | ||
1104 | |||
1105 | int p9_conn_rpcnb(struct p9_conn *m, struct p9_fcall *tc, | ||
1106 | p9_conn_req_callback cb, void *a) | ||
1107 | { | 680 | { |
1108 | int err; | 681 | struct p9_trans_fd *ts = client->trans; |
1109 | struct p9_req *req; | 682 | struct p9_conn *m = ts->conn; |
683 | int ret = 1; | ||
1110 | 684 | ||
1111 | req = p9_send_request(m, tc, cb, a); | 685 | P9_DPRINTK(P9_DEBUG_TRANS, "mux %p req %p\n", m, req); |
1112 | if (IS_ERR(req)) { | ||
1113 | err = PTR_ERR(req); | ||
1114 | P9_DPRINTK(P9_DEBUG_MUX, "error %d\n", err); | ||
1115 | return PTR_ERR(req); | ||
1116 | } | ||
1117 | 686 | ||
1118 | P9_DPRINTK(P9_DEBUG_MUX, "mux %p tc %p tag %d\n", m, tc, req->tag); | 687 | spin_lock(&client->lock); |
1119 | return 0; | 688 | list_del(&req->req_list); |
1120 | } | ||
1121 | #endif /* P9_NONBLOCK */ | ||
1122 | 689 | ||
1123 | /** | 690 | if (req->status == REQ_STATUS_UNSENT) { |
1124 | * p9_conn_cancel - cancel all pending requests with error | 691 | req->status = REQ_STATUS_FLSHD; |
1125 | * @m: mux data | 692 | ret = 0; |
1126 | * @err: error code | ||
1127 | * | ||
1128 | */ | ||
1129 | |||
1130 | void p9_conn_cancel(struct p9_conn *m, int err) | ||
1131 | { | ||
1132 | struct p9_req *req, *rtmp; | ||
1133 | LIST_HEAD(cancel_list); | ||
1134 | |||
1135 | P9_DPRINTK(P9_DEBUG_ERROR, "mux %p err %d\n", m, err); | ||
1136 | m->err = err; | ||
1137 | spin_lock(&m->lock); | ||
1138 | list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) { | ||
1139 | list_move(&req->req_list, &cancel_list); | ||
1140 | } | 693 | } |
1141 | list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) { | ||
1142 | list_move(&req->req_list, &cancel_list); | ||
1143 | } | ||
1144 | spin_unlock(&m->lock); | ||
1145 | 694 | ||
1146 | list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) { | 695 | spin_unlock(&client->lock); |
1147 | list_del(&req->req_list); | ||
1148 | if (!req->err) | ||
1149 | req->err = err; | ||
1150 | 696 | ||
1151 | if (req->cb) | 697 | return ret; |
1152 | (*req->cb) (req, req->cba); | ||
1153 | else | ||
1154 | kfree(req->rcall); | ||
1155 | } | ||
1156 | } | 698 | } |
1157 | 699 | ||
1158 | /** | 700 | /** |
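For context (not part of the patch): with the old p9_req/p9_mux_rpc machinery removed, the fd transport now drives each p9_req_t through the status field shared with the 9p client core. A rough summary of the transitions visible in this patch, written as a C comment (the REQ_STATUS_* constants come from include/net/9p/client.h):

	/*
	 * p9_fd_request():  req->status = REQ_STATUS_UNSENT, request queued
	 *                   on m->unsent_req_list, write work scheduled if
	 *                   the fd polls writable.
	 * p9_write_work():  head of unsent_req_list marked REQ_STATUS_SENT
	 *                   and moved to m->req_list before tc->sdata goes
	 *                   out on the wire.
	 * p9_read_work():   a complete reply is matched by tag, unlinked
	 *                   from m->req_list and completed via p9_client_cb().
	 * p9_fd_cancel():   the request is unlinked; if it was still
	 *                   REQ_STATUS_UNSENT it becomes REQ_STATUS_FLSHD and
	 *                   0 is returned, otherwise 1.
	 * p9_conn_cancel(): on a connection error every queued request is
	 *                   marked REQ_STATUS_ERROR, t_err is set, and the
	 *                   client callback runs.
	 */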
@@ -1216,7 +758,7 @@ static int parse_opts(char *params, struct p9_fd_opts *opts) | |||
1216 | return 0; | 758 | return 0; |
1217 | } | 759 | } |
1218 | 760 | ||
1219 | static int p9_fd_open(struct p9_trans *trans, int rfd, int wfd) | 761 | static int p9_fd_open(struct p9_client *client, int rfd, int wfd) |
1220 | { | 762 | { |
1221 | struct p9_trans_fd *ts = kmalloc(sizeof(struct p9_trans_fd), | 763 | struct p9_trans_fd *ts = kmalloc(sizeof(struct p9_trans_fd), |
1222 | GFP_KERNEL); | 764 | GFP_KERNEL); |
@@ -1234,13 +776,13 @@ static int p9_fd_open(struct p9_trans *trans, int rfd, int wfd) | |||
1234 | return -EIO; | 776 | return -EIO; |
1235 | } | 777 | } |
1236 | 778 | ||
1237 | trans->priv = ts; | 779 | client->trans = ts; |
1238 | trans->status = Connected; | 780 | client->status = Connected; |
1239 | 781 | ||
1240 | return 0; | 782 | return 0; |
1241 | } | 783 | } |
1242 | 784 | ||
1243 | static int p9_socket_open(struct p9_trans *trans, struct socket *csocket) | 785 | static int p9_socket_open(struct p9_client *client, struct socket *csocket) |
1244 | { | 786 | { |
1245 | int fd, ret; | 787 | int fd, ret; |
1246 | 788 | ||
@@ -1251,137 +793,65 @@ static int p9_socket_open(struct p9_trans *trans, struct socket *csocket) | |||
1251 | return fd; | 793 | return fd; |
1252 | } | 794 | } |
1253 | 795 | ||
1254 | ret = p9_fd_open(trans, fd, fd); | 796 | ret = p9_fd_open(client, fd, fd); |
1255 | if (ret < 0) { | 797 | if (ret < 0) { |
1256 | P9_EPRINTK(KERN_ERR, "p9_socket_open: failed to open fd\n"); | 798 | P9_EPRINTK(KERN_ERR, "p9_socket_open: failed to open fd\n"); |
1257 | sockfd_put(csocket); | 799 | sockfd_put(csocket); |
1258 | return ret; | 800 | return ret; |
1259 | } | 801 | } |
1260 | 802 | ||
1261 | ((struct p9_trans_fd *)trans->priv)->rd->f_flags |= O_NONBLOCK; | 803 | ((struct p9_trans_fd *)client->trans)->rd->f_flags |= O_NONBLOCK; |
1262 | 804 | ||
1263 | return 0; | 805 | return 0; |
1264 | } | 806 | } |
1265 | 807 | ||
1266 | /** | 808 | /** |
1267 | * p9_fd_read- read from a fd | 809 | * p9_mux_destroy - cancels all pending requests and frees mux resources |
1268 | * @trans: transport instance state | 810 | * @m: mux to destroy |
1269 | * @v: buffer to receive data into | ||
1270 | * @len: size of receive buffer | ||
1271 | * | ||
1272 | */ | ||
1273 | |||
1274 | static int p9_fd_read(struct p9_trans *trans, void *v, int len) | ||
1275 | { | ||
1276 | int ret; | ||
1277 | struct p9_trans_fd *ts = NULL; | ||
1278 | |||
1279 | if (trans && trans->status != Disconnected) | ||
1280 | ts = trans->priv; | ||
1281 | |||
1282 | if (!ts) | ||
1283 | return -EREMOTEIO; | ||
1284 | |||
1285 | if (!(ts->rd->f_flags & O_NONBLOCK)) | ||
1286 | P9_DPRINTK(P9_DEBUG_ERROR, "blocking read ...\n"); | ||
1287 | |||
1288 | ret = kernel_read(ts->rd, ts->rd->f_pos, v, len); | ||
1289 | if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN) | ||
1290 | trans->status = Disconnected; | ||
1291 | return ret; | ||
1292 | } | ||
1293 | |||
1294 | /** | ||
1295 | * p9_fd_write - write to a socket | ||
1296 | * @trans: transport instance state | ||
1297 | * @v: buffer to send data from | ||
1298 | * @len: size of send buffer | ||
1299 | * | 811 | * |
1300 | */ | 812 | */ |
1301 | 813 | ||
1302 | static int p9_fd_write(struct p9_trans *trans, void *v, int len) | 814 | static void p9_conn_destroy(struct p9_conn *m) |
1303 | { | ||
1304 | int ret; | ||
1305 | mm_segment_t oldfs; | ||
1306 | struct p9_trans_fd *ts = NULL; | ||
1307 | |||
1308 | if (trans && trans->status != Disconnected) | ||
1309 | ts = trans->priv; | ||
1310 | |||
1311 | if (!ts) | ||
1312 | return -EREMOTEIO; | ||
1313 | |||
1314 | if (!(ts->wr->f_flags & O_NONBLOCK)) | ||
1315 | P9_DPRINTK(P9_DEBUG_ERROR, "blocking write ...\n"); | ||
1316 | |||
1317 | oldfs = get_fs(); | ||
1318 | set_fs(get_ds()); | ||
1319 | /* The cast to a user pointer is valid due to the set_fs() */ | ||
1320 | ret = vfs_write(ts->wr, (void __user *)v, len, &ts->wr->f_pos); | ||
1321 | set_fs(oldfs); | ||
1322 | |||
1323 | if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN) | ||
1324 | trans->status = Disconnected; | ||
1325 | return ret; | ||
1326 | } | ||
1327 | |||
1328 | static unsigned int | ||
1329 | p9_fd_poll(struct p9_trans *trans, struct poll_table_struct *pt) | ||
1330 | { | 815 | { |
1331 | int ret, n; | 816 | P9_DPRINTK(P9_DEBUG_TRANS, "mux %p prev %p next %p\n", m, |
1332 | struct p9_trans_fd *ts = NULL; | 817 | m->mux_list.prev, m->mux_list.next); |
1333 | |||
1334 | if (trans && trans->status == Connected) | ||
1335 | ts = trans->priv; | ||
1336 | |||
1337 | if (!ts) | ||
1338 | return -EREMOTEIO; | ||
1339 | |||
1340 | if (!ts->rd->f_op || !ts->rd->f_op->poll) | ||
1341 | return -EIO; | ||
1342 | |||
1343 | if (!ts->wr->f_op || !ts->wr->f_op->poll) | ||
1344 | return -EIO; | ||
1345 | 818 | ||
1346 | ret = ts->rd->f_op->poll(ts->rd, pt); | 819 | p9_mux_poll_stop(m); |
1347 | if (ret < 0) | 820 | cancel_work_sync(&m->rq); |
1348 | return ret; | 821 | cancel_work_sync(&m->wq); |
1349 | 822 | ||
1350 | if (ts->rd != ts->wr) { | 823 | p9_conn_cancel(m, -ECONNRESET); |
1351 | n = ts->wr->f_op->poll(ts->wr, pt); | ||
1352 | if (n < 0) | ||
1353 | return n; | ||
1354 | ret = (ret & ~POLLOUT) | (n & ~POLLIN); | ||
1355 | } | ||
1356 | 824 | ||
1357 | return ret; | 825 | m->client = NULL; |
826 | kfree(m); | ||
1358 | } | 827 | } |
1359 | 828 | ||
1360 | /** | 829 | /** |
1361 | * p9_fd_close - shutdown socket | 830 | * p9_fd_close - shutdown file descriptor transport |
1362 | * @trans: private socket structure | 831 | * @client: client instance |
1363 | * | 832 | * |
1364 | */ | 833 | */ |
1365 | 834 | ||
1366 | static void p9_fd_close(struct p9_trans *trans) | 835 | static void p9_fd_close(struct p9_client *client) |
1367 | { | 836 | { |
1368 | struct p9_trans_fd *ts; | 837 | struct p9_trans_fd *ts; |
1369 | 838 | ||
1370 | if (!trans) | 839 | if (!client) |
1371 | return; | 840 | return; |
1372 | 841 | ||
1373 | ts = xchg(&trans->priv, NULL); | 842 | ts = client->trans; |
1374 | |||
1375 | if (!ts) | 843 | if (!ts) |
1376 | return; | 844 | return; |
1377 | 845 | ||
846 | client->status = Disconnected; | ||
847 | |||
1378 | p9_conn_destroy(ts->conn); | 848 | p9_conn_destroy(ts->conn); |
1379 | 849 | ||
1380 | trans->status = Disconnected; | ||
1381 | if (ts->rd) | 850 | if (ts->rd) |
1382 | fput(ts->rd); | 851 | fput(ts->rd); |
1383 | if (ts->wr) | 852 | if (ts->wr) |
1384 | fput(ts->wr); | 853 | fput(ts->wr); |
854 | |||
1385 | kfree(ts); | 855 | kfree(ts); |
1386 | } | 856 | } |
1387 | 857 | ||
@@ -1402,31 +872,23 @@ static inline int valid_ipaddr4(const char *buf) | |||
1402 | return 0; | 872 | return 0; |
1403 | } | 873 | } |
1404 | 874 | ||
1405 | static struct p9_trans * | 875 | static int |
1406 | p9_trans_create_tcp(const char *addr, char *args, int msize, unsigned char dotu) | 876 | p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args) |
1407 | { | 877 | { |
1408 | int err; | 878 | int err; |
1409 | struct p9_trans *trans; | ||
1410 | struct socket *csocket; | 879 | struct socket *csocket; |
1411 | struct sockaddr_in sin_server; | 880 | struct sockaddr_in sin_server; |
1412 | struct p9_fd_opts opts; | 881 | struct p9_fd_opts opts; |
1413 | struct p9_trans_fd *p; | 882 | struct p9_trans_fd *p = NULL; /* this gets allocated in p9_fd_open */ |
1414 | 883 | ||
1415 | err = parse_opts(args, &opts); | 884 | err = parse_opts(args, &opts); |
1416 | if (err < 0) | 885 | if (err < 0) |
1417 | return ERR_PTR(err); | 886 | return err; |
1418 | 887 | ||
1419 | if (valid_ipaddr4(addr) < 0) | 888 | if (valid_ipaddr4(addr) < 0) |
1420 | return ERR_PTR(-EINVAL); | 889 | return -EINVAL; |
1421 | 890 | ||
1422 | csocket = NULL; | 891 | csocket = NULL; |
1423 | trans = kmalloc(sizeof(struct p9_trans), GFP_KERNEL); | ||
1424 | if (!trans) | ||
1425 | return ERR_PTR(-ENOMEM); | ||
1426 | trans->msize = msize; | ||
1427 | trans->extended = dotu; | ||
1428 | trans->rpc = p9_fd_rpc; | ||
1429 | trans->close = p9_fd_close; | ||
1430 | 892 | ||
1431 | sin_server.sin_family = AF_INET; | 893 | sin_server.sin_family = AF_INET; |
1432 | sin_server.sin_addr.s_addr = in_aton(addr); | 894 | sin_server.sin_addr.s_addr = in_aton(addr); |
@@ -1449,45 +911,38 @@ p9_trans_create_tcp(const char *addr, char *args, int msize, unsigned char dotu) | |||
1449 | goto error; | 911 | goto error; |
1450 | } | 912 | } |
1451 | 913 | ||
1452 | err = p9_socket_open(trans, csocket); | 914 | err = p9_socket_open(client, csocket); |
1453 | if (err < 0) | 915 | if (err < 0) |
1454 | goto error; | 916 | goto error; |
1455 | 917 | ||
1456 | p = (struct p9_trans_fd *) trans->priv; | 918 | p = (struct p9_trans_fd *) client->trans; |
1457 | p->conn = p9_conn_create(trans); | 919 | p->conn = p9_conn_create(client); |
1458 | if (IS_ERR(p->conn)) { | 920 | if (IS_ERR(p->conn)) { |
1459 | err = PTR_ERR(p->conn); | 921 | err = PTR_ERR(p->conn); |
1460 | p->conn = NULL; | 922 | p->conn = NULL; |
1461 | goto error; | 923 | goto error; |
1462 | } | 924 | } |
1463 | 925 | ||
1464 | return trans; | 926 | return 0; |
1465 | 927 | ||
1466 | error: | 928 | error: |
1467 | if (csocket) | 929 | if (csocket) |
1468 | sock_release(csocket); | 930 | sock_release(csocket); |
1469 | 931 | ||
1470 | kfree(trans); | 932 | kfree(p); |
1471 | return ERR_PTR(err); | 933 | |
934 | return err; | ||
1472 | } | 935 | } |
1473 | 936 | ||
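For context (not part of the patch itself): the tcp transport created above is the one selected with the trans=tcp mount option, and port= feeds the same parse_opts() used here. A minimal userspace sketch, with a made-up server address and mount point:

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* equivalent to: mount -t 9p -o trans=tcp,port=564 192.168.1.10 /mnt/9p */
	if (mount("192.168.1.10", "/mnt/9p", "9p", 0, "trans=tcp,port=564") < 0) {
		perror("mount 9p over tcp");
		return 1;
	}
	return 0;
}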
1474 | static struct p9_trans * | 937 | static int |
1475 | p9_trans_create_unix(const char *addr, char *args, int msize, | 938 | p9_fd_create_unix(struct p9_client *client, const char *addr, char *args) |
1476 | unsigned char dotu) | ||
1477 | { | 939 | { |
1478 | int err; | 940 | int err; |
1479 | struct socket *csocket; | 941 | struct socket *csocket; |
1480 | struct sockaddr_un sun_server; | 942 | struct sockaddr_un sun_server; |
1481 | struct p9_trans *trans; | 943 | struct p9_trans_fd *p = NULL; /* this gets allocated in p9_fd_open */ |
1482 | struct p9_trans_fd *p; | ||
1483 | 944 | ||
1484 | csocket = NULL; | 945 | csocket = NULL; |
1485 | trans = kmalloc(sizeof(struct p9_trans), GFP_KERNEL); | ||
1486 | if (!trans) | ||
1487 | return ERR_PTR(-ENOMEM); | ||
1488 | |||
1489 | trans->rpc = p9_fd_rpc; | ||
1490 | trans->close = p9_fd_close; | ||
1491 | 946 | ||
1492 | if (strlen(addr) > UNIX_PATH_MAX) { | 947 | if (strlen(addr) > UNIX_PATH_MAX) { |
1493 | P9_EPRINTK(KERN_ERR, "p9_trans_unix: address too long: %s\n", | 948 | P9_EPRINTK(KERN_ERR, "p9_trans_unix: address too long: %s\n", |
@@ -1508,79 +963,69 @@ p9_trans_create_unix(const char *addr, char *args, int msize, | |||
1508 | goto error; | 963 | goto error; |
1509 | } | 964 | } |
1510 | 965 | ||
1511 | err = p9_socket_open(trans, csocket); | 966 | err = p9_socket_open(client, csocket); |
1512 | if (err < 0) | 967 | if (err < 0) |
1513 | goto error; | 968 | goto error; |
1514 | 969 | ||
1515 | trans->msize = msize; | 970 | p = (struct p9_trans_fd *) client->trans; |
1516 | trans->extended = dotu; | 971 | p->conn = p9_conn_create(client); |
1517 | p = (struct p9_trans_fd *) trans->priv; | ||
1518 | p->conn = p9_conn_create(trans); | ||
1519 | if (IS_ERR(p->conn)) { | 972 | if (IS_ERR(p->conn)) { |
1520 | err = PTR_ERR(p->conn); | 973 | err = PTR_ERR(p->conn); |
1521 | p->conn = NULL; | 974 | p->conn = NULL; |
1522 | goto error; | 975 | goto error; |
1523 | } | 976 | } |
1524 | 977 | ||
1525 | return trans; | 978 | return 0; |
1526 | 979 | ||
1527 | error: | 980 | error: |
1528 | if (csocket) | 981 | if (csocket) |
1529 | sock_release(csocket); | 982 | sock_release(csocket); |
1530 | 983 | ||
1531 | kfree(trans); | 984 | kfree(p); |
1532 | return ERR_PTR(err); | 985 | return err; |
1533 | } | 986 | } |
1534 | 987 | ||
1535 | static struct p9_trans * | 988 | static int |
1536 | p9_trans_create_fd(const char *name, char *args, int msize, | 989 | p9_fd_create(struct p9_client *client, const char *addr, char *args) |
1537 | unsigned char extended) | ||
1538 | { | 990 | { |
1539 | int err; | 991 | int err; |
1540 | struct p9_trans *trans; | ||
1541 | struct p9_fd_opts opts; | 992 | struct p9_fd_opts opts; |
1542 | struct p9_trans_fd *p; | 993 | struct p9_trans_fd *p = NULL; /* this gets allocated in p9_fd_open */ |
1543 | 994 | ||
1544 | parse_opts(args, &opts); | 995 | parse_opts(args, &opts); |
1545 | 996 | ||
1546 | if (opts.rfd == ~0 || opts.wfd == ~0) { | 997 | if (opts.rfd == ~0 || opts.wfd == ~0) { |
1547 | printk(KERN_ERR "v9fs: Insufficient options for proto=fd\n"); | 998 | printk(KERN_ERR "v9fs: Insufficient options for proto=fd\n"); |
1548 | return ERR_PTR(-ENOPROTOOPT); | 999 | return -ENOPROTOOPT; |
1549 | } | 1000 | } |
1550 | 1001 | ||
1551 | trans = kmalloc(sizeof(struct p9_trans), GFP_KERNEL); | 1002 | err = p9_fd_open(client, opts.rfd, opts.wfd); |
1552 | if (!trans) | ||
1553 | return ERR_PTR(-ENOMEM); | ||
1554 | |||
1555 | trans->rpc = p9_fd_rpc; | ||
1556 | trans->close = p9_fd_close; | ||
1557 | |||
1558 | err = p9_fd_open(trans, opts.rfd, opts.wfd); | ||
1559 | if (err < 0) | 1003 | if (err < 0) |
1560 | goto error; | 1004 | goto error; |
1561 | 1005 | ||
1562 | trans->msize = msize; | 1006 | p = (struct p9_trans_fd *) client->trans; |
1563 | trans->extended = extended; | 1007 | p->conn = p9_conn_create(client); |
1564 | p = (struct p9_trans_fd *) trans->priv; | ||
1565 | p->conn = p9_conn_create(trans); | ||
1566 | if (IS_ERR(p->conn)) { | 1008 | if (IS_ERR(p->conn)) { |
1567 | err = PTR_ERR(p->conn); | 1009 | err = PTR_ERR(p->conn); |
1568 | p->conn = NULL; | 1010 | p->conn = NULL; |
1569 | goto error; | 1011 | goto error; |
1570 | } | 1012 | } |
1571 | 1013 | ||
1572 | return trans; | 1014 | return 0; |
1573 | 1015 | ||
1574 | error: | 1016 | error: |
1575 | kfree(trans); | 1017 | kfree(p); |
1576 | return ERR_PTR(err); | 1018 | return err; |
1577 | } | 1019 | } |
1578 | 1020 | ||
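The plain fd transport above takes no address at all; rfd= and wfd= must name descriptors the caller has already opened and connected to a 9P server, otherwise p9_fd_create() fails with -ENOPROTOOPT as shown. A hedged userspace sketch (the helper name and mount point are illustrative; the same descriptor may serve both directions):

#include <stdio.h>
#include <sys/mount.h>

/* sock_fd is assumed to already be connected to a 9P server */
static int mount_9p_over_fd(int sock_fd)
{
	char opts[64];

	snprintf(opts, sizeof(opts), "trans=fd,rfd=%d,wfd=%d", sock_fd, sock_fd);
	return mount("nodev", "/mnt/9p", "9p", 0, opts);
}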
1579 | static struct p9_trans_module p9_tcp_trans = { | 1021 | static struct p9_trans_module p9_tcp_trans = { |
1580 | .name = "tcp", | 1022 | .name = "tcp", |
1581 | .maxsize = MAX_SOCK_BUF, | 1023 | .maxsize = MAX_SOCK_BUF, |
1582 | .def = 1, | 1024 | .def = 1, |
1583 | .create = p9_trans_create_tcp, | 1025 | .create = p9_fd_create_tcp, |
1026 | .close = p9_fd_close, | ||
1027 | .request = p9_fd_request, | ||
1028 | .cancel = p9_fd_cancel, | ||
1584 | .owner = THIS_MODULE, | 1029 | .owner = THIS_MODULE, |
1585 | }; | 1030 | }; |
1586 | 1031 | ||
@@ -1588,7 +1033,10 @@ static struct p9_trans_module p9_unix_trans = { | |||
1588 | .name = "unix", | 1033 | .name = "unix", |
1589 | .maxsize = MAX_SOCK_BUF, | 1034 | .maxsize = MAX_SOCK_BUF, |
1590 | .def = 0, | 1035 | .def = 0, |
1591 | .create = p9_trans_create_unix, | 1036 | .create = p9_fd_create_unix, |
1037 | .close = p9_fd_close, | ||
1038 | .request = p9_fd_request, | ||
1039 | .cancel = p9_fd_cancel, | ||
1592 | .owner = THIS_MODULE, | 1040 | .owner = THIS_MODULE, |
1593 | }; | 1041 | }; |
1594 | 1042 | ||
@@ -1596,23 +1044,71 @@ static struct p9_trans_module p9_fd_trans = { | |||
1596 | .name = "fd", | 1044 | .name = "fd", |
1597 | .maxsize = MAX_SOCK_BUF, | 1045 | .maxsize = MAX_SOCK_BUF, |
1598 | .def = 0, | 1046 | .def = 0, |
1599 | .create = p9_trans_create_fd, | 1047 | .create = p9_fd_create, |
1048 | .close = p9_fd_close, | ||
1049 | .request = p9_fd_request, | ||
1050 | .cancel = p9_fd_cancel, | ||
1600 | .owner = THIS_MODULE, | 1051 | .owner = THIS_MODULE, |
1601 | }; | 1052 | }; |
1602 | 1053 | ||
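Taken together, the three p9_trans_module definitions show the shape of the reworked transport API: instead of a per-connection struct p9_trans carrying rpc/close hooks, a transport now registers create/close/request/cancel callbacks and is handed the struct p9_client directly. A skeleton of such a module is sketched below; the request/cancel prototypes follow the p9_client/p9_req_t pairing assumed elsewhere in this patch, and the callback bodies are placeholders, not code from it.

#include <linux/module.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
#include <net/9p/transport.h>

static int ex_create(struct p9_client *client, const char *addr, char *args)
{
	/* allocate per-connection state and hang it off client->trans */
	return 0;
}

static void ex_close(struct p9_client *client)
{
	/* tear down whatever ex_create attached to client->trans */
}

static int ex_request(struct p9_client *client, struct p9_req_t *req)
{
	/* queue req for transmission; completion is signalled asynchronously */
	return 0;
}

static int ex_cancel(struct p9_client *client, struct p9_req_t *req)
{
	/* try to pull req back before it has been written; 0 on success */
	return 0;
}

static struct p9_trans_module ex_trans = {
	.name = "example",
	.maxsize = 64*1024,	/* same value as MAX_SOCK_BUF above */
	.def = 0,
	.create = ex_create,
	.close = ex_close,
	.request = ex_request,
	.cancel = ex_cancel,
	.owner = THIS_MODULE,
};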
1603 | int p9_trans_fd_init(void) | 1054 | /** |
1055 | * p9_poll_proc - poll worker thread | ||
1056 | * @a: thread state and arguments | ||
1057 | * | ||
1058 | * polls all v9fs transports for new events and queues the appropriate | ||
1059 | * work to the work queue | ||
1060 | * | ||
1061 | */ | ||
1062 | |||
1063 | static int p9_poll_proc(void *a) | ||
1604 | { | 1064 | { |
1605 | int i; | 1065 | unsigned long flags; |
1066 | |||
1067 | P9_DPRINTK(P9_DEBUG_TRANS, "start %p\n", current); | ||
1068 | repeat: | ||
1069 | spin_lock_irqsave(&p9_poll_lock, flags); | ||
1070 | while (!list_empty(&p9_poll_pending_list)) { | ||
1071 | struct p9_conn *conn = list_first_entry(&p9_poll_pending_list, | ||
1072 | struct p9_conn, | ||
1073 | poll_pending_link); | ||
1074 | list_del_init(&conn->poll_pending_link); | ||
1075 | spin_unlock_irqrestore(&p9_poll_lock, flags); | ||
1076 | |||
1077 | p9_poll_mux(conn); | ||
1078 | |||
1079 | spin_lock_irqsave(&p9_poll_lock, flags); | ||
1080 | } | ||
1081 | spin_unlock_irqrestore(&p9_poll_lock, flags); | ||
1082 | |||
1083 | set_current_state(TASK_INTERRUPTIBLE); | ||
1084 | if (list_empty(&p9_poll_pending_list)) { | ||
1085 | P9_DPRINTK(P9_DEBUG_TRANS, "sleeping...\n"); | ||
1086 | schedule(); | ||
1087 | } | ||
1088 | __set_current_state(TASK_RUNNING); | ||
1089 | |||
1090 | if (!kthread_should_stop()) | ||
1091 | goto repeat; | ||
1606 | 1092 | ||
1607 | for (i = 0; i < ARRAY_SIZE(p9_mux_poll_tasks); i++) | 1093 | P9_DPRINTK(P9_DEBUG_TRANS, "finish\n"); |
1608 | p9_mux_poll_tasks[i].task = NULL; | 1094 | return 0; |
1095 | } | ||
1609 | 1096 | ||
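One detail of p9_poll_proc() worth spelling out: the task sets TASK_INTERRUPTIBLE before re-checking p9_poll_pending_list, so a wakeup racing with that check simply puts the task back to TASK_RUNNING and schedule() returns immediately instead of the event being lost. The same idiom in isolation, with hypothetical stand-ins for the pending-work helpers:

#include <linux/kthread.h>
#include <linux/sched.h>

static bool have_pending_work(void);	/* hypothetical */
static void do_pending_work(void);	/* hypothetical */

static int example_poll_proc(void *unused)
{
	while (!kthread_should_stop()) {
		while (have_pending_work())
			do_pending_work();

		/* publish the sleeping state before the final re-check */
		set_current_state(TASK_INTERRUPTIBLE);
		if (!have_pending_work() && !kthread_should_stop())
			schedule();
		__set_current_state(TASK_RUNNING);
	}
	return 0;
}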
1097 | int p9_trans_fd_init(void) | ||
1098 | { | ||
1610 | p9_mux_wq = create_workqueue("v9fs"); | 1099 | p9_mux_wq = create_workqueue("v9fs"); |
1611 | if (!p9_mux_wq) { | 1100 | if (!p9_mux_wq) { |
1612 | printk(KERN_WARNING "v9fs: mux: creating workqueue failed\n"); | 1101 | printk(KERN_WARNING "v9fs: mux: creating workqueue failed\n"); |
1613 | return -ENOMEM; | 1102 | return -ENOMEM; |
1614 | } | 1103 | } |
1615 | 1104 | ||
1105 | p9_poll_task = kthread_run(p9_poll_proc, NULL, "v9fs-poll"); | ||
1106 | if (IS_ERR(p9_poll_task)) { | ||
1107 | destroy_workqueue(p9_mux_wq); | ||
1108 | printk(KERN_WARNING "v9fs: mux: creating poll task failed\n"); | ||
1109 | return PTR_ERR(p9_poll_task); | ||
1110 | } | ||
1111 | |||
1616 | v9fs_register_trans(&p9_tcp_trans); | 1112 | v9fs_register_trans(&p9_tcp_trans); |
1617 | v9fs_register_trans(&p9_unix_trans); | 1113 | v9fs_register_trans(&p9_unix_trans); |
1618 | v9fs_register_trans(&p9_fd_trans); | 1114 | v9fs_register_trans(&p9_fd_trans); |
@@ -1622,6 +1118,7 @@ int p9_trans_fd_init(void) | |||
1622 | 1118 | ||
1623 | void p9_trans_fd_exit(void) | 1119 | void p9_trans_fd_exit(void) |
1624 | { | 1120 | { |
1121 | kthread_stop(p9_poll_task); | ||
1625 | v9fs_unregister_trans(&p9_tcp_trans); | 1122 | v9fs_unregister_trans(&p9_tcp_trans); |
1626 | v9fs_unregister_trans(&p9_unix_trans); | 1123 | v9fs_unregister_trans(&p9_unix_trans); |
1627 | v9fs_unregister_trans(&p9_fd_trans); | 1124 | v9fs_unregister_trans(&p9_fd_trans); |