author | Eric Van Hensbergen <ericvh@opteron.homeip.net> | 2008-02-06 20:25:03 -0500
---|---|---
committer | Eric Van Hensbergen <ericvh@opteron.homeip.net> | 2008-02-06 20:25:03 -0500
commit | 8a0dc95fd976a052e5e799ef33e6c8e3141b5dff |
tree | 3275903539244acd76c716662c324833aa419377 /net/9p/trans_fd.c |
parent | f39335453fe79f4e12e263e7c6387dc9fb86bfff |
9p: transport API reorganization
This merges mux.c (including the connection interface) into trans_fd
in preparation for transport API changes. Ultimately, trans_fd will need
to be rewritten to clean it up and simplify the implementation, but this
reorganization is viewed as the first step.
Signed-off-by: Eric Van Hensbergen <ericvh@gmail.com>
Diffstat (limited to 'net/9p/trans_fd.c')
-rw-r--r-- | net/9p/trans_fd.c | 1103 |
1 file changed, 1081 insertions, 22 deletions
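The hunks below drop the byte-level read/write/poll hooks from the fd transport and replace them with a single `rpc` entry point backed by the mux code folded in from mux.c. A minimal sketch of that interface change, for orientation only: the `p9_trans_ops_old`/`p9_trans_ops_new` names are invented for contrast, and the real `struct p9_trans` (defined in the 9p headers) carries more state than shown here.

```c
/* Illustrative only: in the diff below these callbacks are assigned
 * directly on struct p9_trans; the *_old/*_new struct names are made up. */

struct p9_trans;            /* generic transport, defined in the 9p headers */
struct p9_fcall;            /* marshalled 9P message */
struct poll_table_struct;

/* Before: the client core multiplexed requests itself (net/9p/mux.c) and
 * drove the transport through byte-level callbacks. */
struct p9_trans_ops_old {
	int (*write)(struct p9_trans *trans, void *v, int len);
	int (*read)(struct p9_trans *trans, void *v, int len);
	unsigned int (*poll)(struct p9_trans *trans,
			     struct poll_table_struct *pt);
	void (*close)(struct p9_trans *trans);
};

/* After: trans_fd owns the connection state (struct p9_conn, merged in from
 * mux.c and reachable via p9_trans_fd->conn) and exposes one blocking RPC. */
struct p9_trans_ops_new {
	int (*rpc)(struct p9_trans *trans, struct p9_fcall *tc,
		   struct p9_fcall **rc);
	void (*close)(struct p9_trans *trans);
};
```

Accordingly, the create functions (`p9_trans_create_tcp`, `p9_trans_create_unix`, `p9_trans_create_fd`) gain `msize` and `dotu`/`extended` parameters, so the connection is sized when the transport is created rather than configured by the caller afterwards.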
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index 62332ed9da4a..1aa9d5175398 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -5,7 +5,7 @@ | |||
5 | * | 5 | * |
6 | * Copyright (C) 2006 by Russ Cox <rsc@swtch.com> | 6 | * Copyright (C) 2006 by Russ Cox <rsc@swtch.com> |
7 | * Copyright (C) 2004-2005 by Latchesar Ionkov <lucho@ionkov.net> | 7 | * Copyright (C) 2004-2005 by Latchesar Ionkov <lucho@ionkov.net> |
8 | * Copyright (C) 2004-2007 by Eric Van Hensbergen <ericvh@gmail.com> | 8 | * Copyright (C) 2004-2008 by Eric Van Hensbergen <ericvh@gmail.com> |
9 | * Copyright (C) 1997-2002 by Ron Minnich <rminnich@sarnoff.com> | 9 | * Copyright (C) 1997-2002 by Ron Minnich <rminnich@sarnoff.com> |
10 | * | 10 | * |
11 | * This program is free software; you can redistribute it and/or modify | 11 | * This program is free software; you can redistribute it and/or modify |
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/module.h> | 29 | #include <linux/module.h> |
30 | #include <linux/net.h> | 30 | #include <linux/net.h> |
31 | #include <linux/ipv6.h> | 31 | #include <linux/ipv6.h> |
32 | #include <linux/kthread.h> | ||
32 | #include <linux/errno.h> | 33 | #include <linux/errno.h> |
33 | #include <linux/kernel.h> | 34 | #include <linux/kernel.h> |
34 | #include <linux/un.h> | 35 | #include <linux/un.h> |
@@ -42,7 +43,9 @@ | |||
42 | 43 | ||
43 | #define P9_PORT 564 | 44 | #define P9_PORT 564 |
44 | #define MAX_SOCK_BUF (64*1024) | 45 | #define MAX_SOCK_BUF (64*1024) |
45 | 46 | #define ERREQFLUSH 1 | |
47 | #define SCHED_TIMEOUT 10 | ||
48 | #define MAXPOLLWADDR 2 | ||
46 | 49 | ||
47 | struct p9_fd_opts { | 50 | struct p9_fd_opts { |
48 | int rfd; | 51 | int rfd; |
@@ -53,6 +56,7 @@ struct p9_fd_opts { | |||
53 | struct p9_trans_fd { | 56 | struct p9_trans_fd { |
54 | struct file *rd; | 57 | struct file *rd; |
55 | struct file *wr; | 58 | struct file *wr; |
59 | struct p9_conn *conn; | ||
56 | }; | 60 | }; |
57 | 61 | ||
58 | /* | 62 | /* |
@@ -72,6 +76,1028 @@ static match_table_t tokens = { | |||
72 | {Opt_err, NULL}, | 76 | {Opt_err, NULL}, |
73 | }; | 77 | }; |
74 | 78 | ||
79 | enum { | ||
80 | Rworksched = 1, /* read work scheduled or running */ | ||
81 | Rpending = 2, /* can read */ | ||
82 | Wworksched = 4, /* write work scheduled or running */ | ||
83 | Wpending = 8, /* can write */ | ||
84 | }; | ||
85 | |||
86 | enum { | ||
87 | None, | ||
88 | Flushing, | ||
89 | Flushed, | ||
90 | }; | ||
91 | |||
92 | struct p9_req; | ||
93 | |||
94 | typedef void (*p9_conn_req_callback)(struct p9_req *req, void *a); | ||
95 | struct p9_req { | ||
96 | spinlock_t lock; /* protect request structure */ | ||
97 | int tag; | ||
98 | struct p9_fcall *tcall; | ||
99 | struct p9_fcall *rcall; | ||
100 | int err; | ||
101 | p9_conn_req_callback cb; | ||
102 | void *cba; | ||
103 | int flush; | ||
104 | struct list_head req_list; | ||
105 | }; | ||
106 | |||
107 | struct p9_mux_poll_task; | ||
108 | |||
109 | struct p9_conn { | ||
110 | spinlock_t lock; /* protect lock structure */ | ||
111 | struct list_head mux_list; | ||
112 | struct p9_mux_poll_task *poll_task; | ||
113 | int msize; | ||
114 | unsigned char extended; | ||
115 | struct p9_trans *trans; | ||
116 | struct p9_idpool *tagpool; | ||
117 | int err; | ||
118 | wait_queue_head_t equeue; | ||
119 | struct list_head req_list; | ||
120 | struct list_head unsent_req_list; | ||
121 | struct p9_fcall *rcall; | ||
122 | int rpos; | ||
123 | char *rbuf; | ||
124 | int wpos; | ||
125 | int wsize; | ||
126 | char *wbuf; | ||
127 | wait_queue_t poll_wait[MAXPOLLWADDR]; | ||
128 | wait_queue_head_t *poll_waddr[MAXPOLLWADDR]; | ||
129 | poll_table pt; | ||
130 | struct work_struct rq; | ||
131 | struct work_struct wq; | ||
132 | unsigned long wsched; | ||
133 | }; | ||
134 | |||
135 | struct p9_mux_poll_task { | ||
136 | struct task_struct *task; | ||
137 | struct list_head mux_list; | ||
138 | int muxnum; | ||
139 | }; | ||
140 | |||
141 | struct p9_mux_rpc { | ||
142 | struct p9_conn *m; | ||
143 | int err; | ||
144 | struct p9_fcall *tcall; | ||
145 | struct p9_fcall *rcall; | ||
146 | wait_queue_head_t wqueue; | ||
147 | }; | ||
148 | |||
149 | static int p9_poll_proc(void *); | ||
150 | static void p9_read_work(struct work_struct *work); | ||
151 | static void p9_write_work(struct work_struct *work); | ||
152 | static void p9_pollwait(struct file *filp, wait_queue_head_t *wait_address, | ||
153 | poll_table *p); | ||
154 | static int p9_fd_write(struct p9_trans *trans, void *v, int len); | ||
155 | static int p9_fd_read(struct p9_trans *trans, void *v, int len); | ||
156 | |||
157 | static DEFINE_MUTEX(p9_mux_task_lock); | ||
158 | static struct workqueue_struct *p9_mux_wq; | ||
159 | |||
160 | static int p9_mux_num; | ||
161 | static int p9_mux_poll_task_num; | ||
162 | static struct p9_mux_poll_task p9_mux_poll_tasks[100]; | ||
163 | |||
164 | static void p9_conn_destroy(struct p9_conn *); | ||
165 | static unsigned int p9_fd_poll(struct p9_trans *trans, | ||
166 | struct poll_table_struct *pt); | ||
167 | |||
168 | #ifdef P9_NONBLOCK | ||
169 | static int p9_conn_rpcnb(struct p9_conn *m, struct p9_fcall *tc, | ||
170 | p9_conn_req_callback cb, void *a); | ||
171 | #endif /* P9_NONBLOCK */ | ||
172 | |||
173 | static void p9_conn_cancel(struct p9_conn *m, int err); | ||
174 | |||
175 | static int p9_mux_global_init(void) | ||
176 | { | ||
177 | int i; | ||
178 | |||
179 | for (i = 0; i < ARRAY_SIZE(p9_mux_poll_tasks); i++) | ||
180 | p9_mux_poll_tasks[i].task = NULL; | ||
181 | |||
182 | p9_mux_wq = create_workqueue("v9fs"); | ||
183 | if (!p9_mux_wq) { | ||
184 | printk(KERN_WARNING "v9fs: mux: creating workqueue failed\n"); | ||
185 | return -ENOMEM; | ||
186 | } | ||
187 | |||
188 | return 0; | ||
189 | } | ||
190 | |||
191 | static u16 p9_mux_get_tag(struct p9_conn *m) | ||
192 | { | ||
193 | int tag; | ||
194 | |||
195 | tag = p9_idpool_get(m->tagpool); | ||
196 | if (tag < 0) | ||
197 | return P9_NOTAG; | ||
198 | else | ||
199 | return (u16) tag; | ||
200 | } | ||
201 | |||
202 | static void p9_mux_put_tag(struct p9_conn *m, u16 tag) | ||
203 | { | ||
204 | if (tag != P9_NOTAG && p9_idpool_check(tag, m->tagpool)) | ||
205 | p9_idpool_put(tag, m->tagpool); | ||
206 | } | ||
207 | |||
208 | /** | ||
209 | * p9_mux_calc_poll_procs - calculates the number of polling procs | ||
210 | * based on the number of mounted v9fs filesystems. | ||
211 | * | ||
212 | * The current implementation returns sqrt of the number of mounts. | ||
213 | */ | ||
214 | static int p9_mux_calc_poll_procs(int muxnum) | ||
215 | { | ||
216 | int n; | ||
217 | |||
218 | if (p9_mux_poll_task_num) | ||
219 | n = muxnum / p9_mux_poll_task_num + | ||
220 | (muxnum % p9_mux_poll_task_num ? 1 : 0); | ||
221 | else | ||
222 | n = 1; | ||
223 | |||
224 | if (n > ARRAY_SIZE(p9_mux_poll_tasks)) | ||
225 | n = ARRAY_SIZE(p9_mux_poll_tasks); | ||
226 | |||
227 | return n; | ||
228 | } | ||
229 | |||
230 | static int p9_mux_poll_start(struct p9_conn *m) | ||
231 | { | ||
232 | int i, n; | ||
233 | struct p9_mux_poll_task *vpt, *vptlast; | ||
234 | struct task_struct *pproc; | ||
235 | |||
236 | P9_DPRINTK(P9_DEBUG_MUX, "mux %p muxnum %d procnum %d\n", m, p9_mux_num, | ||
237 | p9_mux_poll_task_num); | ||
238 | mutex_lock(&p9_mux_task_lock); | ||
239 | |||
240 | n = p9_mux_calc_poll_procs(p9_mux_num + 1); | ||
241 | if (n > p9_mux_poll_task_num) { | ||
242 | for (i = 0; i < ARRAY_SIZE(p9_mux_poll_tasks); i++) { | ||
243 | if (p9_mux_poll_tasks[i].task == NULL) { | ||
244 | vpt = &p9_mux_poll_tasks[i]; | ||
245 | P9_DPRINTK(P9_DEBUG_MUX, "create proc %p\n", | ||
246 | vpt); | ||
247 | pproc = kthread_create(p9_poll_proc, vpt, | ||
248 | "v9fs-poll"); | ||
249 | |||
250 | if (!IS_ERR(pproc)) { | ||
251 | vpt->task = pproc; | ||
252 | INIT_LIST_HEAD(&vpt->mux_list); | ||
253 | vpt->muxnum = 0; | ||
254 | p9_mux_poll_task_num++; | ||
255 | wake_up_process(vpt->task); | ||
256 | } | ||
257 | break; | ||
258 | } | ||
259 | } | ||
260 | |||
261 | if (i >= ARRAY_SIZE(p9_mux_poll_tasks)) | ||
262 | P9_DPRINTK(P9_DEBUG_ERROR, | ||
263 | "warning: no free poll slots\n"); | ||
264 | } | ||
265 | |||
266 | n = (p9_mux_num + 1) / p9_mux_poll_task_num + | ||
267 | ((p9_mux_num + 1) % p9_mux_poll_task_num ? 1 : 0); | ||
268 | |||
269 | vptlast = NULL; | ||
270 | for (i = 0; i < ARRAY_SIZE(p9_mux_poll_tasks); i++) { | ||
271 | vpt = &p9_mux_poll_tasks[i]; | ||
272 | if (vpt->task != NULL) { | ||
273 | vptlast = vpt; | ||
274 | if (vpt->muxnum < n) { | ||
275 | P9_DPRINTK(P9_DEBUG_MUX, "put in proc %d\n", i); | ||
276 | list_add(&m->mux_list, &vpt->mux_list); | ||
277 | vpt->muxnum++; | ||
278 | m->poll_task = vpt; | ||
279 | memset(&m->poll_waddr, 0, | ||
280 | sizeof(m->poll_waddr)); | ||
281 | init_poll_funcptr(&m->pt, p9_pollwait); | ||
282 | break; | ||
283 | } | ||
284 | } | ||
285 | } | ||
286 | |||
287 | if (i >= ARRAY_SIZE(p9_mux_poll_tasks)) { | ||
288 | if (vptlast == NULL) { | ||
289 | mutex_unlock(&p9_mux_task_lock); | ||
290 | return -ENOMEM; | ||
291 | } | ||
292 | |||
293 | P9_DPRINTK(P9_DEBUG_MUX, "put in proc %d\n", i); | ||
294 | list_add(&m->mux_list, &vptlast->mux_list); | ||
295 | vptlast->muxnum++; | ||
296 | m->poll_task = vptlast; | ||
297 | memset(&m->poll_waddr, 0, sizeof(m->poll_waddr)); | ||
298 | init_poll_funcptr(&m->pt, p9_pollwait); | ||
299 | } | ||
300 | |||
301 | p9_mux_num++; | ||
302 | mutex_unlock(&p9_mux_task_lock); | ||
303 | |||
304 | return 0; | ||
305 | } | ||
306 | |||
307 | static void p9_mux_poll_stop(struct p9_conn *m) | ||
308 | { | ||
309 | int i; | ||
310 | struct p9_mux_poll_task *vpt; | ||
311 | |||
312 | mutex_lock(&p9_mux_task_lock); | ||
313 | vpt = m->poll_task; | ||
314 | list_del(&m->mux_list); | ||
315 | for (i = 0; i < ARRAY_SIZE(m->poll_waddr); i++) { | ||
316 | if (m->poll_waddr[i] != NULL) { | ||
317 | remove_wait_queue(m->poll_waddr[i], &m->poll_wait[i]); | ||
318 | m->poll_waddr[i] = NULL; | ||
319 | } | ||
320 | } | ||
321 | vpt->muxnum--; | ||
322 | if (!vpt->muxnum) { | ||
323 | P9_DPRINTK(P9_DEBUG_MUX, "destroy proc %p\n", vpt); | ||
324 | kthread_stop(vpt->task); | ||
325 | vpt->task = NULL; | ||
326 | p9_mux_poll_task_num--; | ||
327 | } | ||
328 | p9_mux_num--; | ||
329 | mutex_unlock(&p9_mux_task_lock); | ||
330 | } | ||
331 | |||
332 | /** | ||
333 | * p9_conn_create - allocate and initialize the per-session mux data | ||
334 | * Creates the polling task if this is the first session. | ||
335 | * | ||
336 | * @trans - transport structure | ||
337 | * @msize - maximum message size | ||
338 | * @extended - extended flag | ||
339 | */ | ||
340 | static struct p9_conn *p9_conn_create(struct p9_trans *trans) | ||
341 | { | ||
342 | int i, n; | ||
343 | struct p9_conn *m, *mtmp; | ||
344 | |||
345 | P9_DPRINTK(P9_DEBUG_MUX, "transport %p msize %d\n", trans, | ||
346 | trans->msize); | ||
347 | m = kmalloc(sizeof(struct p9_conn), GFP_KERNEL); | ||
348 | if (!m) | ||
349 | return ERR_PTR(-ENOMEM); | ||
350 | |||
351 | spin_lock_init(&m->lock); | ||
352 | INIT_LIST_HEAD(&m->mux_list); | ||
353 | m->msize = trans->msize; | ||
354 | m->extended = trans->extended; | ||
355 | m->trans = trans; | ||
356 | m->tagpool = p9_idpool_create(); | ||
357 | if (IS_ERR(m->tagpool)) { | ||
358 | mtmp = ERR_PTR(-ENOMEM); | ||
359 | kfree(m); | ||
360 | return mtmp; | ||
361 | } | ||
362 | |||
363 | m->err = 0; | ||
364 | init_waitqueue_head(&m->equeue); | ||
365 | INIT_LIST_HEAD(&m->req_list); | ||
366 | INIT_LIST_HEAD(&m->unsent_req_list); | ||
367 | m->rcall = NULL; | ||
368 | m->rpos = 0; | ||
369 | m->rbuf = NULL; | ||
370 | m->wpos = m->wsize = 0; | ||
371 | m->wbuf = NULL; | ||
372 | INIT_WORK(&m->rq, p9_read_work); | ||
373 | INIT_WORK(&m->wq, p9_write_work); | ||
374 | m->wsched = 0; | ||
375 | memset(&m->poll_waddr, 0, sizeof(m->poll_waddr)); | ||
376 | m->poll_task = NULL; | ||
377 | n = p9_mux_poll_start(m); | ||
378 | if (n) { | ||
379 | kfree(m); | ||
380 | return ERR_PTR(n); | ||
381 | } | ||
382 | |||
383 | n = p9_fd_poll(trans, &m->pt); | ||
384 | if (n & POLLIN) { | ||
385 | P9_DPRINTK(P9_DEBUG_MUX, "mux %p can read\n", m); | ||
386 | set_bit(Rpending, &m->wsched); | ||
387 | } | ||
388 | |||
389 | if (n & POLLOUT) { | ||
390 | P9_DPRINTK(P9_DEBUG_MUX, "mux %p can write\n", m); | ||
391 | set_bit(Wpending, &m->wsched); | ||
392 | } | ||
393 | |||
394 | for (i = 0; i < ARRAY_SIZE(m->poll_waddr); i++) { | ||
395 | if (IS_ERR(m->poll_waddr[i])) { | ||
396 | p9_mux_poll_stop(m); | ||
397 | mtmp = (void *)m->poll_waddr; /* the error code */ | ||
398 | kfree(m); | ||
399 | m = mtmp; | ||
400 | break; | ||
401 | } | ||
402 | } | ||
403 | |||
404 | return m; | ||
405 | } | ||
406 | |||
407 | /** | ||
408 | * p9_mux_destroy - cancels all pending requests and frees mux resources | ||
409 | */ | ||
410 | static void p9_conn_destroy(struct p9_conn *m) | ||
411 | { | ||
412 | P9_DPRINTK(P9_DEBUG_MUX, "mux %p prev %p next %p\n", m, | ||
413 | m->mux_list.prev, m->mux_list.next); | ||
414 | p9_conn_cancel(m, -ECONNRESET); | ||
415 | |||
416 | if (!list_empty(&m->req_list)) { | ||
417 | /* wait until all processes waiting on this session exit */ | ||
418 | P9_DPRINTK(P9_DEBUG_MUX, | ||
419 | "mux %p waiting for empty request queue\n", m); | ||
420 | wait_event_timeout(m->equeue, (list_empty(&m->req_list)), 5000); | ||
421 | P9_DPRINTK(P9_DEBUG_MUX, "mux %p request queue empty: %d\n", m, | ||
422 | list_empty(&m->req_list)); | ||
423 | } | ||
424 | |||
425 | p9_mux_poll_stop(m); | ||
426 | m->trans = NULL; | ||
427 | p9_idpool_destroy(m->tagpool); | ||
428 | kfree(m); | ||
429 | } | ||
430 | |||
431 | /** | ||
432 | * p9_pollwait - called by files poll operation to add v9fs-poll task | ||
433 | * to files wait queue | ||
434 | */ | ||
435 | static void | ||
436 | p9_pollwait(struct file *filp, wait_queue_head_t *wait_address, poll_table *p) | ||
437 | { | ||
438 | int i; | ||
439 | struct p9_conn *m; | ||
440 | |||
441 | m = container_of(p, struct p9_conn, pt); | ||
442 | for (i = 0; i < ARRAY_SIZE(m->poll_waddr); i++) | ||
443 | if (m->poll_waddr[i] == NULL) | ||
444 | break; | ||
445 | |||
446 | if (i >= ARRAY_SIZE(m->poll_waddr)) { | ||
447 | P9_DPRINTK(P9_DEBUG_ERROR, "not enough wait_address slots\n"); | ||
448 | return; | ||
449 | } | ||
450 | |||
451 | m->poll_waddr[i] = wait_address; | ||
452 | |||
453 | if (!wait_address) { | ||
454 | P9_DPRINTK(P9_DEBUG_ERROR, "no wait_address\n"); | ||
455 | m->poll_waddr[i] = ERR_PTR(-EIO); | ||
456 | return; | ||
457 | } | ||
458 | |||
459 | init_waitqueue_entry(&m->poll_wait[i], m->poll_task->task); | ||
460 | add_wait_queue(wait_address, &m->poll_wait[i]); | ||
461 | } | ||
462 | |||
463 | /** | ||
464 | * p9_poll_mux - polls a mux and schedules read or write works if necessary | ||
465 | */ | ||
466 | static void p9_poll_mux(struct p9_conn *m) | ||
467 | { | ||
468 | int n; | ||
469 | |||
470 | if (m->err < 0) | ||
471 | return; | ||
472 | |||
473 | n = p9_fd_poll(m->trans, NULL); | ||
474 | if (n < 0 || n & (POLLERR | POLLHUP | POLLNVAL)) { | ||
475 | P9_DPRINTK(P9_DEBUG_MUX, "error mux %p err %d\n", m, n); | ||
476 | if (n >= 0) | ||
477 | n = -ECONNRESET; | ||
478 | p9_conn_cancel(m, n); | ||
479 | } | ||
480 | |||
481 | if (n & POLLIN) { | ||
482 | set_bit(Rpending, &m->wsched); | ||
483 | P9_DPRINTK(P9_DEBUG_MUX, "mux %p can read\n", m); | ||
484 | if (!test_and_set_bit(Rworksched, &m->wsched)) { | ||
485 | P9_DPRINTK(P9_DEBUG_MUX, "schedule read work %p\n", m); | ||
486 | queue_work(p9_mux_wq, &m->rq); | ||
487 | } | ||
488 | } | ||
489 | |||
490 | if (n & POLLOUT) { | ||
491 | set_bit(Wpending, &m->wsched); | ||
492 | P9_DPRINTK(P9_DEBUG_MUX, "mux %p can write\n", m); | ||
493 | if ((m->wsize || !list_empty(&m->unsent_req_list)) | ||
494 | && !test_and_set_bit(Wworksched, &m->wsched)) { | ||
495 | P9_DPRINTK(P9_DEBUG_MUX, "schedule write work %p\n", m); | ||
496 | queue_work(p9_mux_wq, &m->wq); | ||
497 | } | ||
498 | } | ||
499 | } | ||
500 | |||
501 | /** | ||
502 | * p9_poll_proc - polls all v9fs transports for new events and queues | ||
503 | * the appropriate work to the work queue | ||
504 | */ | ||
505 | static int p9_poll_proc(void *a) | ||
506 | { | ||
507 | struct p9_conn *m, *mtmp; | ||
508 | struct p9_mux_poll_task *vpt; | ||
509 | |||
510 | vpt = a; | ||
511 | P9_DPRINTK(P9_DEBUG_MUX, "start %p %p\n", current, vpt); | ||
512 | while (!kthread_should_stop()) { | ||
513 | set_current_state(TASK_INTERRUPTIBLE); | ||
514 | |||
515 | list_for_each_entry_safe(m, mtmp, &vpt->mux_list, mux_list) { | ||
516 | p9_poll_mux(m); | ||
517 | } | ||
518 | |||
519 | P9_DPRINTK(P9_DEBUG_MUX, "sleeping...\n"); | ||
520 | schedule_timeout(SCHED_TIMEOUT * HZ); | ||
521 | } | ||
522 | |||
523 | __set_current_state(TASK_RUNNING); | ||
524 | P9_DPRINTK(P9_DEBUG_MUX, "finish\n"); | ||
525 | return 0; | ||
526 | } | ||
527 | |||
528 | /** | ||
529 | * p9_write_work - called when a transport can send some data | ||
530 | */ | ||
531 | static void p9_write_work(struct work_struct *work) | ||
532 | { | ||
533 | int n, err; | ||
534 | struct p9_conn *m; | ||
535 | struct p9_req *req; | ||
536 | |||
537 | m = container_of(work, struct p9_conn, wq); | ||
538 | |||
539 | if (m->err < 0) { | ||
540 | clear_bit(Wworksched, &m->wsched); | ||
541 | return; | ||
542 | } | ||
543 | |||
544 | if (!m->wsize) { | ||
545 | if (list_empty(&m->unsent_req_list)) { | ||
546 | clear_bit(Wworksched, &m->wsched); | ||
547 | return; | ||
548 | } | ||
549 | |||
550 | spin_lock(&m->lock); | ||
551 | again: | ||
552 | req = list_entry(m->unsent_req_list.next, struct p9_req, | ||
553 | req_list); | ||
554 | list_move_tail(&req->req_list, &m->req_list); | ||
555 | if (req->err == ERREQFLUSH) | ||
556 | goto again; | ||
557 | |||
558 | m->wbuf = req->tcall->sdata; | ||
559 | m->wsize = req->tcall->size; | ||
560 | m->wpos = 0; | ||
561 | spin_unlock(&m->lock); | ||
562 | } | ||
563 | |||
564 | P9_DPRINTK(P9_DEBUG_MUX, "mux %p pos %d size %d\n", m, m->wpos, | ||
565 | m->wsize); | ||
566 | clear_bit(Wpending, &m->wsched); | ||
567 | err = p9_fd_write(m->trans, m->wbuf + m->wpos, m->wsize - m->wpos); | ||
568 | P9_DPRINTK(P9_DEBUG_MUX, "mux %p sent %d bytes\n", m, err); | ||
569 | if (err == -EAGAIN) { | ||
570 | clear_bit(Wworksched, &m->wsched); | ||
571 | return; | ||
572 | } | ||
573 | |||
574 | if (err < 0) | ||
575 | goto error; | ||
576 | else if (err == 0) { | ||
577 | err = -EREMOTEIO; | ||
578 | goto error; | ||
579 | } | ||
580 | |||
581 | m->wpos += err; | ||
582 | if (m->wpos == m->wsize) | ||
583 | m->wpos = m->wsize = 0; | ||
584 | |||
585 | if (m->wsize == 0 && !list_empty(&m->unsent_req_list)) { | ||
586 | if (test_and_clear_bit(Wpending, &m->wsched)) | ||
587 | n = POLLOUT; | ||
588 | else | ||
589 | n = p9_fd_poll(m->trans, NULL); | ||
590 | |||
591 | if (n & POLLOUT) { | ||
592 | P9_DPRINTK(P9_DEBUG_MUX, "schedule write work %p\n", m); | ||
593 | queue_work(p9_mux_wq, &m->wq); | ||
594 | } else | ||
595 | clear_bit(Wworksched, &m->wsched); | ||
596 | } else | ||
597 | clear_bit(Wworksched, &m->wsched); | ||
598 | |||
599 | return; | ||
600 | |||
601 | error: | ||
602 | p9_conn_cancel(m, err); | ||
603 | clear_bit(Wworksched, &m->wsched); | ||
604 | } | ||
605 | |||
606 | static void process_request(struct p9_conn *m, struct p9_req *req) | ||
607 | { | ||
608 | int ecode; | ||
609 | struct p9_str *ename; | ||
610 | |||
611 | if (!req->err && req->rcall->id == P9_RERROR) { | ||
612 | ecode = req->rcall->params.rerror.errno; | ||
613 | ename = &req->rcall->params.rerror.error; | ||
614 | |||
615 | P9_DPRINTK(P9_DEBUG_MUX, "Rerror %.*s\n", ename->len, | ||
616 | ename->str); | ||
617 | |||
618 | if (m->extended) | ||
619 | req->err = -ecode; | ||
620 | |||
621 | if (!req->err) { | ||
622 | req->err = p9_errstr2errno(ename->str, ename->len); | ||
623 | |||
624 | /* string match failed */ | ||
625 | if (!req->err) { | ||
626 | PRINT_FCALL_ERROR("unknown error", req->rcall); | ||
627 | req->err = -ESERVERFAULT; | ||
628 | } | ||
629 | } | ||
630 | } else if (req->tcall && req->rcall->id != req->tcall->id + 1) { | ||
631 | P9_DPRINTK(P9_DEBUG_ERROR, | ||
632 | "fcall mismatch: expected %d, got %d\n", | ||
633 | req->tcall->id + 1, req->rcall->id); | ||
634 | if (!req->err) | ||
635 | req->err = -EIO; | ||
636 | } | ||
637 | } | ||
638 | |||
639 | /** | ||
640 | * p9_read_work - called when there is some data to be read from a transport | ||
641 | */ | ||
642 | static void p9_read_work(struct work_struct *work) | ||
643 | { | ||
644 | int n, err; | ||
645 | struct p9_conn *m; | ||
646 | struct p9_req *req, *rptr, *rreq; | ||
647 | struct p9_fcall *rcall; | ||
648 | char *rbuf; | ||
649 | |||
650 | m = container_of(work, struct p9_conn, rq); | ||
651 | |||
652 | if (m->err < 0) | ||
653 | return; | ||
654 | |||
655 | rcall = NULL; | ||
656 | P9_DPRINTK(P9_DEBUG_MUX, "start mux %p pos %d\n", m, m->rpos); | ||
657 | |||
658 | if (!m->rcall) { | ||
659 | m->rcall = | ||
660 | kmalloc(sizeof(struct p9_fcall) + m->msize, GFP_KERNEL); | ||
661 | if (!m->rcall) { | ||
662 | err = -ENOMEM; | ||
663 | goto error; | ||
664 | } | ||
665 | |||
666 | m->rbuf = (char *)m->rcall + sizeof(struct p9_fcall); | ||
667 | m->rpos = 0; | ||
668 | } | ||
669 | |||
670 | clear_bit(Rpending, &m->wsched); | ||
671 | err = p9_fd_read(m->trans, m->rbuf + m->rpos, m->msize - m->rpos); | ||
672 | P9_DPRINTK(P9_DEBUG_MUX, "mux %p got %d bytes\n", m, err); | ||
673 | if (err == -EAGAIN) { | ||
674 | clear_bit(Rworksched, &m->wsched); | ||
675 | return; | ||
676 | } | ||
677 | |||
678 | if (err <= 0) | ||
679 | goto error; | ||
680 | |||
681 | m->rpos += err; | ||
682 | while (m->rpos > 4) { | ||
683 | n = le32_to_cpu(*(__le32 *) m->rbuf); | ||
684 | if (n >= m->msize) { | ||
685 | P9_DPRINTK(P9_DEBUG_ERROR, | ||
686 | "requested packet size too big: %d\n", n); | ||
687 | err = -EIO; | ||
688 | goto error; | ||
689 | } | ||
690 | |||
691 | if (m->rpos < n) | ||
692 | break; | ||
693 | |||
694 | err = | ||
695 | p9_deserialize_fcall(m->rbuf, n, m->rcall, m->extended); | ||
696 | if (err < 0) | ||
697 | goto error; | ||
698 | |||
699 | #ifdef CONFIG_NET_9P_DEBUG | ||
700 | if ((p9_debug_level&P9_DEBUG_FCALL) == P9_DEBUG_FCALL) { | ||
701 | char buf[150]; | ||
702 | |||
703 | p9_printfcall(buf, sizeof(buf), m->rcall, | ||
704 | m->extended); | ||
705 | printk(KERN_NOTICE ">>> %p %s\n", m, buf); | ||
706 | } | ||
707 | #endif | ||
708 | |||
709 | rcall = m->rcall; | ||
710 | rbuf = m->rbuf; | ||
711 | if (m->rpos > n) { | ||
712 | m->rcall = kmalloc(sizeof(struct p9_fcall) + m->msize, | ||
713 | GFP_KERNEL); | ||
714 | if (!m->rcall) { | ||
715 | err = -ENOMEM; | ||
716 | goto error; | ||
717 | } | ||
718 | |||
719 | m->rbuf = (char *)m->rcall + sizeof(struct p9_fcall); | ||
720 | memmove(m->rbuf, rbuf + n, m->rpos - n); | ||
721 | m->rpos -= n; | ||
722 | } else { | ||
723 | m->rcall = NULL; | ||
724 | m->rbuf = NULL; | ||
725 | m->rpos = 0; | ||
726 | } | ||
727 | |||
728 | P9_DPRINTK(P9_DEBUG_MUX, "mux %p fcall id %d tag %d\n", m, | ||
729 | rcall->id, rcall->tag); | ||
730 | |||
731 | req = NULL; | ||
732 | spin_lock(&m->lock); | ||
733 | list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) { | ||
734 | if (rreq->tag == rcall->tag) { | ||
735 | req = rreq; | ||
736 | if (req->flush != Flushing) | ||
737 | list_del(&req->req_list); | ||
738 | break; | ||
739 | } | ||
740 | } | ||
741 | spin_unlock(&m->lock); | ||
742 | |||
743 | if (req) { | ||
744 | req->rcall = rcall; | ||
745 | process_request(m, req); | ||
746 | |||
747 | if (req->flush != Flushing) { | ||
748 | if (req->cb) | ||
749 | (*req->cb) (req, req->cba); | ||
750 | else | ||
751 | kfree(req->rcall); | ||
752 | |||
753 | wake_up(&m->equeue); | ||
754 | } | ||
755 | } else { | ||
756 | if (err >= 0 && rcall->id != P9_RFLUSH) | ||
757 | P9_DPRINTK(P9_DEBUG_ERROR, | ||
758 | "unexpected response mux %p id %d tag %d\n", | ||
759 | m, rcall->id, rcall->tag); | ||
760 | kfree(rcall); | ||
761 | } | ||
762 | } | ||
763 | |||
764 | if (!list_empty(&m->req_list)) { | ||
765 | if (test_and_clear_bit(Rpending, &m->wsched)) | ||
766 | n = POLLIN; | ||
767 | else | ||
768 | n = p9_fd_poll(m->trans, NULL); | ||
769 | |||
770 | if (n & POLLIN) { | ||
771 | P9_DPRINTK(P9_DEBUG_MUX, "schedule read work %p\n", m); | ||
772 | queue_work(p9_mux_wq, &m->rq); | ||
773 | } else | ||
774 | clear_bit(Rworksched, &m->wsched); | ||
775 | } else | ||
776 | clear_bit(Rworksched, &m->wsched); | ||
777 | |||
778 | return; | ||
779 | |||
780 | error: | ||
781 | p9_conn_cancel(m, err); | ||
782 | clear_bit(Rworksched, &m->wsched); | ||
783 | } | ||
784 | |||
785 | /** | ||
786 | * p9_send_request - send 9P request | ||
787 | * The function can sleep until the request is scheduled for sending. | ||
788 | * The function can be interrupted. Return from the function is not | ||
789 | * a guarantee that the request is sent successfully. Can return errors | ||
790 | * that can be retrieved by PTR_ERR macros. | ||
791 | * | ||
792 | * @m: mux data | ||
793 | * @tc: request to be sent | ||
794 | * @cb: callback function to call when response is received | ||
795 | * @cba: parameter to pass to the callback function | ||
796 | */ | ||
797 | static struct p9_req *p9_send_request(struct p9_conn *m, | ||
798 | struct p9_fcall *tc, | ||
799 | p9_conn_req_callback cb, void *cba) | ||
800 | { | ||
801 | int n; | ||
802 | struct p9_req *req; | ||
803 | |||
804 | P9_DPRINTK(P9_DEBUG_MUX, "mux %p task %p tcall %p id %d\n", m, current, | ||
805 | tc, tc->id); | ||
806 | if (m->err < 0) | ||
807 | return ERR_PTR(m->err); | ||
808 | |||
809 | req = kmalloc(sizeof(struct p9_req), GFP_KERNEL); | ||
810 | if (!req) | ||
811 | return ERR_PTR(-ENOMEM); | ||
812 | |||
813 | if (tc->id == P9_TVERSION) | ||
814 | n = P9_NOTAG; | ||
815 | else | ||
816 | n = p9_mux_get_tag(m); | ||
817 | |||
818 | if (n < 0) | ||
819 | return ERR_PTR(-ENOMEM); | ||
820 | |||
821 | p9_set_tag(tc, n); | ||
822 | |||
823 | #ifdef CONFIG_NET_9P_DEBUG | ||
824 | if ((p9_debug_level&P9_DEBUG_FCALL) == P9_DEBUG_FCALL) { | ||
825 | char buf[150]; | ||
826 | |||
827 | p9_printfcall(buf, sizeof(buf), tc, m->extended); | ||
828 | printk(KERN_NOTICE "<<< %p %s\n", m, buf); | ||
829 | } | ||
830 | #endif | ||
831 | |||
832 | spin_lock_init(&req->lock); | ||
833 | req->tag = n; | ||
834 | req->tcall = tc; | ||
835 | req->rcall = NULL; | ||
836 | req->err = 0; | ||
837 | req->cb = cb; | ||
838 | req->cba = cba; | ||
839 | req->flush = None; | ||
840 | |||
841 | spin_lock(&m->lock); | ||
842 | list_add_tail(&req->req_list, &m->unsent_req_list); | ||
843 | spin_unlock(&m->lock); | ||
844 | |||
845 | if (test_and_clear_bit(Wpending, &m->wsched)) | ||
846 | n = POLLOUT; | ||
847 | else | ||
848 | n = p9_fd_poll(m->trans, NULL); | ||
849 | |||
850 | if (n & POLLOUT && !test_and_set_bit(Wworksched, &m->wsched)) | ||
851 | queue_work(p9_mux_wq, &m->wq); | ||
852 | |||
853 | return req; | ||
854 | } | ||
855 | |||
856 | static void p9_mux_free_request(struct p9_conn *m, struct p9_req *req) | ||
857 | { | ||
858 | p9_mux_put_tag(m, req->tag); | ||
859 | kfree(req); | ||
860 | } | ||
861 | |||
862 | static void p9_mux_flush_cb(struct p9_req *freq, void *a) | ||
863 | { | ||
864 | p9_conn_req_callback cb; | ||
865 | int tag; | ||
866 | struct p9_conn *m; | ||
867 | struct p9_req *req, *rreq, *rptr; | ||
868 | |||
869 | m = a; | ||
870 | P9_DPRINTK(P9_DEBUG_MUX, "mux %p tc %p rc %p err %d oldtag %d\n", m, | ||
871 | freq->tcall, freq->rcall, freq->err, | ||
872 | freq->tcall->params.tflush.oldtag); | ||
873 | |||
874 | spin_lock(&m->lock); | ||
875 | cb = NULL; | ||
876 | tag = freq->tcall->params.tflush.oldtag; | ||
877 | req = NULL; | ||
878 | list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) { | ||
879 | if (rreq->tag == tag) { | ||
880 | req = rreq; | ||
881 | list_del(&req->req_list); | ||
882 | break; | ||
883 | } | ||
884 | } | ||
885 | spin_unlock(&m->lock); | ||
886 | |||
887 | if (req) { | ||
888 | spin_lock(&req->lock); | ||
889 | req->flush = Flushed; | ||
890 | spin_unlock(&req->lock); | ||
891 | |||
892 | if (req->cb) | ||
893 | (*req->cb) (req, req->cba); | ||
894 | else | ||
895 | kfree(req->rcall); | ||
896 | |||
897 | wake_up(&m->equeue); | ||
898 | } | ||
899 | |||
900 | kfree(freq->tcall); | ||
901 | kfree(freq->rcall); | ||
902 | p9_mux_free_request(m, freq); | ||
903 | } | ||
904 | |||
905 | static int | ||
906 | p9_mux_flush_request(struct p9_conn *m, struct p9_req *req) | ||
907 | { | ||
908 | struct p9_fcall *fc; | ||
909 | struct p9_req *rreq, *rptr; | ||
910 | |||
911 | P9_DPRINTK(P9_DEBUG_MUX, "mux %p req %p tag %d\n", m, req, req->tag); | ||
912 | |||
913 | /* if a response was received for a request, do nothing */ | ||
914 | spin_lock(&req->lock); | ||
915 | if (req->rcall || req->err) { | ||
916 | spin_unlock(&req->lock); | ||
917 | P9_DPRINTK(P9_DEBUG_MUX, | ||
918 | "mux %p req %p response already received\n", m, req); | ||
919 | return 0; | ||
920 | } | ||
921 | |||
922 | req->flush = Flushing; | ||
923 | spin_unlock(&req->lock); | ||
924 | |||
925 | spin_lock(&m->lock); | ||
926 | /* if the request is not sent yet, just remove it from the list */ | ||
927 | list_for_each_entry_safe(rreq, rptr, &m->unsent_req_list, req_list) { | ||
928 | if (rreq->tag == req->tag) { | ||
929 | P9_DPRINTK(P9_DEBUG_MUX, | ||
930 | "mux %p req %p request is not sent yet\n", m, req); | ||
931 | list_del(&rreq->req_list); | ||
932 | req->flush = Flushed; | ||
933 | spin_unlock(&m->lock); | ||
934 | if (req->cb) | ||
935 | (*req->cb) (req, req->cba); | ||
936 | return 0; | ||
937 | } | ||
938 | } | ||
939 | spin_unlock(&m->lock); | ||
940 | |||
941 | clear_thread_flag(TIF_SIGPENDING); | ||
942 | fc = p9_create_tflush(req->tag); | ||
943 | p9_send_request(m, fc, p9_mux_flush_cb, m); | ||
944 | return 1; | ||
945 | } | ||
946 | |||
947 | static void | ||
948 | p9_conn_rpc_cb(struct p9_req *req, void *a) | ||
949 | { | ||
950 | struct p9_mux_rpc *r; | ||
951 | |||
952 | P9_DPRINTK(P9_DEBUG_MUX, "req %p r %p\n", req, a); | ||
953 | r = a; | ||
954 | r->rcall = req->rcall; | ||
955 | r->err = req->err; | ||
956 | |||
957 | if (req->flush != None && !req->err) | ||
958 | r->err = -ERESTARTSYS; | ||
959 | |||
960 | wake_up(&r->wqueue); | ||
961 | } | ||
962 | |||
963 | /** | ||
964 | * p9_fd_rpc- sends 9P request and waits until a response is available. | ||
965 | * The function can be interrupted. | ||
966 | * @m: mux data | ||
967 | * @tc: request to be sent | ||
968 | * @rc: pointer where a pointer to the response is stored | ||
969 | */ | ||
970 | int | ||
971 | p9_fd_rpc(struct p9_trans *t, struct p9_fcall *tc, struct p9_fcall **rc) | ||
972 | { | ||
973 | struct p9_trans_fd *p = t->priv; | ||
974 | struct p9_conn *m = p->conn; | ||
975 | int err, sigpending; | ||
976 | unsigned long flags; | ||
977 | struct p9_req *req; | ||
978 | struct p9_mux_rpc r; | ||
979 | |||
980 | r.err = 0; | ||
981 | r.tcall = tc; | ||
982 | r.rcall = NULL; | ||
983 | r.m = m; | ||
984 | init_waitqueue_head(&r.wqueue); | ||
985 | |||
986 | if (rc) | ||
987 | *rc = NULL; | ||
988 | |||
989 | sigpending = 0; | ||
990 | if (signal_pending(current)) { | ||
991 | sigpending = 1; | ||
992 | clear_thread_flag(TIF_SIGPENDING); | ||
993 | } | ||
994 | |||
995 | req = p9_send_request(m, tc, p9_conn_rpc_cb, &r); | ||
996 | if (IS_ERR(req)) { | ||
997 | err = PTR_ERR(req); | ||
998 | P9_DPRINTK(P9_DEBUG_MUX, "error %d\n", err); | ||
999 | return err; | ||
1000 | } | ||
1001 | |||
1002 | err = wait_event_interruptible(r.wqueue, r.rcall != NULL || r.err < 0); | ||
1003 | if (r.err < 0) | ||
1004 | err = r.err; | ||
1005 | |||
1006 | if (err == -ERESTARTSYS && m->trans->status == Connected | ||
1007 | && m->err == 0) { | ||
1008 | if (p9_mux_flush_request(m, req)) { | ||
1009 | /* wait until we get response of the flush message */ | ||
1010 | do { | ||
1011 | clear_thread_flag(TIF_SIGPENDING); | ||
1012 | err = wait_event_interruptible(r.wqueue, | ||
1013 | r.rcall || r.err); | ||
1014 | } while (!r.rcall && !r.err && err == -ERESTARTSYS && | ||
1015 | m->trans->status == Connected && !m->err); | ||
1016 | |||
1017 | err = -ERESTARTSYS; | ||
1018 | } | ||
1019 | sigpending = 1; | ||
1020 | } | ||
1021 | |||
1022 | if (sigpending) { | ||
1023 | spin_lock_irqsave(¤t->sighand->siglock, flags); | ||
1024 | recalc_sigpending(); | ||
1025 | spin_unlock_irqrestore(¤t->sighand->siglock, flags); | ||
1026 | } | ||
1027 | |||
1028 | if (rc) | ||
1029 | *rc = r.rcall; | ||
1030 | else | ||
1031 | kfree(r.rcall); | ||
1032 | |||
1033 | p9_mux_free_request(m, req); | ||
1034 | if (err > 0) | ||
1035 | err = -EIO; | ||
1036 | |||
1037 | return err; | ||
1038 | } | ||
1039 | |||
1040 | #ifdef P9_NONBLOCK | ||
1041 | /** | ||
1042 | * p9_conn_rpcnb - sends 9P request without waiting for response. | ||
1043 | * @m: mux data | ||
1044 | * @tc: request to be sent | ||
1045 | * @cb: callback function to be called when response arrives | ||
1046 | * @cba: value to pass to the callback function | ||
1047 | */ | ||
1048 | int p9_conn_rpcnb(struct p9_conn *m, struct p9_fcall *tc, | ||
1049 | p9_conn_req_callback cb, void *a) | ||
1050 | { | ||
1051 | int err; | ||
1052 | struct p9_req *req; | ||
1053 | |||
1054 | req = p9_send_request(m, tc, cb, a); | ||
1055 | if (IS_ERR(req)) { | ||
1056 | err = PTR_ERR(req); | ||
1057 | P9_DPRINTK(P9_DEBUG_MUX, "error %d\n", err); | ||
1058 | return PTR_ERR(req); | ||
1059 | } | ||
1060 | |||
1061 | P9_DPRINTK(P9_DEBUG_MUX, "mux %p tc %p tag %d\n", m, tc, req->tag); | ||
1062 | return 0; | ||
1063 | } | ||
1064 | #endif /* P9_NONBLOCK */ | ||
1065 | |||
1066 | /** | ||
1067 | * p9_conn_cancel - cancel all pending requests with error | ||
1068 | * @m: mux data | ||
1069 | * @err: error code | ||
1070 | */ | ||
1071 | void p9_conn_cancel(struct p9_conn *m, int err) | ||
1072 | { | ||
1073 | struct p9_req *req, *rtmp; | ||
1074 | LIST_HEAD(cancel_list); | ||
1075 | |||
1076 | P9_DPRINTK(P9_DEBUG_ERROR, "mux %p err %d\n", m, err); | ||
1077 | m->err = err; | ||
1078 | spin_lock(&m->lock); | ||
1079 | list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) { | ||
1080 | list_move(&req->req_list, &cancel_list); | ||
1081 | } | ||
1082 | list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) { | ||
1083 | list_move(&req->req_list, &cancel_list); | ||
1084 | } | ||
1085 | spin_unlock(&m->lock); | ||
1086 | |||
1087 | list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) { | ||
1088 | list_del(&req->req_list); | ||
1089 | if (!req->err) | ||
1090 | req->err = err; | ||
1091 | |||
1092 | if (req->cb) | ||
1093 | (*req->cb) (req, req->cba); | ||
1094 | else | ||
1095 | kfree(req->rcall); | ||
1096 | } | ||
1097 | |||
1098 | wake_up(&m->equeue); | ||
1099 | } | ||
1100 | |||
75 | /** | 1101 | /** |
76 | * v9fs_parse_options - parse mount options into session structure | 1102 | * v9fs_parse_options - parse mount options into session structure |
77 | * @options: options string passed from mount | 1103 | * @options: options string passed from mount |
@@ -268,7 +1294,7 @@ end: | |||
268 | } | 1294 | } |
269 | 1295 | ||
270 | /** | 1296 | /** |
271 | * p9_sock_close - shutdown socket | 1297 | * p9_fd_close - shutdown socket |
272 | * @trans: private socket structure | 1298 | * @trans: private socket structure |
273 | * | 1299 | * |
274 | */ | 1300 | */ |
@@ -284,6 +1310,8 @@ static void p9_fd_close(struct p9_trans *trans) | |||
284 | if (!ts) | 1310 | if (!ts) |
285 | return; | 1311 | return; |
286 | 1312 | ||
1313 | p9_conn_destroy(ts->conn); | ||
1314 | |||
287 | trans->status = Disconnected; | 1315 | trans->status = Disconnected; |
288 | if (ts->rd) | 1316 | if (ts->rd) |
289 | fput(ts->rd); | 1317 | fput(ts->rd); |
@@ -292,13 +1320,15 @@ static void p9_fd_close(struct p9_trans *trans) | |||
292 | kfree(ts); | 1320 | kfree(ts); |
293 | } | 1321 | } |
294 | 1322 | ||
295 | static struct p9_trans *p9_trans_create_tcp(const char *addr, char *args) | 1323 | static struct p9_trans * |
1324 | p9_trans_create_tcp(const char *addr, char *args, int msize, unsigned char dotu) | ||
296 | { | 1325 | { |
297 | int err; | 1326 | int err; |
298 | struct p9_trans *trans; | 1327 | struct p9_trans *trans; |
299 | struct socket *csocket; | 1328 | struct socket *csocket; |
300 | struct sockaddr_in sin_server; | 1329 | struct sockaddr_in sin_server; |
301 | struct p9_fd_opts opts; | 1330 | struct p9_fd_opts opts; |
1331 | struct p9_trans_fd *p; | ||
302 | 1332 | ||
303 | parse_opts(args, &opts); | 1333 | parse_opts(args, &opts); |
304 | 1334 | ||
@@ -306,11 +1336,10 @@ static struct p9_trans *p9_trans_create_tcp(const char *addr, char *args) | |||
306 | trans = kmalloc(sizeof(struct p9_trans), GFP_KERNEL); | 1336 | trans = kmalloc(sizeof(struct p9_trans), GFP_KERNEL); |
307 | if (!trans) | 1337 | if (!trans) |
308 | return ERR_PTR(-ENOMEM); | 1338 | return ERR_PTR(-ENOMEM); |
309 | 1339 | trans->msize = msize; | |
310 | trans->write = p9_fd_write; | 1340 | trans->extended = dotu; |
311 | trans->read = p9_fd_read; | 1341 | trans->rpc = p9_fd_rpc; |
312 | trans->close = p9_fd_close; | 1342 | trans->close = p9_fd_close; |
313 | trans->poll = p9_fd_poll; | ||
314 | 1343 | ||
315 | sin_server.sin_family = AF_INET; | 1344 | sin_server.sin_family = AF_INET; |
316 | sin_server.sin_addr.s_addr = in_aton(addr); | 1345 | sin_server.sin_addr.s_addr = in_aton(addr); |
@@ -337,6 +1366,14 @@ static struct p9_trans *p9_trans_create_tcp(const char *addr, char *args) | |||
337 | if (err < 0) | 1366 | if (err < 0) |
338 | goto error; | 1367 | goto error; |
339 | 1368 | ||
1369 | p = (struct p9_trans_fd *) trans->priv; | ||
1370 | p->conn = p9_conn_create(trans); | ||
1371 | if (IS_ERR(p->conn)) { | ||
1372 | err = PTR_ERR(p->conn); | ||
1373 | p->conn = NULL; | ||
1374 | goto error; | ||
1375 | } | ||
1376 | |||
340 | return trans; | 1377 | return trans; |
341 | 1378 | ||
342 | error: | 1379 | error: |
@@ -347,22 +1384,23 @@ error: | |||
347 | return ERR_PTR(err); | 1384 | return ERR_PTR(err); |
348 | } | 1385 | } |
349 | 1386 | ||
350 | static struct p9_trans *p9_trans_create_unix(const char *addr, char *args) | 1387 | static struct p9_trans * |
1388 | p9_trans_create_unix(const char *addr, char *args, int msize, | ||
1389 | unsigned char dotu) | ||
351 | { | 1390 | { |
352 | int err; | 1391 | int err; |
353 | struct socket *csocket; | 1392 | struct socket *csocket; |
354 | struct sockaddr_un sun_server; | 1393 | struct sockaddr_un sun_server; |
355 | struct p9_trans *trans; | 1394 | struct p9_trans *trans; |
1395 | struct p9_trans_fd *p; | ||
356 | 1396 | ||
357 | csocket = NULL; | 1397 | csocket = NULL; |
358 | trans = kmalloc(sizeof(struct p9_trans), GFP_KERNEL); | 1398 | trans = kmalloc(sizeof(struct p9_trans), GFP_KERNEL); |
359 | if (!trans) | 1399 | if (!trans) |
360 | return ERR_PTR(-ENOMEM); | 1400 | return ERR_PTR(-ENOMEM); |
361 | 1401 | ||
362 | trans->write = p9_fd_write; | 1402 | trans->rpc = p9_fd_rpc; |
363 | trans->read = p9_fd_read; | ||
364 | trans->close = p9_fd_close; | 1403 | trans->close = p9_fd_close; |
365 | trans->poll = p9_fd_poll; | ||
366 | 1404 | ||
367 | if (strlen(addr) > UNIX_PATH_MAX) { | 1405 | if (strlen(addr) > UNIX_PATH_MAX) { |
368 | P9_EPRINTK(KERN_ERR, "p9_trans_unix: address too long: %s\n", | 1406 | P9_EPRINTK(KERN_ERR, "p9_trans_unix: address too long: %s\n", |
@@ -387,6 +1425,16 @@ static struct p9_trans *p9_trans_create_unix(const char *addr, char *args) | |||
387 | if (err < 0) | 1425 | if (err < 0) |
388 | goto error; | 1426 | goto error; |
389 | 1427 | ||
1428 | trans->msize = msize; | ||
1429 | trans->extended = dotu; | ||
1430 | p = (struct p9_trans_fd *) trans->priv; | ||
1431 | p->conn = p9_conn_create(trans); | ||
1432 | if (IS_ERR(p->conn)) { | ||
1433 | err = PTR_ERR(p->conn); | ||
1434 | p->conn = NULL; | ||
1435 | goto error; | ||
1436 | } | ||
1437 | |||
390 | return trans; | 1438 | return trans; |
391 | 1439 | ||
392 | error: | 1440 | error: |
@@ -397,11 +1445,14 @@ error: | |||
397 | return ERR_PTR(err); | 1445 | return ERR_PTR(err); |
398 | } | 1446 | } |
399 | 1447 | ||
400 | static struct p9_trans *p9_trans_create_fd(const char *name, char *args) | 1448 | static struct p9_trans * |
1449 | p9_trans_create_fd(const char *name, char *args, int msize, | ||
1450 | unsigned char extended) | ||
401 | { | 1451 | { |
402 | int err; | 1452 | int err; |
403 | struct p9_trans *trans; | 1453 | struct p9_trans *trans; |
404 | struct p9_fd_opts opts; | 1454 | struct p9_fd_opts opts; |
1455 | struct p9_trans_fd *p; | ||
405 | 1456 | ||
406 | parse_opts(args, &opts); | 1457 | parse_opts(args, &opts); |
407 | 1458 | ||
@@ -414,15 +1465,23 @@ static struct p9_trans *p9_trans_create_fd(const char *name, char *args) | |||
414 | if (!trans) | 1465 | if (!trans) |
415 | return ERR_PTR(-ENOMEM); | 1466 | return ERR_PTR(-ENOMEM); |
416 | 1467 | ||
417 | trans->write = p9_fd_write; | 1468 | trans->rpc = p9_fd_rpc; |
418 | trans->read = p9_fd_read; | ||
419 | trans->close = p9_fd_close; | 1469 | trans->close = p9_fd_close; |
420 | trans->poll = p9_fd_poll; | ||
421 | 1470 | ||
422 | err = p9_fd_open(trans, opts.rfd, opts.wfd); | 1471 | err = p9_fd_open(trans, opts.rfd, opts.wfd); |
423 | if (err < 0) | 1472 | if (err < 0) |
424 | goto error; | 1473 | goto error; |
425 | 1474 | ||
1475 | trans->msize = msize; | ||
1476 | trans->extended = extended; | ||
1477 | p = (struct p9_trans_fd *) trans->priv; | ||
1478 | p->conn = p9_conn_create(trans); | ||
1479 | if (IS_ERR(p->conn)) { | ||
1480 | err = PTR_ERR(p->conn); | ||
1481 | p->conn = NULL; | ||
1482 | goto error; | ||
1483 | } | ||
1484 | |||
426 | return trans; | 1485 | return trans; |
427 | 1486 | ||
428 | error: | 1487 | error: |
@@ -453,6 +1512,12 @@ static struct p9_trans_module p9_fd_trans = { | |||
453 | 1512 | ||
454 | static int __init p9_trans_fd_init(void) | 1513 | static int __init p9_trans_fd_init(void) |
455 | { | 1514 | { |
1515 | int ret = p9_mux_global_init(); | ||
1516 | if (ret) { | ||
1517 | printk(KERN_WARNING "9p: starting mux failed\n"); | ||
1518 | return ret; | ||
1519 | } | ||
1520 | |||
456 | v9fs_register_trans(&p9_tcp_trans); | 1521 | v9fs_register_trans(&p9_tcp_trans); |
457 | v9fs_register_trans(&p9_unix_trans); | 1522 | v9fs_register_trans(&p9_unix_trans); |
458 | v9fs_register_trans(&p9_fd_trans); | 1523 | v9fs_register_trans(&p9_fd_trans); |
@@ -460,13 +1525,7 @@ static int __init p9_trans_fd_init(void) | |||
460 | return 1; | 1525 | return 1; |
461 | } | 1526 | } |
462 | 1527 | ||
463 | static void __exit p9_trans_fd_exit(void) { | ||
464 | printk(KERN_ERR "Removal of 9p transports not implemented\n"); | ||
465 | BUG(); | ||
466 | } | ||
467 | |||
468 | module_init(p9_trans_fd_init); | 1528 | module_init(p9_trans_fd_init); |
469 | module_exit(p9_trans_fd_exit); | ||
470 | 1529 | ||
471 | MODULE_AUTHOR("Latchesar Ionkov <lucho@ionkov.net>"); | 1530 | MODULE_AUTHOR("Latchesar Ionkov <lucho@ionkov.net>"); |
472 | MODULE_AUTHOR("Eric Van Hensbergen <ericvh@gmail.com>"); | 1531 | MODULE_AUTHOR("Eric Van Hensbergen <ericvh@gmail.com>"); |