Diffstat (limited to 'net')

-rw-r--r--   net/9p/Makefile        |    1
-rw-r--r--   net/9p/client.c        |  115
-rw-r--r--   net/9p/mod.c           |    9
-rw-r--r--   net/9p/mux.c           | 1060
-rw-r--r--   net/9p/trans_fd.c      | 1103
-rw-r--r--   net/9p/trans_virtio.c  |   44

6 files changed, 1187 insertions, 1145 deletions
diff --git a/net/9p/Makefile b/net/9p/Makefile
index d3abb246ccab..8a1051101898 100644
--- a/net/9p/Makefile
+++ b/net/9p/Makefile
@@ -4,7 +4,6 @@ obj-$(CONFIG_NET_9P_VIRTIO) += 9pnet_virtio.o
4 | 4 | ||
5 | 9pnet-objs := \ | 5 | 9pnet-objs := \ |
6 | mod.o \ | 6 | mod.o \ |
7 | mux.o \ | ||
8 | client.o \ | 7 | client.o \ |
9 | conv.o \ | 8 | conv.o \ |
10 | error.o \ | 9 | error.o \ |
diff --git a/net/9p/client.c b/net/9p/client.c
index 433b30713ef6..84e087e24146 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -3,6 +3,7 @@
3 | * | 3 | * |
4 | * 9P Client | 4 | * 9P Client |
5 | * | 5 | * |
6 | * Copyright (C) 2008 by Eric Van Hensbergen <ericvh@gmail.com> | ||
6 | * Copyright (C) 2007 by Latchesar Ionkov <lucho@ionkov.net> | 7 | * Copyright (C) 2007 by Latchesar Ionkov <lucho@ionkov.net> |
7 | * | 8 | * |
8 | * This program is free software; you can redistribute it and/or modify | 9 | * This program is free software; you can redistribute it and/or modify |
@@ -25,6 +26,7 @@
25 | #include <linux/module.h> | 26 | #include <linux/module.h> |
26 | #include <linux/errno.h> | 27 | #include <linux/errno.h> |
27 | #include <linux/fs.h> | 28 | #include <linux/fs.h> |
29 | #include <linux/poll.h> | ||
28 | #include <linux/idr.h> | 30 | #include <linux/idr.h> |
29 | #include <linux/mutex.h> | 31 | #include <linux/mutex.h> |
30 | #include <linux/sched.h> | 32 | #include <linux/sched.h> |
@@ -32,13 +34,82 @@
32 | #include <net/9p/9p.h> | 34 | #include <net/9p/9p.h> |
33 | #include <linux/parser.h> | 35 | #include <linux/parser.h> |
34 | #include <net/9p/transport.h> | 36 | #include <net/9p/transport.h> |
35 | #include <net/9p/conn.h> | ||
36 | #include <net/9p/client.h> | 37 | #include <net/9p/client.h> |
37 | 38 | ||
38 | static struct p9_fid *p9_fid_create(struct p9_client *clnt); | 39 | static struct p9_fid *p9_fid_create(struct p9_client *clnt); |
39 | static void p9_fid_destroy(struct p9_fid *fid); | 40 | static void p9_fid_destroy(struct p9_fid *fid); |
40 | static struct p9_stat *p9_clone_stat(struct p9_stat *st, int dotu); | 41 | static struct p9_stat *p9_clone_stat(struct p9_stat *st, int dotu); |
41 | 42 | ||
43 | /* | ||
44 | * Client Option Parsing (code inspired by NFS code) | ||
45 | * - a little lazy - parse all client options | ||
46 | */ | ||
47 | |||
48 | enum { | ||
49 | Opt_msize, | ||
50 | Opt_trans, | ||
51 | Opt_legacy, | ||
52 | Opt_err, | ||
53 | }; | ||
54 | |||
55 | static match_table_t tokens = { | ||
56 | {Opt_msize, "msize=%u"}, | ||
57 | {Opt_legacy, "noextend"}, | ||
58 | {Opt_trans, "trans=%s"}, | ||
59 | {Opt_err, NULL}, | ||
60 | }; | ||
61 | |||
62 | /** | ||
63 | * v9fs_parse_options - parse mount options into session structure | ||
64 | * @options: options string passed from mount | ||
65 | * @v9ses: existing v9fs session information | ||
66 | * | ||
67 | */ | ||
68 | |||
69 | static void parse_opts(char *options, struct p9_client *clnt) | ||
70 | { | ||
71 | char *p; | ||
72 | substring_t args[MAX_OPT_ARGS]; | ||
73 | int option; | ||
74 | int ret; | ||
75 | |||
76 | clnt->trans_mod = v9fs_default_trans(); | ||
77 | clnt->dotu = 1; | ||
78 | clnt->msize = 8192; | ||
79 | |||
80 | if (!options) | ||
81 | return; | ||
82 | |||
83 | while ((p = strsep(&options, ",")) != NULL) { | ||
84 | int token; | ||
85 | if (!*p) | ||
86 | continue; | ||
87 | token = match_token(p, tokens, args); | ||
88 | if (token < Opt_trans) { | ||
89 | ret = match_int(&args[0], &option); | ||
90 | if (ret < 0) { | ||
91 | P9_DPRINTK(P9_DEBUG_ERROR, | ||
92 | "integer field, but no integer?\n"); | ||
93 | continue; | ||
94 | } | ||
95 | } | ||
96 | switch (token) { | ||
97 | case Opt_msize: | ||
98 | clnt->msize = option; | ||
99 | break; | ||
100 | case Opt_trans: | ||
101 | clnt->trans_mod = v9fs_match_trans(&args[0]); | ||
102 | break; | ||
103 | case Opt_legacy: | ||
104 | clnt->dotu = 0; | ||
105 | break; | ||
106 | default: | ||
107 | continue; | ||
108 | } | ||
109 | } | ||
110 | } | ||
111 | |||
112 | |||
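The parse_opts() helper added above walks the comma-separated mount string with strsep() and match_token(), filling in msize, the transport name, and the legacy (no 9P2000.u) flag before any transport is created. Below is a minimal userspace sketch of the same strsep()-based walk; the matching is hand-rolled instead of using the kernel's match_token(), so treat it as an illustration of the technique rather than the real 9P parser.

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct opts {
        int msize;      /* requested maximum message size */
        int dotu;       /* 9P2000.u extensions enabled */
        char trans[32]; /* transport name, e.g. "fd" or "virtio" */
};

static void parse_opts(char *options, struct opts *o)
{
        char *p;

        /* defaults mirror the ones set in the patch */
        o->msize = 8192;
        o->dotu = 1;
        strcpy(o->trans, "default");

        while ((p = strsep(&options, ",")) != NULL) {
                if (!*p)
                        continue;
                if (strncmp(p, "msize=", 6) == 0)
                        o->msize = atoi(p + 6);
                else if (strncmp(p, "trans=", 6) == 0)
                        snprintf(o->trans, sizeof(o->trans), "%s", p + 6);
                else if (strcmp(p, "noextend") == 0)
                        o->dotu = 0;
                /* unrecognized options are skipped, as in the patch */
        }
}

int main(void)
{
        char opts[] = "msize=32768,trans=fd,noextend";
        struct opts o;

        parse_opts(opts, &o);
        printf("msize=%d trans=%s dotu=%d\n", o.msize, o.trans, o.dotu);
        return 0;
}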
42 | /** | 113 | /** |
43 | * p9_client_rpc - sends 9P request and waits until a response is available. | 114 | * p9_client_rpc - sends 9P request and waits until a response is available. |
44 | * The function can be interrupted. | 115 | * The function can be interrupted. |
@@ -50,14 +121,10 @@ int
50 | p9_client_rpc(struct p9_client *c, struct p9_fcall *tc, | 121 | p9_client_rpc(struct p9_client *c, struct p9_fcall *tc, |
51 | struct p9_fcall **rc) | 122 | struct p9_fcall **rc) |
52 | { | 123 | { |
53 | if (c->trans->rpc) | 124 | return c->trans->rpc(c->trans, tc, rc); |
54 | return c->trans->rpc(c->trans, tc, rc, c->msize, c->dotu); | ||
55 | else | ||
56 | return p9_conn_rpc(c->conn, tc, rc); | ||
57 | } | 125 | } |
58 | 126 | ||
59 | struct p9_client *p9_client_create(struct p9_trans *trans, int msize, | 127 | struct p9_client *p9_client_create(const char *dev_name, char *options) |
60 | int dotu) | ||
61 | { | 128 | { |
62 | int err, n; | 129 | int err, n; |
63 | struct p9_client *clnt; | 130 | struct p9_client *clnt; |
@@ -71,12 +138,7 @@ struct p9_client *p9_client_create(struct p9_trans *trans, int msize,
71 | if (!clnt) | 138 | if (!clnt) |
72 | return ERR_PTR(-ENOMEM); | 139 | return ERR_PTR(-ENOMEM); |
73 | 140 | ||
74 | P9_DPRINTK(P9_DEBUG_9P, "clnt %p trans %p msize %d dotu %d\n", | ||
75 | clnt, trans, msize, dotu); | ||
76 | spin_lock_init(&clnt->lock); | 141 | spin_lock_init(&clnt->lock); |
77 | clnt->trans = trans; | ||
78 | clnt->msize = msize; | ||
79 | clnt->dotu = dotu; | ||
80 | INIT_LIST_HEAD(&clnt->fidlist); | 142 | INIT_LIST_HEAD(&clnt->fidlist); |
81 | clnt->fidpool = p9_idpool_create(); | 143 | clnt->fidpool = p9_idpool_create(); |
82 | if (!clnt->fidpool) { | 144 | if (!clnt->fidpool) { |
@@ -85,13 +147,29 @@ struct p9_client *p9_client_create(struct p9_trans *trans, int msize,
85 | goto error; | 147 | goto error; |
86 | } | 148 | } |
87 | 149 | ||
88 | clnt->conn = p9_conn_create(clnt->trans, clnt->msize, &clnt->dotu); | 150 | parse_opts(options, clnt); |
89 | if (IS_ERR(clnt->conn)) { | 151 | if (clnt->trans_mod == NULL) { |
90 | err = PTR_ERR(clnt->conn); | 152 | err = -EPROTONOSUPPORT; |
91 | clnt->conn = NULL; | 153 | P9_DPRINTK(P9_DEBUG_ERROR, |
154 | "No transport defined or default transport\n"); | ||
155 | goto error; | ||
156 | } | ||
157 | |||
158 | P9_DPRINTK(P9_DEBUG_9P, "clnt %p trans %p msize %d dotu %d\n", | ||
159 | clnt, clnt->trans_mod, clnt->msize, clnt->dotu); | ||
160 | |||
161 | |||
162 | clnt->trans = clnt->trans_mod->create(dev_name, options, clnt->msize, | ||
163 | clnt->dotu); | ||
164 | if (IS_ERR(clnt->trans)) { | ||
165 | err = PTR_ERR(clnt->trans); | ||
166 | clnt->trans = NULL; | ||
92 | goto error; | 167 | goto error; |
93 | } | 168 | } |
94 | 169 | ||
170 | if ((clnt->msize+P9_IOHDRSZ) > clnt->trans_mod->maxsize) | ||
171 | clnt->msize = clnt->trans_mod->maxsize-P9_IOHDRSZ; | ||
172 | |||
95 | tc = p9_create_tversion(clnt->msize, clnt->dotu?"9P2000.u":"9P2000"); | 173 | tc = p9_create_tversion(clnt->msize, clnt->dotu?"9P2000.u":"9P2000"); |
96 | if (IS_ERR(tc)) { | 174 | if (IS_ERR(tc)) { |
97 | err = PTR_ERR(tc); | 175 | err = PTR_ERR(tc); |
@@ -134,10 +212,6 @@ void p9_client_destroy(struct p9_client *clnt)
134 | struct p9_fid *fid, *fidptr; | 212 | struct p9_fid *fid, *fidptr; |
135 | 213 | ||
136 | P9_DPRINTK(P9_DEBUG_9P, "clnt %p\n", clnt); | 214 | P9_DPRINTK(P9_DEBUG_9P, "clnt %p\n", clnt); |
137 | if (clnt->conn) { | ||
138 | p9_conn_destroy(clnt->conn); | ||
139 | clnt->conn = NULL; | ||
140 | } | ||
141 | 215 | ||
142 | if (clnt->trans) { | 216 | if (clnt->trans) { |
143 | clnt->trans->close(clnt->trans); | 217 | clnt->trans->close(clnt->trans); |
@@ -159,7 +233,6 @@ void p9_client_disconnect(struct p9_client *clnt)
159 | { | 233 | { |
160 | P9_DPRINTK(P9_DEBUG_9P, "clnt %p\n", clnt); | 234 | P9_DPRINTK(P9_DEBUG_9P, "clnt %p\n", clnt); |
161 | clnt->trans->status = Disconnected; | 235 | clnt->trans->status = Disconnected; |
162 | p9_conn_cancel(clnt->conn, -EIO); | ||
163 | } | 236 | } |
164 | EXPORT_SYMBOL(p9_client_disconnect); | 237 | EXPORT_SYMBOL(p9_client_disconnect); |
165 | 238 | ||
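With the mux removed from the generic client, p9_client_create() now resolves a transport module by name (v9fs_match_trans(), falling back to v9fs_default_trans() when none is given), calls its create() hook, and clamps msize so that msize + P9_IOHDRSZ fits within the transport's maxsize. The following standalone sketch shows that lookup-and-clamp step; the registry contents and size values are made up for illustration.

#include <stdio.h>
#include <string.h>

#define P9_IOHDRSZ 24 /* 9P per-message I/O header overhead, as defined in the 9P headers */

struct trans_mod {
        const char *name;
        int maxsize; /* largest message the transport can carry */
};

/* illustrative registry, not the real list of registered transports */
static const struct trans_mod registry[] = {
        { "fd",     65536 },
        { "virtio", 8192  },
};

static const struct trans_mod *match_trans(const char *name)
{
        size_t i;

        for (i = 0; i < sizeof(registry) / sizeof(registry[0]); i++)
                if (strcmp(registry[i].name, name) == 0)
                        return &registry[i];
        return NULL; /* caller fails with -EPROTONOSUPPORT */
}

int main(void)
{
        const struct trans_mod *t = match_trans("virtio");
        int msize = 16384; /* what the mount asked for */

        if (!t) {
                fprintf(stderr, "no such transport\n");
                return 1;
        }

        /* same clamp as the patch: msize + P9_IOHDRSZ must fit in maxsize */
        if (msize + P9_IOHDRSZ > t->maxsize)
                msize = t->maxsize - P9_IOHDRSZ;

        printf("using %s with msize %d\n", t->name, msize);
        return 0;
}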
diff --git a/net/9p/mod.c b/net/9p/mod.c
index 8f9763a9dc12..c285aab2af04 100644
--- a/net/9p/mod.c
+++ b/net/9p/mod.c
@@ -106,15 +106,10 @@ EXPORT_SYMBOL(v9fs_default_trans);
106 | */ | 106 | */ |
107 | static int __init init_p9(void) | 107 | static int __init init_p9(void) |
108 | { | 108 | { |
109 | int ret; | 109 | int ret = 0; |
110 | 110 | ||
111 | p9_error_init(); | 111 | p9_error_init(); |
112 | printk(KERN_INFO "Installing 9P2000 support\n"); | 112 | printk(KERN_INFO "Installing 9P2000 support\n"); |
113 | ret = p9_mux_global_init(); | ||
114 | if (ret) { | ||
115 | printk(KERN_WARNING "9p: starting mux failed\n"); | ||
116 | return ret; | ||
117 | } | ||
118 | 113 | ||
119 | return ret; | 114 | return ret; |
120 | } | 115 | } |
@@ -126,7 +121,7 @@ static int __init init_p9(void)
126 | 121 | ||
127 | static void __exit exit_p9(void) | 122 | static void __exit exit_p9(void) |
128 | { | 123 | { |
129 | p9_mux_global_exit(); | 124 | printk(KERN_INFO "Unloading 9P2000 support\n"); |
130 | } | 125 | } |
131 | 126 | ||
132 | module_init(init_p9) | 127 | module_init(init_p9) |
diff --git a/net/9p/mux.c b/net/9p/mux.c
deleted file mode 100644
index c9f0805048e4..000000000000
--- a/net/9p/mux.c
+++ /dev/null
@@ -1,1060 +0,0 @@
1 | /* | ||
2 | * net/9p/mux.c | ||
3 | * | ||
4 | * Protocol Multiplexer | ||
5 | * | ||
6 | * Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com> | ||
7 | * Copyright (C) 2004-2005 by Latchesar Ionkov <lucho@ionkov.net> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License version 2 | ||
11 | * as published by the Free Software Foundation. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to: | ||
20 | * Free Software Foundation | ||
21 | * 51 Franklin Street, Fifth Floor | ||
22 | * Boston, MA 02111-1301 USA | ||
23 | * | ||
24 | */ | ||
25 | |||
26 | #include <linux/module.h> | ||
27 | #include <linux/errno.h> | ||
28 | #include <linux/fs.h> | ||
29 | #include <linux/poll.h> | ||
30 | #include <linux/kthread.h> | ||
31 | #include <linux/idr.h> | ||
32 | #include <linux/mutex.h> | ||
33 | #include <net/9p/9p.h> | ||
34 | #include <linux/parser.h> | ||
35 | #include <net/9p/transport.h> | ||
36 | #include <net/9p/conn.h> | ||
37 | |||
38 | #define ERREQFLUSH 1 | ||
39 | #define SCHED_TIMEOUT 10 | ||
40 | #define MAXPOLLWADDR 2 | ||
41 | |||
42 | enum { | ||
43 | Rworksched = 1, /* read work scheduled or running */ | ||
44 | Rpending = 2, /* can read */ | ||
45 | Wworksched = 4, /* write work scheduled or running */ | ||
46 | Wpending = 8, /* can write */ | ||
47 | }; | ||
48 | |||
49 | enum { | ||
50 | None, | ||
51 | Flushing, | ||
52 | Flushed, | ||
53 | }; | ||
54 | |||
55 | struct p9_mux_poll_task; | ||
56 | |||
57 | struct p9_req { | ||
58 | spinlock_t lock; /* protect request structure */ | ||
59 | int tag; | ||
60 | struct p9_fcall *tcall; | ||
61 | struct p9_fcall *rcall; | ||
62 | int err; | ||
63 | p9_conn_req_callback cb; | ||
64 | void *cba; | ||
65 | int flush; | ||
66 | struct list_head req_list; | ||
67 | }; | ||
68 | |||
69 | struct p9_conn { | ||
70 | spinlock_t lock; /* protect lock structure */ | ||
71 | struct list_head mux_list; | ||
72 | struct p9_mux_poll_task *poll_task; | ||
73 | int msize; | ||
74 | unsigned char *extended; | ||
75 | struct p9_trans *trans; | ||
76 | struct p9_idpool *tagpool; | ||
77 | int err; | ||
78 | wait_queue_head_t equeue; | ||
79 | struct list_head req_list; | ||
80 | struct list_head unsent_req_list; | ||
81 | struct p9_fcall *rcall; | ||
82 | int rpos; | ||
83 | char *rbuf; | ||
84 | int wpos; | ||
85 | int wsize; | ||
86 | char *wbuf; | ||
87 | wait_queue_t poll_wait[MAXPOLLWADDR]; | ||
88 | wait_queue_head_t *poll_waddr[MAXPOLLWADDR]; | ||
89 | poll_table pt; | ||
90 | struct work_struct rq; | ||
91 | struct work_struct wq; | ||
92 | unsigned long wsched; | ||
93 | }; | ||
94 | |||
95 | struct p9_mux_poll_task { | ||
96 | struct task_struct *task; | ||
97 | struct list_head mux_list; | ||
98 | int muxnum; | ||
99 | }; | ||
100 | |||
101 | struct p9_mux_rpc { | ||
102 | struct p9_conn *m; | ||
103 | int err; | ||
104 | struct p9_fcall *tcall; | ||
105 | struct p9_fcall *rcall; | ||
106 | wait_queue_head_t wqueue; | ||
107 | }; | ||
108 | |||
109 | static int p9_poll_proc(void *); | ||
110 | static void p9_read_work(struct work_struct *work); | ||
111 | static void p9_write_work(struct work_struct *work); | ||
112 | static void p9_pollwait(struct file *filp, wait_queue_head_t *wait_address, | ||
113 | poll_table * p); | ||
114 | static u16 p9_mux_get_tag(struct p9_conn *); | ||
115 | static void p9_mux_put_tag(struct p9_conn *, u16); | ||
116 | |||
117 | static DEFINE_MUTEX(p9_mux_task_lock); | ||
118 | static struct workqueue_struct *p9_mux_wq; | ||
119 | |||
120 | static int p9_mux_num; | ||
121 | static int p9_mux_poll_task_num; | ||
122 | static struct p9_mux_poll_task p9_mux_poll_tasks[100]; | ||
123 | |||
124 | int p9_mux_global_init(void) | ||
125 | { | ||
126 | int i; | ||
127 | |||
128 | for (i = 0; i < ARRAY_SIZE(p9_mux_poll_tasks); i++) | ||
129 | p9_mux_poll_tasks[i].task = NULL; | ||
130 | |||
131 | p9_mux_wq = create_workqueue("v9fs"); | ||
132 | if (!p9_mux_wq) { | ||
133 | printk(KERN_WARNING "v9fs: mux: creating workqueue failed\n"); | ||
134 | return -ENOMEM; | ||
135 | } | ||
136 | |||
137 | return 0; | ||
138 | } | ||
139 | |||
140 | void p9_mux_global_exit(void) | ||
141 | { | ||
142 | destroy_workqueue(p9_mux_wq); | ||
143 | } | ||
144 | |||
145 | /** | ||
146 | * p9_mux_calc_poll_procs - calculates the number of polling procs | ||
147 | * based on the number of mounted v9fs filesystems. | ||
148 | * | ||
149 | * The current implementation returns sqrt of the number of mounts. | ||
150 | */ | ||
151 | static int p9_mux_calc_poll_procs(int muxnum) | ||
152 | { | ||
153 | int n; | ||
154 | |||
155 | if (p9_mux_poll_task_num) | ||
156 | n = muxnum / p9_mux_poll_task_num + | ||
157 | (muxnum % p9_mux_poll_task_num ? 1 : 0); | ||
158 | else | ||
159 | n = 1; | ||
160 | |||
161 | if (n > ARRAY_SIZE(p9_mux_poll_tasks)) | ||
162 | n = ARRAY_SIZE(p9_mux_poll_tasks); | ||
163 | |||
164 | return n; | ||
165 | } | ||
166 | |||
167 | static int p9_mux_poll_start(struct p9_conn *m) | ||
168 | { | ||
169 | int i, n; | ||
170 | struct p9_mux_poll_task *vpt, *vptlast; | ||
171 | struct task_struct *pproc; | ||
172 | |||
173 | P9_DPRINTK(P9_DEBUG_MUX, "mux %p muxnum %d procnum %d\n", m, p9_mux_num, | ||
174 | p9_mux_poll_task_num); | ||
175 | mutex_lock(&p9_mux_task_lock); | ||
176 | |||
177 | n = p9_mux_calc_poll_procs(p9_mux_num + 1); | ||
178 | if (n > p9_mux_poll_task_num) { | ||
179 | for (i = 0; i < ARRAY_SIZE(p9_mux_poll_tasks); i++) { | ||
180 | if (p9_mux_poll_tasks[i].task == NULL) { | ||
181 | vpt = &p9_mux_poll_tasks[i]; | ||
182 | P9_DPRINTK(P9_DEBUG_MUX, "create proc %p\n", | ||
183 | vpt); | ||
184 | pproc = kthread_create(p9_poll_proc, vpt, | ||
185 | "v9fs-poll"); | ||
186 | |||
187 | if (!IS_ERR(pproc)) { | ||
188 | vpt->task = pproc; | ||
189 | INIT_LIST_HEAD(&vpt->mux_list); | ||
190 | vpt->muxnum = 0; | ||
191 | p9_mux_poll_task_num++; | ||
192 | wake_up_process(vpt->task); | ||
193 | } | ||
194 | break; | ||
195 | } | ||
196 | } | ||
197 | |||
198 | if (i >= ARRAY_SIZE(p9_mux_poll_tasks)) | ||
199 | P9_DPRINTK(P9_DEBUG_ERROR, | ||
200 | "warning: no free poll slots\n"); | ||
201 | } | ||
202 | |||
203 | n = (p9_mux_num + 1) / p9_mux_poll_task_num + | ||
204 | ((p9_mux_num + 1) % p9_mux_poll_task_num ? 1 : 0); | ||
205 | |||
206 | vptlast = NULL; | ||
207 | for (i = 0; i < ARRAY_SIZE(p9_mux_poll_tasks); i++) { | ||
208 | vpt = &p9_mux_poll_tasks[i]; | ||
209 | if (vpt->task != NULL) { | ||
210 | vptlast = vpt; | ||
211 | if (vpt->muxnum < n) { | ||
212 | P9_DPRINTK(P9_DEBUG_MUX, "put in proc %d\n", i); | ||
213 | list_add(&m->mux_list, &vpt->mux_list); | ||
214 | vpt->muxnum++; | ||
215 | m->poll_task = vpt; | ||
216 | memset(&m->poll_waddr, 0, | ||
217 | sizeof(m->poll_waddr)); | ||
218 | init_poll_funcptr(&m->pt, p9_pollwait); | ||
219 | break; | ||
220 | } | ||
221 | } | ||
222 | } | ||
223 | |||
224 | if (i >= ARRAY_SIZE(p9_mux_poll_tasks)) { | ||
225 | if (vptlast == NULL) { | ||
226 | mutex_unlock(&p9_mux_task_lock); | ||
227 | return -ENOMEM; | ||
228 | } | ||
229 | |||
230 | P9_DPRINTK(P9_DEBUG_MUX, "put in proc %d\n", i); | ||
231 | list_add(&m->mux_list, &vptlast->mux_list); | ||
232 | vptlast->muxnum++; | ||
233 | m->poll_task = vptlast; | ||
234 | memset(&m->poll_waddr, 0, sizeof(m->poll_waddr)); | ||
235 | init_poll_funcptr(&m->pt, p9_pollwait); | ||
236 | } | ||
237 | |||
238 | p9_mux_num++; | ||
239 | mutex_unlock(&p9_mux_task_lock); | ||
240 | |||
241 | return 0; | ||
242 | } | ||
243 | |||
244 | static void p9_mux_poll_stop(struct p9_conn *m) | ||
245 | { | ||
246 | int i; | ||
247 | struct p9_mux_poll_task *vpt; | ||
248 | |||
249 | mutex_lock(&p9_mux_task_lock); | ||
250 | vpt = m->poll_task; | ||
251 | list_del(&m->mux_list); | ||
252 | for (i = 0; i < ARRAY_SIZE(m->poll_waddr); i++) { | ||
253 | if (m->poll_waddr[i] != NULL) { | ||
254 | remove_wait_queue(m->poll_waddr[i], &m->poll_wait[i]); | ||
255 | m->poll_waddr[i] = NULL; | ||
256 | } | ||
257 | } | ||
258 | vpt->muxnum--; | ||
259 | if (!vpt->muxnum) { | ||
260 | P9_DPRINTK(P9_DEBUG_MUX, "destroy proc %p\n", vpt); | ||
261 | kthread_stop(vpt->task); | ||
262 | vpt->task = NULL; | ||
263 | p9_mux_poll_task_num--; | ||
264 | } | ||
265 | p9_mux_num--; | ||
266 | mutex_unlock(&p9_mux_task_lock); | ||
267 | } | ||
268 | |||
269 | /** | ||
270 | * p9_conn_create - allocate and initialize the per-session mux data | ||
271 | * Creates the polling task if this is the first session. | ||
272 | * | ||
273 | * @trans - transport structure | ||
274 | * @msize - maximum message size | ||
275 | * @extended - pointer to the extended flag | ||
276 | */ | ||
277 | struct p9_conn *p9_conn_create(struct p9_trans *trans, int msize, | ||
278 | unsigned char *extended) | ||
279 | { | ||
280 | int i, n; | ||
281 | struct p9_conn *m, *mtmp; | ||
282 | |||
283 | P9_DPRINTK(P9_DEBUG_MUX, "transport %p msize %d\n", trans, msize); | ||
284 | m = kmalloc(sizeof(struct p9_conn), GFP_KERNEL); | ||
285 | if (!m) | ||
286 | return ERR_PTR(-ENOMEM); | ||
287 | |||
288 | spin_lock_init(&m->lock); | ||
289 | INIT_LIST_HEAD(&m->mux_list); | ||
290 | m->msize = msize; | ||
291 | m->extended = extended; | ||
292 | m->trans = trans; | ||
293 | m->tagpool = p9_idpool_create(); | ||
294 | if (IS_ERR(m->tagpool)) { | ||
295 | mtmp = ERR_PTR(-ENOMEM); | ||
296 | kfree(m); | ||
297 | return mtmp; | ||
298 | } | ||
299 | |||
300 | m->err = 0; | ||
301 | init_waitqueue_head(&m->equeue); | ||
302 | INIT_LIST_HEAD(&m->req_list); | ||
303 | INIT_LIST_HEAD(&m->unsent_req_list); | ||
304 | m->rcall = NULL; | ||
305 | m->rpos = 0; | ||
306 | m->rbuf = NULL; | ||
307 | m->wpos = m->wsize = 0; | ||
308 | m->wbuf = NULL; | ||
309 | INIT_WORK(&m->rq, p9_read_work); | ||
310 | INIT_WORK(&m->wq, p9_write_work); | ||
311 | m->wsched = 0; | ||
312 | memset(&m->poll_waddr, 0, sizeof(m->poll_waddr)); | ||
313 | m->poll_task = NULL; | ||
314 | n = p9_mux_poll_start(m); | ||
315 | if (n) { | ||
316 | kfree(m); | ||
317 | return ERR_PTR(n); | ||
318 | } | ||
319 | |||
320 | n = trans->poll(trans, &m->pt); | ||
321 | if (n & POLLIN) { | ||
322 | P9_DPRINTK(P9_DEBUG_MUX, "mux %p can read\n", m); | ||
323 | set_bit(Rpending, &m->wsched); | ||
324 | } | ||
325 | |||
326 | if (n & POLLOUT) { | ||
327 | P9_DPRINTK(P9_DEBUG_MUX, "mux %p can write\n", m); | ||
328 | set_bit(Wpending, &m->wsched); | ||
329 | } | ||
330 | |||
331 | for (i = 0; i < ARRAY_SIZE(m->poll_waddr); i++) { | ||
332 | if (IS_ERR(m->poll_waddr[i])) { | ||
333 | p9_mux_poll_stop(m); | ||
334 | mtmp = (void *)m->poll_waddr; /* the error code */ | ||
335 | kfree(m); | ||
336 | m = mtmp; | ||
337 | break; | ||
338 | } | ||
339 | } | ||
340 | |||
341 | return m; | ||
342 | } | ||
343 | EXPORT_SYMBOL(p9_conn_create); | ||
344 | |||
345 | /** | ||
346 | * p9_mux_destroy - cancels all pending requests and frees mux resources | ||
347 | */ | ||
348 | void p9_conn_destroy(struct p9_conn *m) | ||
349 | { | ||
350 | P9_DPRINTK(P9_DEBUG_MUX, "mux %p prev %p next %p\n", m, | ||
351 | m->mux_list.prev, m->mux_list.next); | ||
352 | p9_conn_cancel(m, -ECONNRESET); | ||
353 | |||
354 | if (!list_empty(&m->req_list)) { | ||
355 | /* wait until all processes waiting on this session exit */ | ||
356 | P9_DPRINTK(P9_DEBUG_MUX, | ||
357 | "mux %p waiting for empty request queue\n", m); | ||
358 | wait_event_timeout(m->equeue, (list_empty(&m->req_list)), 5000); | ||
359 | P9_DPRINTK(P9_DEBUG_MUX, "mux %p request queue empty: %d\n", m, | ||
360 | list_empty(&m->req_list)); | ||
361 | } | ||
362 | |||
363 | p9_mux_poll_stop(m); | ||
364 | m->trans = NULL; | ||
365 | p9_idpool_destroy(m->tagpool); | ||
366 | kfree(m); | ||
367 | } | ||
368 | EXPORT_SYMBOL(p9_conn_destroy); | ||
369 | |||
370 | /** | ||
371 | * p9_pollwait - called by files poll operation to add v9fs-poll task | ||
372 | * to files wait queue | ||
373 | */ | ||
374 | static void | ||
375 | p9_pollwait(struct file *filp, wait_queue_head_t *wait_address, | ||
376 | poll_table * p) | ||
377 | { | ||
378 | int i; | ||
379 | struct p9_conn *m; | ||
380 | |||
381 | m = container_of(p, struct p9_conn, pt); | ||
382 | for (i = 0; i < ARRAY_SIZE(m->poll_waddr); i++) | ||
383 | if (m->poll_waddr[i] == NULL) | ||
384 | break; | ||
385 | |||
386 | if (i >= ARRAY_SIZE(m->poll_waddr)) { | ||
387 | P9_DPRINTK(P9_DEBUG_ERROR, "not enough wait_address slots\n"); | ||
388 | return; | ||
389 | } | ||
390 | |||
391 | m->poll_waddr[i] = wait_address; | ||
392 | |||
393 | if (!wait_address) { | ||
394 | P9_DPRINTK(P9_DEBUG_ERROR, "no wait_address\n"); | ||
395 | m->poll_waddr[i] = ERR_PTR(-EIO); | ||
396 | return; | ||
397 | } | ||
398 | |||
399 | init_waitqueue_entry(&m->poll_wait[i], m->poll_task->task); | ||
400 | add_wait_queue(wait_address, &m->poll_wait[i]); | ||
401 | } | ||
402 | |||
403 | /** | ||
404 | * p9_poll_mux - polls a mux and schedules read or write works if necessary | ||
405 | */ | ||
406 | static void p9_poll_mux(struct p9_conn *m) | ||
407 | { | ||
408 | int n; | ||
409 | |||
410 | if (m->err < 0) | ||
411 | return; | ||
412 | |||
413 | n = m->trans->poll(m->trans, NULL); | ||
414 | if (n < 0 || n & (POLLERR | POLLHUP | POLLNVAL)) { | ||
415 | P9_DPRINTK(P9_DEBUG_MUX, "error mux %p err %d\n", m, n); | ||
416 | if (n >= 0) | ||
417 | n = -ECONNRESET; | ||
418 | p9_conn_cancel(m, n); | ||
419 | } | ||
420 | |||
421 | if (n & POLLIN) { | ||
422 | set_bit(Rpending, &m->wsched); | ||
423 | P9_DPRINTK(P9_DEBUG_MUX, "mux %p can read\n", m); | ||
424 | if (!test_and_set_bit(Rworksched, &m->wsched)) { | ||
425 | P9_DPRINTK(P9_DEBUG_MUX, "schedule read work %p\n", m); | ||
426 | queue_work(p9_mux_wq, &m->rq); | ||
427 | } | ||
428 | } | ||
429 | |||
430 | if (n & POLLOUT) { | ||
431 | set_bit(Wpending, &m->wsched); | ||
432 | P9_DPRINTK(P9_DEBUG_MUX, "mux %p can write\n", m); | ||
433 | if ((m->wsize || !list_empty(&m->unsent_req_list)) | ||
434 | && !test_and_set_bit(Wworksched, &m->wsched)) { | ||
435 | P9_DPRINTK(P9_DEBUG_MUX, "schedule write work %p\n", m); | ||
436 | queue_work(p9_mux_wq, &m->wq); | ||
437 | } | ||
438 | } | ||
439 | } | ||
440 | |||
441 | /** | ||
442 | * p9_poll_proc - polls all v9fs transports for new events and queues | ||
443 | * the appropriate work to the work queue | ||
444 | */ | ||
445 | static int p9_poll_proc(void *a) | ||
446 | { | ||
447 | struct p9_conn *m, *mtmp; | ||
448 | struct p9_mux_poll_task *vpt; | ||
449 | |||
450 | vpt = a; | ||
451 | P9_DPRINTK(P9_DEBUG_MUX, "start %p %p\n", current, vpt); | ||
452 | while (!kthread_should_stop()) { | ||
453 | set_current_state(TASK_INTERRUPTIBLE); | ||
454 | |||
455 | list_for_each_entry_safe(m, mtmp, &vpt->mux_list, mux_list) { | ||
456 | p9_poll_mux(m); | ||
457 | } | ||
458 | |||
459 | P9_DPRINTK(P9_DEBUG_MUX, "sleeping...\n"); | ||
460 | schedule_timeout(SCHED_TIMEOUT * HZ); | ||
461 | } | ||
462 | |||
463 | __set_current_state(TASK_RUNNING); | ||
464 | P9_DPRINTK(P9_DEBUG_MUX, "finish\n"); | ||
465 | return 0; | ||
466 | } | ||
467 | |||
468 | /** | ||
469 | * p9_write_work - called when a transport can send some data | ||
470 | */ | ||
471 | static void p9_write_work(struct work_struct *work) | ||
472 | { | ||
473 | int n, err; | ||
474 | struct p9_conn *m; | ||
475 | struct p9_req *req; | ||
476 | |||
477 | m = container_of(work, struct p9_conn, wq); | ||
478 | |||
479 | if (m->err < 0) { | ||
480 | clear_bit(Wworksched, &m->wsched); | ||
481 | return; | ||
482 | } | ||
483 | |||
484 | if (!m->wsize) { | ||
485 | if (list_empty(&m->unsent_req_list)) { | ||
486 | clear_bit(Wworksched, &m->wsched); | ||
487 | return; | ||
488 | } | ||
489 | |||
490 | spin_lock(&m->lock); | ||
491 | again: | ||
492 | req = list_entry(m->unsent_req_list.next, struct p9_req, | ||
493 | req_list); | ||
494 | list_move_tail(&req->req_list, &m->req_list); | ||
495 | if (req->err == ERREQFLUSH) | ||
496 | goto again; | ||
497 | |||
498 | m->wbuf = req->tcall->sdata; | ||
499 | m->wsize = req->tcall->size; | ||
500 | m->wpos = 0; | ||
501 | spin_unlock(&m->lock); | ||
502 | } | ||
503 | |||
504 | P9_DPRINTK(P9_DEBUG_MUX, "mux %p pos %d size %d\n", m, m->wpos, | ||
505 | m->wsize); | ||
506 | clear_bit(Wpending, &m->wsched); | ||
507 | err = m->trans->write(m->trans, m->wbuf + m->wpos, m->wsize - m->wpos); | ||
508 | P9_DPRINTK(P9_DEBUG_MUX, "mux %p sent %d bytes\n", m, err); | ||
509 | if (err == -EAGAIN) { | ||
510 | clear_bit(Wworksched, &m->wsched); | ||
511 | return; | ||
512 | } | ||
513 | |||
514 | if (err < 0) | ||
515 | goto error; | ||
516 | else if (err == 0) { | ||
517 | err = -EREMOTEIO; | ||
518 | goto error; | ||
519 | } | ||
520 | |||
521 | m->wpos += err; | ||
522 | if (m->wpos == m->wsize) | ||
523 | m->wpos = m->wsize = 0; | ||
524 | |||
525 | if (m->wsize == 0 && !list_empty(&m->unsent_req_list)) { | ||
526 | if (test_and_clear_bit(Wpending, &m->wsched)) | ||
527 | n = POLLOUT; | ||
528 | else | ||
529 | n = m->trans->poll(m->trans, NULL); | ||
530 | |||
531 | if (n & POLLOUT) { | ||
532 | P9_DPRINTK(P9_DEBUG_MUX, "schedule write work %p\n", m); | ||
533 | queue_work(p9_mux_wq, &m->wq); | ||
534 | } else | ||
535 | clear_bit(Wworksched, &m->wsched); | ||
536 | } else | ||
537 | clear_bit(Wworksched, &m->wsched); | ||
538 | |||
539 | return; | ||
540 | |||
541 | error: | ||
542 | p9_conn_cancel(m, err); | ||
543 | clear_bit(Wworksched, &m->wsched); | ||
544 | } | ||
545 | |||
546 | static void process_request(struct p9_conn *m, struct p9_req *req) | ||
547 | { | ||
548 | int ecode; | ||
549 | struct p9_str *ename; | ||
550 | |||
551 | if (!req->err && req->rcall->id == P9_RERROR) { | ||
552 | ecode = req->rcall->params.rerror.errno; | ||
553 | ename = &req->rcall->params.rerror.error; | ||
554 | |||
555 | P9_DPRINTK(P9_DEBUG_MUX, "Rerror %.*s\n", ename->len, | ||
556 | ename->str); | ||
557 | |||
558 | if (*m->extended) | ||
559 | req->err = -ecode; | ||
560 | |||
561 | if (!req->err) { | ||
562 | req->err = p9_errstr2errno(ename->str, ename->len); | ||
563 | |||
564 | if (!req->err) { /* string match failed */ | ||
565 | PRINT_FCALL_ERROR("unknown error", req->rcall); | ||
566 | } | ||
567 | |||
568 | if (!req->err) | ||
569 | req->err = -ESERVERFAULT; | ||
570 | } | ||
571 | } else if (req->tcall && req->rcall->id != req->tcall->id + 1) { | ||
572 | P9_DPRINTK(P9_DEBUG_ERROR, | ||
573 | "fcall mismatch: expected %d, got %d\n", | ||
574 | req->tcall->id + 1, req->rcall->id); | ||
575 | if (!req->err) | ||
576 | req->err = -EIO; | ||
577 | } | ||
578 | } | ||
579 | |||
580 | /** | ||
581 | * p9_read_work - called when there is some data to be read from a transport | ||
582 | */ | ||
583 | static void p9_read_work(struct work_struct *work) | ||
584 | { | ||
585 | int n, err; | ||
586 | struct p9_conn *m; | ||
587 | struct p9_req *req, *rptr, *rreq; | ||
588 | struct p9_fcall *rcall; | ||
589 | char *rbuf; | ||
590 | |||
591 | m = container_of(work, struct p9_conn, rq); | ||
592 | |||
593 | if (m->err < 0) | ||
594 | return; | ||
595 | |||
596 | rcall = NULL; | ||
597 | P9_DPRINTK(P9_DEBUG_MUX, "start mux %p pos %d\n", m, m->rpos); | ||
598 | |||
599 | if (!m->rcall) { | ||
600 | m->rcall = | ||
601 | kmalloc(sizeof(struct p9_fcall) + m->msize, GFP_KERNEL); | ||
602 | if (!m->rcall) { | ||
603 | err = -ENOMEM; | ||
604 | goto error; | ||
605 | } | ||
606 | |||
607 | m->rbuf = (char *)m->rcall + sizeof(struct p9_fcall); | ||
608 | m->rpos = 0; | ||
609 | } | ||
610 | |||
611 | clear_bit(Rpending, &m->wsched); | ||
612 | err = m->trans->read(m->trans, m->rbuf + m->rpos, m->msize - m->rpos); | ||
613 | P9_DPRINTK(P9_DEBUG_MUX, "mux %p got %d bytes\n", m, err); | ||
614 | if (err == -EAGAIN) { | ||
615 | clear_bit(Rworksched, &m->wsched); | ||
616 | return; | ||
617 | } | ||
618 | |||
619 | if (err <= 0) | ||
620 | goto error; | ||
621 | |||
622 | m->rpos += err; | ||
623 | while (m->rpos > 4) { | ||
624 | n = le32_to_cpu(*(__le32 *) m->rbuf); | ||
625 | if (n >= m->msize) { | ||
626 | P9_DPRINTK(P9_DEBUG_ERROR, | ||
627 | "requested packet size too big: %d\n", n); | ||
628 | err = -EIO; | ||
629 | goto error; | ||
630 | } | ||
631 | |||
632 | if (m->rpos < n) | ||
633 | break; | ||
634 | |||
635 | err = | ||
636 | p9_deserialize_fcall(m->rbuf, n, m->rcall, *m->extended); | ||
637 | if (err < 0) { | ||
638 | goto error; | ||
639 | } | ||
640 | |||
641 | #ifdef CONFIG_NET_9P_DEBUG | ||
642 | if ((p9_debug_level&P9_DEBUG_FCALL) == P9_DEBUG_FCALL) { | ||
643 | char buf[150]; | ||
644 | |||
645 | p9_printfcall(buf, sizeof(buf), m->rcall, | ||
646 | *m->extended); | ||
647 | printk(KERN_NOTICE ">>> %p %s\n", m, buf); | ||
648 | } | ||
649 | #endif | ||
650 | |||
651 | rcall = m->rcall; | ||
652 | rbuf = m->rbuf; | ||
653 | if (m->rpos > n) { | ||
654 | m->rcall = kmalloc(sizeof(struct p9_fcall) + m->msize, | ||
655 | GFP_KERNEL); | ||
656 | if (!m->rcall) { | ||
657 | err = -ENOMEM; | ||
658 | goto error; | ||
659 | } | ||
660 | |||
661 | m->rbuf = (char *)m->rcall + sizeof(struct p9_fcall); | ||
662 | memmove(m->rbuf, rbuf + n, m->rpos - n); | ||
663 | m->rpos -= n; | ||
664 | } else { | ||
665 | m->rcall = NULL; | ||
666 | m->rbuf = NULL; | ||
667 | m->rpos = 0; | ||
668 | } | ||
669 | |||
670 | P9_DPRINTK(P9_DEBUG_MUX, "mux %p fcall id %d tag %d\n", m, | ||
671 | rcall->id, rcall->tag); | ||
672 | |||
673 | req = NULL; | ||
674 | spin_lock(&m->lock); | ||
675 | list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) { | ||
676 | if (rreq->tag == rcall->tag) { | ||
677 | req = rreq; | ||
678 | if (req->flush != Flushing) | ||
679 | list_del(&req->req_list); | ||
680 | break; | ||
681 | } | ||
682 | } | ||
683 | spin_unlock(&m->lock); | ||
684 | |||
685 | if (req) { | ||
686 | req->rcall = rcall; | ||
687 | process_request(m, req); | ||
688 | |||
689 | if (req->flush != Flushing) { | ||
690 | if (req->cb) | ||
691 | (*req->cb) (req, req->cba); | ||
692 | else | ||
693 | kfree(req->rcall); | ||
694 | |||
695 | wake_up(&m->equeue); | ||
696 | } | ||
697 | } else { | ||
698 | if (err >= 0 && rcall->id != P9_RFLUSH) | ||
699 | P9_DPRINTK(P9_DEBUG_ERROR, | ||
700 | "unexpected response mux %p id %d tag %d\n", | ||
701 | m, rcall->id, rcall->tag); | ||
702 | kfree(rcall); | ||
703 | } | ||
704 | } | ||
705 | |||
706 | if (!list_empty(&m->req_list)) { | ||
707 | if (test_and_clear_bit(Rpending, &m->wsched)) | ||
708 | n = POLLIN; | ||
709 | else | ||
710 | n = m->trans->poll(m->trans, NULL); | ||
711 | |||
712 | if (n & POLLIN) { | ||
713 | P9_DPRINTK(P9_DEBUG_MUX, "schedule read work %p\n", m); | ||
714 | queue_work(p9_mux_wq, &m->rq); | ||
715 | } else | ||
716 | clear_bit(Rworksched, &m->wsched); | ||
717 | } else | ||
718 | clear_bit(Rworksched, &m->wsched); | ||
719 | |||
720 | return; | ||
721 | |||
722 | error: | ||
723 | p9_conn_cancel(m, err); | ||
724 | clear_bit(Rworksched, &m->wsched); | ||
725 | } | ||
726 | |||
727 | /** | ||
728 | * p9_send_request - send 9P request | ||
729 | * The function can sleep until the request is scheduled for sending. | ||
730 | * The function can be interrupted. Return from the function is not | ||
731 | * a guarantee that the request is sent successfully. Can return errors | ||
732 | * that can be retrieved by PTR_ERR macros. | ||
733 | * | ||
734 | * @m: mux data | ||
735 | * @tc: request to be sent | ||
736 | * @cb: callback function to call when response is received | ||
737 | * @cba: parameter to pass to the callback function | ||
738 | */ | ||
739 | static struct p9_req *p9_send_request(struct p9_conn *m, | ||
740 | struct p9_fcall *tc, | ||
741 | p9_conn_req_callback cb, void *cba) | ||
742 | { | ||
743 | int n; | ||
744 | struct p9_req *req; | ||
745 | |||
746 | P9_DPRINTK(P9_DEBUG_MUX, "mux %p task %p tcall %p id %d\n", m, current, | ||
747 | tc, tc->id); | ||
748 | if (m->err < 0) | ||
749 | return ERR_PTR(m->err); | ||
750 | |||
751 | req = kmalloc(sizeof(struct p9_req), GFP_KERNEL); | ||
752 | if (!req) | ||
753 | return ERR_PTR(-ENOMEM); | ||
754 | |||
755 | if (tc->id == P9_TVERSION) | ||
756 | n = P9_NOTAG; | ||
757 | else | ||
758 | n = p9_mux_get_tag(m); | ||
759 | |||
760 | if (n < 0) | ||
761 | return ERR_PTR(-ENOMEM); | ||
762 | |||
763 | p9_set_tag(tc, n); | ||
764 | |||
765 | #ifdef CONFIG_NET_9P_DEBUG | ||
766 | if ((p9_debug_level&P9_DEBUG_FCALL) == P9_DEBUG_FCALL) { | ||
767 | char buf[150]; | ||
768 | |||
769 | p9_printfcall(buf, sizeof(buf), tc, *m->extended); | ||
770 | printk(KERN_NOTICE "<<< %p %s\n", m, buf); | ||
771 | } | ||
772 | #endif | ||
773 | |||
774 | spin_lock_init(&req->lock); | ||
775 | req->tag = n; | ||
776 | req->tcall = tc; | ||
777 | req->rcall = NULL; | ||
778 | req->err = 0; | ||
779 | req->cb = cb; | ||
780 | req->cba = cba; | ||
781 | req->flush = None; | ||
782 | |||
783 | spin_lock(&m->lock); | ||
784 | list_add_tail(&req->req_list, &m->unsent_req_list); | ||
785 | spin_unlock(&m->lock); | ||
786 | |||
787 | if (test_and_clear_bit(Wpending, &m->wsched)) | ||
788 | n = POLLOUT; | ||
789 | else | ||
790 | n = m->trans->poll(m->trans, NULL); | ||
791 | |||
792 | if (n & POLLOUT && !test_and_set_bit(Wworksched, &m->wsched)) | ||
793 | queue_work(p9_mux_wq, &m->wq); | ||
794 | |||
795 | return req; | ||
796 | } | ||
797 | |||
798 | static void p9_mux_free_request(struct p9_conn *m, struct p9_req *req) | ||
799 | { | ||
800 | p9_mux_put_tag(m, req->tag); | ||
801 | kfree(req); | ||
802 | } | ||
803 | |||
804 | static void p9_mux_flush_cb(struct p9_req *freq, void *a) | ||
805 | { | ||
806 | p9_conn_req_callback cb; | ||
807 | int tag; | ||
808 | struct p9_conn *m; | ||
809 | struct p9_req *req, *rreq, *rptr; | ||
810 | |||
811 | m = a; | ||
812 | P9_DPRINTK(P9_DEBUG_MUX, "mux %p tc %p rc %p err %d oldtag %d\n", m, | ||
813 | freq->tcall, freq->rcall, freq->err, | ||
814 | freq->tcall->params.tflush.oldtag); | ||
815 | |||
816 | spin_lock(&m->lock); | ||
817 | cb = NULL; | ||
818 | tag = freq->tcall->params.tflush.oldtag; | ||
819 | req = NULL; | ||
820 | list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) { | ||
821 | if (rreq->tag == tag) { | ||
822 | req = rreq; | ||
823 | list_del(&req->req_list); | ||
824 | break; | ||
825 | } | ||
826 | } | ||
827 | spin_unlock(&m->lock); | ||
828 | |||
829 | if (req) { | ||
830 | spin_lock(&req->lock); | ||
831 | req->flush = Flushed; | ||
832 | spin_unlock(&req->lock); | ||
833 | |||
834 | if (req->cb) | ||
835 | (*req->cb) (req, req->cba); | ||
836 | else | ||
837 | kfree(req->rcall); | ||
838 | |||
839 | wake_up(&m->equeue); | ||
840 | } | ||
841 | |||
842 | kfree(freq->tcall); | ||
843 | kfree(freq->rcall); | ||
844 | p9_mux_free_request(m, freq); | ||
845 | } | ||
846 | |||
847 | static int | ||
848 | p9_mux_flush_request(struct p9_conn *m, struct p9_req *req) | ||
849 | { | ||
850 | struct p9_fcall *fc; | ||
851 | struct p9_req *rreq, *rptr; | ||
852 | |||
853 | P9_DPRINTK(P9_DEBUG_MUX, "mux %p req %p tag %d\n", m, req, req->tag); | ||
854 | |||
855 | /* if a response was received for a request, do nothing */ | ||
856 | spin_lock(&req->lock); | ||
857 | if (req->rcall || req->err) { | ||
858 | spin_unlock(&req->lock); | ||
859 | P9_DPRINTK(P9_DEBUG_MUX, | ||
860 | "mux %p req %p response already received\n", m, req); | ||
861 | return 0; | ||
862 | } | ||
863 | |||
864 | req->flush = Flushing; | ||
865 | spin_unlock(&req->lock); | ||
866 | |||
867 | spin_lock(&m->lock); | ||
868 | /* if the request is not sent yet, just remove it from the list */ | ||
869 | list_for_each_entry_safe(rreq, rptr, &m->unsent_req_list, req_list) { | ||
870 | if (rreq->tag == req->tag) { | ||
871 | P9_DPRINTK(P9_DEBUG_MUX, | ||
872 | "mux %p req %p request is not sent yet\n", m, req); | ||
873 | list_del(&rreq->req_list); | ||
874 | req->flush = Flushed; | ||
875 | spin_unlock(&m->lock); | ||
876 | if (req->cb) | ||
877 | (*req->cb) (req, req->cba); | ||
878 | return 0; | ||
879 | } | ||
880 | } | ||
881 | spin_unlock(&m->lock); | ||
882 | |||
883 | clear_thread_flag(TIF_SIGPENDING); | ||
884 | fc = p9_create_tflush(req->tag); | ||
885 | p9_send_request(m, fc, p9_mux_flush_cb, m); | ||
886 | return 1; | ||
887 | } | ||
888 | |||
889 | static void | ||
890 | p9_conn_rpc_cb(struct p9_req *req, void *a) | ||
891 | { | ||
892 | struct p9_mux_rpc *r; | ||
893 | |||
894 | P9_DPRINTK(P9_DEBUG_MUX, "req %p r %p\n", req, a); | ||
895 | r = a; | ||
896 | r->rcall = req->rcall; | ||
897 | r->err = req->err; | ||
898 | |||
899 | if (req->flush != None && !req->err) | ||
900 | r->err = -ERESTARTSYS; | ||
901 | |||
902 | wake_up(&r->wqueue); | ||
903 | } | ||
904 | |||
905 | /** | ||
906 | * p9_mux_rpc - sends 9P request and waits until a response is available. | ||
907 | * The function can be interrupted. | ||
908 | * @m: mux data | ||
909 | * @tc: request to be sent | ||
910 | * @rc: pointer where a pointer to the response is stored | ||
911 | */ | ||
912 | int | ||
913 | p9_conn_rpc(struct p9_conn *m, struct p9_fcall *tc, | ||
914 | struct p9_fcall **rc) | ||
915 | { | ||
916 | int err, sigpending; | ||
917 | unsigned long flags; | ||
918 | struct p9_req *req; | ||
919 | struct p9_mux_rpc r; | ||
920 | |||
921 | r.err = 0; | ||
922 | r.tcall = tc; | ||
923 | r.rcall = NULL; | ||
924 | r.m = m; | ||
925 | init_waitqueue_head(&r.wqueue); | ||
926 | |||
927 | if (rc) | ||
928 | *rc = NULL; | ||
929 | |||
930 | sigpending = 0; | ||
931 | if (signal_pending(current)) { | ||
932 | sigpending = 1; | ||
933 | clear_thread_flag(TIF_SIGPENDING); | ||
934 | } | ||
935 | |||
936 | req = p9_send_request(m, tc, p9_conn_rpc_cb, &r); | ||
937 | if (IS_ERR(req)) { | ||
938 | err = PTR_ERR(req); | ||
939 | P9_DPRINTK(P9_DEBUG_MUX, "error %d\n", err); | ||
940 | return err; | ||
941 | } | ||
942 | |||
943 | err = wait_event_interruptible(r.wqueue, r.rcall != NULL || r.err < 0); | ||
944 | if (r.err < 0) | ||
945 | err = r.err; | ||
946 | |||
947 | if (err == -ERESTARTSYS && m->trans->status == Connected | ||
948 | && m->err == 0) { | ||
949 | if (p9_mux_flush_request(m, req)) { | ||
950 | /* wait until we get response of the flush message */ | ||
951 | do { | ||
952 | clear_thread_flag(TIF_SIGPENDING); | ||
953 | err = wait_event_interruptible(r.wqueue, | ||
954 | r.rcall || r.err); | ||
955 | } while (!r.rcall && !r.err && err == -ERESTARTSYS && | ||
956 | m->trans->status == Connected && !m->err); | ||
957 | |||
958 | err = -ERESTARTSYS; | ||
959 | } | ||
960 | sigpending = 1; | ||
961 | } | ||
962 | |||
963 | if (sigpending) { | ||
964 | spin_lock_irqsave(¤t->sighand->siglock, flags); | ||
965 | recalc_sigpending(); | ||
966 | spin_unlock_irqrestore(¤t->sighand->siglock, flags); | ||
967 | } | ||
968 | |||
969 | if (rc) | ||
970 | *rc = r.rcall; | ||
971 | else | ||
972 | kfree(r.rcall); | ||
973 | |||
974 | p9_mux_free_request(m, req); | ||
975 | if (err > 0) | ||
976 | err = -EIO; | ||
977 | |||
978 | return err; | ||
979 | } | ||
980 | EXPORT_SYMBOL(p9_conn_rpc); | ||
981 | |||
982 | #ifdef P9_NONBLOCK | ||
983 | /** | ||
984 | * p9_conn_rpcnb - sends 9P request without waiting for response. | ||
985 | * @m: mux data | ||
986 | * @tc: request to be sent | ||
987 | * @cb: callback function to be called when response arrives | ||
988 | * @cba: value to pass to the callback function | ||
989 | */ | ||
990 | int p9_conn_rpcnb(struct p9_conn *m, struct p9_fcall *tc, | ||
991 | p9_conn_req_callback cb, void *a) | ||
992 | { | ||
993 | int err; | ||
994 | struct p9_req *req; | ||
995 | |||
996 | req = p9_send_request(m, tc, cb, a); | ||
997 | if (IS_ERR(req)) { | ||
998 | err = PTR_ERR(req); | ||
999 | P9_DPRINTK(P9_DEBUG_MUX, "error %d\n", err); | ||
1000 | return PTR_ERR(req); | ||
1001 | } | ||
1002 | |||
1003 | P9_DPRINTK(P9_DEBUG_MUX, "mux %p tc %p tag %d\n", m, tc, req->tag); | ||
1004 | return 0; | ||
1005 | } | ||
1006 | EXPORT_SYMBOL(p9_conn_rpcnb); | ||
1007 | #endif /* P9_NONBLOCK */ | ||
1008 | |||
1009 | /** | ||
1010 | * p9_conn_cancel - cancel all pending requests with error | ||
1011 | * @m: mux data | ||
1012 | * @err: error code | ||
1013 | */ | ||
1014 | void p9_conn_cancel(struct p9_conn *m, int err) | ||
1015 | { | ||
1016 | struct p9_req *req, *rtmp; | ||
1017 | LIST_HEAD(cancel_list); | ||
1018 | |||
1019 | P9_DPRINTK(P9_DEBUG_ERROR, "mux %p err %d\n", m, err); | ||
1020 | m->err = err; | ||
1021 | spin_lock(&m->lock); | ||
1022 | list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) { | ||
1023 | list_move(&req->req_list, &cancel_list); | ||
1024 | } | ||
1025 | list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) { | ||
1026 | list_move(&req->req_list, &cancel_list); | ||
1027 | } | ||
1028 | spin_unlock(&m->lock); | ||
1029 | |||
1030 | list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) { | ||
1031 | list_del(&req->req_list); | ||
1032 | if (!req->err) | ||
1033 | req->err = err; | ||
1034 | |||
1035 | if (req->cb) | ||
1036 | (*req->cb) (req, req->cba); | ||
1037 | else | ||
1038 | kfree(req->rcall); | ||
1039 | } | ||
1040 | |||
1041 | wake_up(&m->equeue); | ||
1042 | } | ||
1043 | EXPORT_SYMBOL(p9_conn_cancel); | ||
1044 | |||
1045 | static u16 p9_mux_get_tag(struct p9_conn *m) | ||
1046 | { | ||
1047 | int tag; | ||
1048 | |||
1049 | tag = p9_idpool_get(m->tagpool); | ||
1050 | if (tag < 0) | ||
1051 | return P9_NOTAG; | ||
1052 | else | ||
1053 | return (u16) tag; | ||
1054 | } | ||
1055 | |||
1056 | static void p9_mux_put_tag(struct p9_conn *m, u16 tag) | ||
1057 | { | ||
1058 | if (tag != P9_NOTAG && p9_idpool_check(tag, m->tagpool)) | ||
1059 | p9_idpool_put(tag, m->tagpool); | ||
1060 | } | ||
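The mux machinery deleted above (and folded into trans_fd.c below) frames 9P messages with a 4-byte little-endian size prefix: p9_read_work() accumulates bytes until a whole message has arrived, hands it to the matching request by tag, and memmove()s any trailing bytes back to the start of the buffer. Here is a self-contained sketch of just that framing step, with the tag lookup and buffer reallocation left out; the sample bytes in main() are invented for the demonstration.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Pull one complete message off the front of buf[0..*pos); return its size or 0. */
static uint32_t pull_message(unsigned char *buf, int *pos, int msize)
{
        uint32_t n;

        if (*pos < 4)
                return 0; /* size field not complete yet */

        /* little-endian size prefix counts the whole message, size field included */
        n = buf[0] | (buf[1] << 8) | ((uint32_t)buf[2] << 16) | ((uint32_t)buf[3] << 24);
        if (n >= (uint32_t)msize)
                return 0; /* would overflow the receive buffer: treated as an error in the mux */
        if ((uint32_t)*pos < n)
                return 0; /* message not complete yet */

        printf("got message of %u bytes, type %u, tag %u\n",
               (unsigned)n, (unsigned)buf[4],
               (unsigned)(buf[5] | (buf[6] << 8)));

        /* slide leftover bytes to the front, as the mux does with memmove() */
        memmove(buf, buf + n, *pos - n);
        *pos -= n;
        return n;
}

int main(void)
{
        /* two back-to-back minimal messages: size[4] type[1] tag[2] plus 2 payload bytes */
        unsigned char buf[64] = {
                9, 0, 0, 0, 101, 1, 0, 'h', 'i',
                9, 0, 0, 0, 103, 2, 0, 'o', 'k',
        };
        int pos = 18;

        while (pull_message(buf, &pos, (int)sizeof(buf)))
                ;
        return 0;
}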
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index 62332ed9da4a..1aa9d5175398 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -5,7 +5,7 @@
5 | * | 5 | * |
6 | * Copyright (C) 2006 by Russ Cox <rsc@swtch.com> | 6 | * Copyright (C) 2006 by Russ Cox <rsc@swtch.com> |
7 | * Copyright (C) 2004-2005 by Latchesar Ionkov <lucho@ionkov.net> | 7 | * Copyright (C) 2004-2005 by Latchesar Ionkov <lucho@ionkov.net> |
8 | * Copyright (C) 2004-2007 by Eric Van Hensbergen <ericvh@gmail.com> | 8 | * Copyright (C) 2004-2008 by Eric Van Hensbergen <ericvh@gmail.com> |
9 | * Copyright (C) 1997-2002 by Ron Minnich <rminnich@sarnoff.com> | 9 | * Copyright (C) 1997-2002 by Ron Minnich <rminnich@sarnoff.com> |
10 | * | 10 | * |
11 | * This program is free software; you can redistribute it and/or modify | 11 | * This program is free software; you can redistribute it and/or modify |
@@ -29,6 +29,7 @@
29 | #include <linux/module.h> | 29 | #include <linux/module.h> |
30 | #include <linux/net.h> | 30 | #include <linux/net.h> |
31 | #include <linux/ipv6.h> | 31 | #include <linux/ipv6.h> |
32 | #include <linux/kthread.h> | ||
32 | #include <linux/errno.h> | 33 | #include <linux/errno.h> |
33 | #include <linux/kernel.h> | 34 | #include <linux/kernel.h> |
34 | #include <linux/un.h> | 35 | #include <linux/un.h> |
@@ -42,7 +43,9 @@
42 | 43 | ||
43 | #define P9_PORT 564 | 44 | #define P9_PORT 564 |
44 | #define MAX_SOCK_BUF (64*1024) | 45 | #define MAX_SOCK_BUF (64*1024) |
45 | 46 | #define ERREQFLUSH 1 | |
47 | #define SCHED_TIMEOUT 10 | ||
48 | #define MAXPOLLWADDR 2 | ||
46 | 49 | ||
47 | struct p9_fd_opts { | 50 | struct p9_fd_opts { |
48 | int rfd; | 51 | int rfd; |
@@ -53,6 +56,7 @@ struct p9_fd_opts {
53 | struct p9_trans_fd { | 56 | struct p9_trans_fd { |
54 | struct file *rd; | 57 | struct file *rd; |
55 | struct file *wr; | 58 | struct file *wr; |
59 | struct p9_conn *conn; | ||
56 | }; | 60 | }; |
57 | 61 | ||
58 | /* | 62 | /* |
@@ -72,6 +76,1028 @@ static match_table_t tokens = {
72 | {Opt_err, NULL}, | 76 | {Opt_err, NULL}, |
73 | }; | 77 | }; |
74 | 78 | ||
79 | enum { | ||
80 | Rworksched = 1, /* read work scheduled or running */ | ||
81 | Rpending = 2, /* can read */ | ||
82 | Wworksched = 4, /* write work scheduled or running */ | ||
83 | Wpending = 8, /* can write */ | ||
84 | }; | ||
85 | |||
86 | enum { | ||
87 | None, | ||
88 | Flushing, | ||
89 | Flushed, | ||
90 | }; | ||
91 | |||
92 | struct p9_req; | ||
93 | |||
94 | typedef void (*p9_conn_req_callback)(struct p9_req *req, void *a); | ||
95 | struct p9_req { | ||
96 | spinlock_t lock; /* protect request structure */ | ||
97 | int tag; | ||
98 | struct p9_fcall *tcall; | ||
99 | struct p9_fcall *rcall; | ||
100 | int err; | ||
101 | p9_conn_req_callback cb; | ||
102 | void *cba; | ||
103 | int flush; | ||
104 | struct list_head req_list; | ||
105 | }; | ||
106 | |||
107 | struct p9_mux_poll_task; | ||
108 | |||
109 | struct p9_conn { | ||
110 | spinlock_t lock; /* protect lock structure */ | ||
111 | struct list_head mux_list; | ||
112 | struct p9_mux_poll_task *poll_task; | ||
113 | int msize; | ||
114 | unsigned char extended; | ||
115 | struct p9_trans *trans; | ||
116 | struct p9_idpool *tagpool; | ||
117 | int err; | ||
118 | wait_queue_head_t equeue; | ||
119 | struct list_head req_list; | ||
120 | struct list_head unsent_req_list; | ||
121 | struct p9_fcall *rcall; | ||
122 | int rpos; | ||
123 | char *rbuf; | ||
124 | int wpos; | ||
125 | int wsize; | ||
126 | char *wbuf; | ||
127 | wait_queue_t poll_wait[MAXPOLLWADDR]; | ||
128 | wait_queue_head_t *poll_waddr[MAXPOLLWADDR]; | ||
129 | poll_table pt; | ||
130 | struct work_struct rq; | ||
131 | struct work_struct wq; | ||
132 | unsigned long wsched; | ||
133 | }; | ||
134 | |||
135 | struct p9_mux_poll_task { | ||
136 | struct task_struct *task; | ||
137 | struct list_head mux_list; | ||
138 | int muxnum; | ||
139 | }; | ||
140 | |||
141 | struct p9_mux_rpc { | ||
142 | struct p9_conn *m; | ||
143 | int err; | ||
144 | struct p9_fcall *tcall; | ||
145 | struct p9_fcall *rcall; | ||
146 | wait_queue_head_t wqueue; | ||
147 | }; | ||
148 | |||
149 | static int p9_poll_proc(void *); | ||
150 | static void p9_read_work(struct work_struct *work); | ||
151 | static void p9_write_work(struct work_struct *work); | ||
152 | static void p9_pollwait(struct file *filp, wait_queue_head_t *wait_address, | ||
153 | poll_table *p); | ||
154 | static int p9_fd_write(struct p9_trans *trans, void *v, int len); | ||
155 | static int p9_fd_read(struct p9_trans *trans, void *v, int len); | ||
156 | |||
157 | static DEFINE_MUTEX(p9_mux_task_lock); | ||
158 | static struct workqueue_struct *p9_mux_wq; | ||
159 | |||
160 | static int p9_mux_num; | ||
161 | static int p9_mux_poll_task_num; | ||
162 | static struct p9_mux_poll_task p9_mux_poll_tasks[100]; | ||
163 | |||
164 | static void p9_conn_destroy(struct p9_conn *); | ||
165 | static unsigned int p9_fd_poll(struct p9_trans *trans, | ||
166 | struct poll_table_struct *pt); | ||
167 | |||
168 | #ifdef P9_NONBLOCK | ||
169 | static int p9_conn_rpcnb(struct p9_conn *m, struct p9_fcall *tc, | ||
170 | p9_conn_req_callback cb, void *a); | ||
171 | #endif /* P9_NONBLOCK */ | ||
172 | |||
173 | static void p9_conn_cancel(struct p9_conn *m, int err); | ||
174 | |||
175 | static int p9_mux_global_init(void) | ||
176 | { | ||
177 | int i; | ||
178 | |||
179 | for (i = 0; i < ARRAY_SIZE(p9_mux_poll_tasks); i++) | ||
180 | p9_mux_poll_tasks[i].task = NULL; | ||
181 | |||
182 | p9_mux_wq = create_workqueue("v9fs"); | ||
183 | if (!p9_mux_wq) { | ||
184 | printk(KERN_WARNING "v9fs: mux: creating workqueue failed\n"); | ||
185 | return -ENOMEM; | ||
186 | } | ||
187 | |||
188 | return 0; | ||
189 | } | ||
190 | |||
191 | static u16 p9_mux_get_tag(struct p9_conn *m) | ||
192 | { | ||
193 | int tag; | ||
194 | |||
195 | tag = p9_idpool_get(m->tagpool); | ||
196 | if (tag < 0) | ||
197 | return P9_NOTAG; | ||
198 | else | ||
199 | return (u16) tag; | ||
200 | } | ||
201 | |||
202 | static void p9_mux_put_tag(struct p9_conn *m, u16 tag) | ||
203 | { | ||
204 | if (tag != P9_NOTAG && p9_idpool_check(tag, m->tagpool)) | ||
205 | p9_idpool_put(tag, m->tagpool); | ||
206 | } | ||
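p9_mux_get_tag() and p9_mux_put_tag() above are thin wrappers around a p9_idpool: every outstanding request gets a 16-bit tag, with P9_NOTAG (0xffff) reserved for Tversion and also returned when allocation fails. A userspace stand-in using a fixed bitmap instead of the kernel idpool, for illustration only:

#include <stdint.h>
#include <stdio.h>

#define P9_NOTAG ((uint16_t)~0)
#define POOL_SIZE 128 /* illustrative; the kernel pool grows on demand */

static unsigned char used[POOL_SIZE];

static uint16_t get_tag(void)
{
        int i;

        for (i = 0; i < POOL_SIZE; i++)
                if (!used[i]) {
                        used[i] = 1;
                        return (uint16_t)i;
                }
        return P9_NOTAG; /* pool exhausted: mirrors the failure fallback */
}

static void put_tag(uint16_t tag)
{
        if (tag != P9_NOTAG && tag < POOL_SIZE)
                used[tag] = 0;
}

int main(void)
{
        uint16_t a = get_tag(), b = get_tag();

        printf("tags %u %u\n", (unsigned)a, (unsigned)b);
        put_tag(a);
        printf("tag %u is free again: %u\n", (unsigned)a, (unsigned)get_tag());
        return 0;
}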
207 | |||
208 | /** | ||
209 | * p9_mux_calc_poll_procs - calculates the number of polling procs | ||
210 | * based on the number of mounted v9fs filesystems. | ||
211 | * | ||
212 | * The current implementation returns sqrt of the number of mounts. | ||
213 | */ | ||
214 | static int p9_mux_calc_poll_procs(int muxnum) | ||
215 | { | ||
216 | int n; | ||
217 | |||
218 | if (p9_mux_poll_task_num) | ||
219 | n = muxnum / p9_mux_poll_task_num + | ||
220 | (muxnum % p9_mux_poll_task_num ? 1 : 0); | ||
221 | else | ||
222 | n = 1; | ||
223 | |||
224 | if (n > ARRAY_SIZE(p9_mux_poll_tasks)) | ||
225 | n = ARRAY_SIZE(p9_mux_poll_tasks); | ||
226 | |||
227 | return n; | ||
228 | } | ||
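Although the comment carried over from mux.c still says "sqrt of the number of mounts", p9_mux_calc_poll_procs() actually computes a ceiling division: the number of connections divided by the current number of poll tasks, rounded up and capped at the size of p9_mux_poll_tasks[] (100 in this patch). The same arithmetic in isolation, with illustrative inputs:

#include <stdio.h>

#define MAX_POLL_TASKS 100

static int calc_poll_procs(int muxnum, int poll_task_num)
{
        int n;

        if (poll_task_num)
                n = muxnum / poll_task_num +
                        (muxnum % poll_task_num ? 1 : 0);
        else
                n = 1; /* no poll task exists yet: start with one */

        if (n > MAX_POLL_TASKS)
                n = MAX_POLL_TASKS;
        return n;
}

int main(void)
{
        printf("%d\n", calc_poll_procs(1, 0)); /* 1 */
        printf("%d\n", calc_poll_procs(7, 3)); /* 3 */
        printf("%d\n", calc_poll_procs(9, 3)); /* 3 */
        return 0;
}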
229 | |||
230 | static int p9_mux_poll_start(struct p9_conn *m) | ||
231 | { | ||
232 | int i, n; | ||
233 | struct p9_mux_poll_task *vpt, *vptlast; | ||
234 | struct task_struct *pproc; | ||
235 | |||
236 | P9_DPRINTK(P9_DEBUG_MUX, "mux %p muxnum %d procnum %d\n", m, p9_mux_num, | ||
237 | p9_mux_poll_task_num); | ||
238 | mutex_lock(&p9_mux_task_lock); | ||
239 | |||
240 | n = p9_mux_calc_poll_procs(p9_mux_num + 1); | ||
241 | if (n > p9_mux_poll_task_num) { | ||
242 | for (i = 0; i < ARRAY_SIZE(p9_mux_poll_tasks); i++) { | ||
243 | if (p9_mux_poll_tasks[i].task == NULL) { | ||
244 | vpt = &p9_mux_poll_tasks[i]; | ||
245 | P9_DPRINTK(P9_DEBUG_MUX, "create proc %p\n", | ||
246 | vpt); | ||
247 | pproc = kthread_create(p9_poll_proc, vpt, | ||
248 | "v9fs-poll"); | ||
249 | |||
250 | if (!IS_ERR(pproc)) { | ||
251 | vpt->task = pproc; | ||
252 | INIT_LIST_HEAD(&vpt->mux_list); | ||
253 | vpt->muxnum = 0; | ||
254 | p9_mux_poll_task_num++; | ||
255 | wake_up_process(vpt->task); | ||
256 | } | ||
257 | break; | ||
258 | } | ||
259 | } | ||
260 | |||
261 | if (i >= ARRAY_SIZE(p9_mux_poll_tasks)) | ||
262 | P9_DPRINTK(P9_DEBUG_ERROR, | ||
263 | "warning: no free poll slots\n"); | ||
264 | } | ||
265 | |||
266 | n = (p9_mux_num + 1) / p9_mux_poll_task_num + | ||
267 | ((p9_mux_num + 1) % p9_mux_poll_task_num ? 1 : 0); | ||
268 | |||
269 | vptlast = NULL; | ||
270 | for (i = 0; i < ARRAY_SIZE(p9_mux_poll_tasks); i++) { | ||
271 | vpt = &p9_mux_poll_tasks[i]; | ||
272 | if (vpt->task != NULL) { | ||
273 | vptlast = vpt; | ||
274 | if (vpt->muxnum < n) { | ||
275 | P9_DPRINTK(P9_DEBUG_MUX, "put in proc %d\n", i); | ||
276 | list_add(&m->mux_list, &vpt->mux_list); | ||
277 | vpt->muxnum++; | ||
278 | m->poll_task = vpt; | ||
279 | memset(&m->poll_waddr, 0, | ||
280 | sizeof(m->poll_waddr)); | ||
281 | init_poll_funcptr(&m->pt, p9_pollwait); | ||
282 | break; | ||
283 | } | ||
284 | } | ||
285 | } | ||
286 | |||
287 | if (i >= ARRAY_SIZE(p9_mux_poll_tasks)) { | ||
288 | if (vptlast == NULL) { | ||
289 | mutex_unlock(&p9_mux_task_lock); | ||
290 | return -ENOMEM; | ||
291 | } | ||
292 | |||
293 | P9_DPRINTK(P9_DEBUG_MUX, "put in proc %d\n", i); | ||
294 | list_add(&m->mux_list, &vptlast->mux_list); | ||
295 | vptlast->muxnum++; | ||
296 | m->poll_task = vptlast; | ||
297 | memset(&m->poll_waddr, 0, sizeof(m->poll_waddr)); | ||
298 | init_poll_funcptr(&m->pt, p9_pollwait); | ||
299 | } | ||
300 | |||
301 | p9_mux_num++; | ||
302 | mutex_unlock(&p9_mux_task_lock); | ||
303 | |||
304 | return 0; | ||
305 | } | ||
306 | |||
307 | static void p9_mux_poll_stop(struct p9_conn *m) | ||
308 | { | ||
309 | int i; | ||
310 | struct p9_mux_poll_task *vpt; | ||
311 | |||
312 | mutex_lock(&p9_mux_task_lock); | ||
313 | vpt = m->poll_task; | ||
314 | list_del(&m->mux_list); | ||
315 | for (i = 0; i < ARRAY_SIZE(m->poll_waddr); i++) { | ||
316 | if (m->poll_waddr[i] != NULL) { | ||
317 | remove_wait_queue(m->poll_waddr[i], &m->poll_wait[i]); | ||
318 | m->poll_waddr[i] = NULL; | ||
319 | } | ||
320 | } | ||
321 | vpt->muxnum--; | ||
322 | if (!vpt->muxnum) { | ||
323 | P9_DPRINTK(P9_DEBUG_MUX, "destroy proc %p\n", vpt); | ||
324 | kthread_stop(vpt->task); | ||
325 | vpt->task = NULL; | ||
326 | p9_mux_poll_task_num--; | ||
327 | } | ||
328 | p9_mux_num--; | ||
329 | mutex_unlock(&p9_mux_task_lock); | ||
330 | } | ||
331 | |||
332 | /** | ||
333 | * p9_conn_create - allocate and initialize the per-session mux data | ||
334 | * Creates the polling task if this is the first session. | ||
335 | * | ||
336 | * @trans - transport structure | ||
337 | * @msize - maximum message size | ||
338 | * @extended - extended flag | ||
339 | */ | ||
340 | static struct p9_conn *p9_conn_create(struct p9_trans *trans) | ||
341 | { | ||
342 | int i, n; | ||
343 | struct p9_conn *m, *mtmp; | ||
344 | |||
345 | P9_DPRINTK(P9_DEBUG_MUX, "transport %p msize %d\n", trans, | ||
346 | trans->msize); | ||
347 | m = kmalloc(sizeof(struct p9_conn), GFP_KERNEL); | ||
348 | if (!m) | ||
349 | return ERR_PTR(-ENOMEM); | ||
350 | |||
351 | spin_lock_init(&m->lock); | ||
352 | INIT_LIST_HEAD(&m->mux_list); | ||
353 | m->msize = trans->msize; | ||
354 | m->extended = trans->extended; | ||
355 | m->trans = trans; | ||
356 | m->tagpool = p9_idpool_create(); | ||
357 | if (IS_ERR(m->tagpool)) { | ||
358 | mtmp = ERR_PTR(-ENOMEM); | ||
359 | kfree(m); | ||
360 | return mtmp; | ||
361 | } | ||
362 | |||
363 | m->err = 0; | ||
364 | init_waitqueue_head(&m->equeue); | ||
365 | INIT_LIST_HEAD(&m->req_list); | ||
366 | INIT_LIST_HEAD(&m->unsent_req_list); | ||
367 | m->rcall = NULL; | ||
368 | m->rpos = 0; | ||
369 | m->rbuf = NULL; | ||
370 | m->wpos = m->wsize = 0; | ||
371 | m->wbuf = NULL; | ||
372 | INIT_WORK(&m->rq, p9_read_work); | ||
373 | INIT_WORK(&m->wq, p9_write_work); | ||
374 | m->wsched = 0; | ||
375 | memset(&m->poll_waddr, 0, sizeof(m->poll_waddr)); | ||
376 | m->poll_task = NULL; | ||
377 | n = p9_mux_poll_start(m); | ||
378 | if (n) { | ||
379 | kfree(m); | ||
380 | return ERR_PTR(n); | ||
381 | } | ||
382 | |||
383 | n = p9_fd_poll(trans, &m->pt); | ||
384 | if (n & POLLIN) { | ||
385 | P9_DPRINTK(P9_DEBUG_MUX, "mux %p can read\n", m); | ||
386 | set_bit(Rpending, &m->wsched); | ||
387 | } | ||
388 | |||
389 | if (n & POLLOUT) { | ||
390 | P9_DPRINTK(P9_DEBUG_MUX, "mux %p can write\n", m); | ||
391 | set_bit(Wpending, &m->wsched); | ||
392 | } | ||
393 | |||
394 | for (i = 0; i < ARRAY_SIZE(m->poll_waddr); i++) { | ||
395 | if (IS_ERR(m->poll_waddr[i])) { | ||
396 | p9_mux_poll_stop(m); | ||
397 | mtmp = (void *)m->poll_waddr; /* the error code */ | ||
398 | kfree(m); | ||
399 | m = mtmp; | ||
400 | break; | ||
401 | } | ||
402 | } | ||
403 | |||
404 | return m; | ||
405 | } | ||
406 | |||
407 | /** | ||
408 | * p9_mux_destroy - cancels all pending requests and frees mux resources | ||
409 | */ | ||
410 | static void p9_conn_destroy(struct p9_conn *m) | ||
411 | { | ||
412 | P9_DPRINTK(P9_DEBUG_MUX, "mux %p prev %p next %p\n", m, | ||
413 | m->mux_list.prev, m->mux_list.next); | ||
414 | p9_conn_cancel(m, -ECONNRESET); | ||
415 | |||
416 | if (!list_empty(&m->req_list)) { | ||
417 | /* wait until all processes waiting on this session exit */ | ||
418 | P9_DPRINTK(P9_DEBUG_MUX, | ||
419 | "mux %p waiting for empty request queue\n", m); | ||
420 | wait_event_timeout(m->equeue, (list_empty(&m->req_list)), 5000); | ||
421 | P9_DPRINTK(P9_DEBUG_MUX, "mux %p request queue empty: %d\n", m, | ||
422 | list_empty(&m->req_list)); | ||
423 | } | ||
424 | |||
425 | p9_mux_poll_stop(m); | ||
426 | m->trans = NULL; | ||
427 | p9_idpool_destroy(m->tagpool); | ||
428 | kfree(m); | ||
429 | } | ||
430 | |||
431 | /** | ||
432 | * p9_pollwait - called by a file's poll operation to add the v9fs poll | ||
433 | * task to the file's wait queue | ||
434 | */ | ||
435 | static void | ||
436 | p9_pollwait(struct file *filp, wait_queue_head_t *wait_address, poll_table *p) | ||
437 | { | ||
438 | int i; | ||
439 | struct p9_conn *m; | ||
440 | |||
441 | m = container_of(p, struct p9_conn, pt); | ||
442 | for (i = 0; i < ARRAY_SIZE(m->poll_waddr); i++) | ||
443 | if (m->poll_waddr[i] == NULL) | ||
444 | break; | ||
445 | |||
446 | if (i >= ARRAY_SIZE(m->poll_waddr)) { | ||
447 | P9_DPRINTK(P9_DEBUG_ERROR, "not enough wait_address slots\n"); | ||
448 | return; | ||
449 | } | ||
450 | |||
451 | m->poll_waddr[i] = wait_address; | ||
452 | |||
453 | if (!wait_address) { | ||
454 | P9_DPRINTK(P9_DEBUG_ERROR, "no wait_address\n"); | ||
455 | m->poll_waddr[i] = ERR_PTR(-EIO); | ||
456 | return; | ||
457 | } | ||
458 | |||
459 | init_waitqueue_entry(&m->poll_wait[i], m->poll_task->task); | ||
460 | add_wait_queue(wait_address, &m->poll_wait[i]); | ||
461 | } | ||
462 | |||
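p9_pollwait() is only reached through the VFS poll machinery: the connection's poll_table has its queue callback pointed at it, and the underlying file's ->poll() method then invokes it once per wait queue it would sleep on. A sketch of that wiring, assuming the poll_table is initialized elsewhere in this file and with filp standing in for one of the transport's open files:

    init_poll_funcptr(&m->pt, p9_pollwait);   /* m->pt.qproc = p9_pollwait */
    n = filp->f_op->poll(filp, &m->pt);       /* ->poll() calls poll_wait(),
                                               * which calls p9_pollwait() for
                                               * each wait queue head it adds */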
463 | /** | ||
464 | * p9_poll_mux - polls a mux and schedules read or write work as necessary | ||
465 | */ | ||
466 | static void p9_poll_mux(struct p9_conn *m) | ||
467 | { | ||
468 | int n; | ||
469 | |||
470 | if (m->err < 0) | ||
471 | return; | ||
472 | |||
473 | n = p9_fd_poll(m->trans, NULL); | ||
474 | if (n < 0 || n & (POLLERR | POLLHUP | POLLNVAL)) { | ||
475 | P9_DPRINTK(P9_DEBUG_MUX, "error mux %p err %d\n", m, n); | ||
476 | if (n >= 0) | ||
477 | n = -ECONNRESET; | ||
478 | p9_conn_cancel(m, n); | ||
479 | } | ||
480 | |||
481 | if (n & POLLIN) { | ||
482 | set_bit(Rpending, &m->wsched); | ||
483 | P9_DPRINTK(P9_DEBUG_MUX, "mux %p can read\n", m); | ||
484 | if (!test_and_set_bit(Rworksched, &m->wsched)) { | ||
485 | P9_DPRINTK(P9_DEBUG_MUX, "schedule read work %p\n", m); | ||
486 | queue_work(p9_mux_wq, &m->rq); | ||
487 | } | ||
488 | } | ||
489 | |||
490 | if (n & POLLOUT) { | ||
491 | set_bit(Wpending, &m->wsched); | ||
492 | P9_DPRINTK(P9_DEBUG_MUX, "mux %p can write\n", m); | ||
493 | if ((m->wsize || !list_empty(&m->unsent_req_list)) | ||
494 | && !test_and_set_bit(Wworksched, &m->wsched)) { | ||
495 | P9_DPRINTK(P9_DEBUG_MUX, "schedule write work %p\n", m); | ||
496 | queue_work(p9_mux_wq, &m->wq); | ||
497 | } | ||
498 | } | ||
499 | } | ||
500 | |||
501 | /** | ||
502 | * p9_poll_proc - polls all v9fs transports for new events and queues | ||
503 | * the appropriate read or write work on the work queue | ||
504 | */ | ||
505 | static int p9_poll_proc(void *a) | ||
506 | { | ||
507 | struct p9_conn *m, *mtmp; | ||
508 | struct p9_mux_poll_task *vpt; | ||
509 | |||
510 | vpt = a; | ||
511 | P9_DPRINTK(P9_DEBUG_MUX, "start %p %p\n", current, vpt); | ||
512 | while (!kthread_should_stop()) { | ||
513 | set_current_state(TASK_INTERRUPTIBLE); | ||
514 | |||
515 | list_for_each_entry_safe(m, mtmp, &vpt->mux_list, mux_list) { | ||
516 | p9_poll_mux(m); | ||
517 | } | ||
518 | |||
519 | P9_DPRINTK(P9_DEBUG_MUX, "sleeping...\n"); | ||
520 | schedule_timeout(SCHED_TIMEOUT * HZ); | ||
521 | } | ||
522 | |||
523 | __set_current_state(TASK_RUNNING); | ||
524 | P9_DPRINTK(P9_DEBUG_MUX, "finish\n"); | ||
525 | return 0; | ||
526 | } | ||
527 | |||
528 | /** | ||
529 | * p9_write_work - called when a transport can send some data | ||
530 | */ | ||
531 | static void p9_write_work(struct work_struct *work) | ||
532 | { | ||
533 | int n, err; | ||
534 | struct p9_conn *m; | ||
535 | struct p9_req *req; | ||
536 | |||
537 | m = container_of(work, struct p9_conn, wq); | ||
538 | |||
539 | if (m->err < 0) { | ||
540 | clear_bit(Wworksched, &m->wsched); | ||
541 | return; | ||
542 | } | ||
543 | |||
544 | if (!m->wsize) { | ||
545 | if (list_empty(&m->unsent_req_list)) { | ||
546 | clear_bit(Wworksched, &m->wsched); | ||
547 | return; | ||
548 | } | ||
549 | |||
550 | spin_lock(&m->lock); | ||
551 | again: | ||
552 | req = list_entry(m->unsent_req_list.next, struct p9_req, | ||
553 | req_list); | ||
554 | list_move_tail(&req->req_list, &m->req_list); | ||
555 | if (req->err == ERREQFLUSH) | ||
556 | goto again; | ||
557 | |||
558 | m->wbuf = req->tcall->sdata; | ||
559 | m->wsize = req->tcall->size; | ||
560 | m->wpos = 0; | ||
561 | spin_unlock(&m->lock); | ||
562 | } | ||
563 | |||
564 | P9_DPRINTK(P9_DEBUG_MUX, "mux %p pos %d size %d\n", m, m->wpos, | ||
565 | m->wsize); | ||
566 | clear_bit(Wpending, &m->wsched); | ||
567 | err = p9_fd_write(m->trans, m->wbuf + m->wpos, m->wsize - m->wpos); | ||
568 | P9_DPRINTK(P9_DEBUG_MUX, "mux %p sent %d bytes\n", m, err); | ||
569 | if (err == -EAGAIN) { | ||
570 | clear_bit(Wworksched, &m->wsched); | ||
571 | return; | ||
572 | } | ||
573 | |||
574 | if (err < 0) | ||
575 | goto error; | ||
576 | else if (err == 0) { | ||
577 | err = -EREMOTEIO; | ||
578 | goto error; | ||
579 | } | ||
580 | |||
581 | m->wpos += err; | ||
582 | if (m->wpos == m->wsize) | ||
583 | m->wpos = m->wsize = 0; | ||
584 | |||
585 | if (m->wsize == 0 && !list_empty(&m->unsent_req_list)) { | ||
586 | if (test_and_clear_bit(Wpending, &m->wsched)) | ||
587 | n = POLLOUT; | ||
588 | else | ||
589 | n = p9_fd_poll(m->trans, NULL); | ||
590 | |||
591 | if (n & POLLOUT) { | ||
592 | P9_DPRINTK(P9_DEBUG_MUX, "schedule write work %p\n", m); | ||
593 | queue_work(p9_mux_wq, &m->wq); | ||
594 | } else | ||
595 | clear_bit(Wworksched, &m->wsched); | ||
596 | } else | ||
597 | clear_bit(Wworksched, &m->wsched); | ||
598 | |||
599 | return; | ||
600 | |||
601 | error: | ||
602 | p9_conn_cancel(m, err); | ||
603 | clear_bit(Wworksched, &m->wsched); | ||
604 | } | ||
605 | |||
606 | static void process_request(struct p9_conn *m, struct p9_req *req) | ||
607 | { | ||
608 | int ecode; | ||
609 | struct p9_str *ename; | ||
610 | |||
611 | if (!req->err && req->rcall->id == P9_RERROR) { | ||
612 | ecode = req->rcall->params.rerror.errno; | ||
613 | ename = &req->rcall->params.rerror.error; | ||
614 | |||
615 | P9_DPRINTK(P9_DEBUG_MUX, "Rerror %.*s\n", ename->len, | ||
616 | ename->str); | ||
617 | |||
618 | if (m->extended) | ||
619 | req->err = -ecode; | ||
620 | |||
621 | if (!req->err) { | ||
622 | req->err = p9_errstr2errno(ename->str, ename->len); | ||
623 | |||
624 | /* string match failed */ | ||
625 | if (!req->err) { | ||
626 | PRINT_FCALL_ERROR("unknown error", req->rcall); | ||
627 | req->err = -ESERVERFAULT; | ||
628 | } | ||
629 | } | ||
630 | } else if (req->tcall && req->rcall->id != req->tcall->id + 1) { | ||
631 | P9_DPRINTK(P9_DEBUG_ERROR, | ||
632 | "fcall mismatch: expected %d, got %d\n", | ||
633 | req->tcall->id + 1, req->rcall->id); | ||
634 | if (!req->err) | ||
635 | req->err = -EIO; | ||
636 | } | ||
637 | } | ||
638 | |||
639 | /** | ||
640 | * p9_read_work - called when there is some data to be read from a transport | ||
641 | */ | ||
642 | static void p9_read_work(struct work_struct *work) | ||
643 | { | ||
644 | int n, err; | ||
645 | struct p9_conn *m; | ||
646 | struct p9_req *req, *rptr, *rreq; | ||
647 | struct p9_fcall *rcall; | ||
648 | char *rbuf; | ||
649 | |||
650 | m = container_of(work, struct p9_conn, rq); | ||
651 | |||
652 | if (m->err < 0) | ||
653 | return; | ||
654 | |||
655 | rcall = NULL; | ||
656 | P9_DPRINTK(P9_DEBUG_MUX, "start mux %p pos %d\n", m, m->rpos); | ||
657 | |||
658 | if (!m->rcall) { | ||
659 | m->rcall = | ||
660 | kmalloc(sizeof(struct p9_fcall) + m->msize, GFP_KERNEL); | ||
661 | if (!m->rcall) { | ||
662 | err = -ENOMEM; | ||
663 | goto error; | ||
664 | } | ||
665 | |||
666 | m->rbuf = (char *)m->rcall + sizeof(struct p9_fcall); | ||
667 | m->rpos = 0; | ||
668 | } | ||
669 | |||
670 | clear_bit(Rpending, &m->wsched); | ||
671 | err = p9_fd_read(m->trans, m->rbuf + m->rpos, m->msize - m->rpos); | ||
672 | P9_DPRINTK(P9_DEBUG_MUX, "mux %p got %d bytes\n", m, err); | ||
673 | if (err == -EAGAIN) { | ||
674 | clear_bit(Rworksched, &m->wsched); | ||
675 | return; | ||
676 | } | ||
677 | |||
678 | if (err <= 0) | ||
679 | goto error; | ||
680 | |||
681 | m->rpos += err; | ||
682 | while (m->rpos > 4) { | ||
683 | n = le32_to_cpu(*(__le32 *) m->rbuf); | ||
684 | if (n >= m->msize) { | ||
685 | P9_DPRINTK(P9_DEBUG_ERROR, | ||
686 | "requested packet size too big: %d\n", n); | ||
687 | err = -EIO; | ||
688 | goto error; | ||
689 | } | ||
690 | |||
691 | if (m->rpos < n) | ||
692 | break; | ||
693 | |||
694 | err = | ||
695 | p9_deserialize_fcall(m->rbuf, n, m->rcall, m->extended); | ||
696 | if (err < 0) | ||
697 | goto error; | ||
698 | |||
699 | #ifdef CONFIG_NET_9P_DEBUG | ||
700 | if ((p9_debug_level&P9_DEBUG_FCALL) == P9_DEBUG_FCALL) { | ||
701 | char buf[150]; | ||
702 | |||
703 | p9_printfcall(buf, sizeof(buf), m->rcall, | ||
704 | m->extended); | ||
705 | printk(KERN_NOTICE ">>> %p %s\n", m, buf); | ||
706 | } | ||
707 | #endif | ||
708 | |||
709 | rcall = m->rcall; | ||
710 | rbuf = m->rbuf; | ||
711 | if (m->rpos > n) { | ||
712 | m->rcall = kmalloc(sizeof(struct p9_fcall) + m->msize, | ||
713 | GFP_KERNEL); | ||
714 | if (!m->rcall) { | ||
715 | err = -ENOMEM; | ||
716 | goto error; | ||
717 | } | ||
718 | |||
719 | m->rbuf = (char *)m->rcall + sizeof(struct p9_fcall); | ||
720 | memmove(m->rbuf, rbuf + n, m->rpos - n); | ||
721 | m->rpos -= n; | ||
722 | } else { | ||
723 | m->rcall = NULL; | ||
724 | m->rbuf = NULL; | ||
725 | m->rpos = 0; | ||
726 | } | ||
727 | |||
728 | P9_DPRINTK(P9_DEBUG_MUX, "mux %p fcall id %d tag %d\n", m, | ||
729 | rcall->id, rcall->tag); | ||
730 | |||
731 | req = NULL; | ||
732 | spin_lock(&m->lock); | ||
733 | list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) { | ||
734 | if (rreq->tag == rcall->tag) { | ||
735 | req = rreq; | ||
736 | if (req->flush != Flushing) | ||
737 | list_del(&req->req_list); | ||
738 | break; | ||
739 | } | ||
740 | } | ||
741 | spin_unlock(&m->lock); | ||
742 | |||
743 | if (req) { | ||
744 | req->rcall = rcall; | ||
745 | process_request(m, req); | ||
746 | |||
747 | if (req->flush != Flushing) { | ||
748 | if (req->cb) | ||
749 | (*req->cb) (req, req->cba); | ||
750 | else | ||
751 | kfree(req->rcall); | ||
752 | |||
753 | wake_up(&m->equeue); | ||
754 | } | ||
755 | } else { | ||
756 | if (err >= 0 && rcall->id != P9_RFLUSH) | ||
757 | P9_DPRINTK(P9_DEBUG_ERROR, | ||
758 | "unexpected response mux %p id %d tag %d\n", | ||
759 | m, rcall->id, rcall->tag); | ||
760 | kfree(rcall); | ||
761 | } | ||
762 | } | ||
763 | |||
764 | if (!list_empty(&m->req_list)) { | ||
765 | if (test_and_clear_bit(Rpending, &m->wsched)) | ||
766 | n = POLLIN; | ||
767 | else | ||
768 | n = p9_fd_poll(m->trans, NULL); | ||
769 | |||
770 | if (n & POLLIN) { | ||
771 | P9_DPRINTK(P9_DEBUG_MUX, "schedule read work %p\n", m); | ||
772 | queue_work(p9_mux_wq, &m->rq); | ||
773 | } else | ||
774 | clear_bit(Rworksched, &m->wsched); | ||
775 | } else | ||
776 | clear_bit(Rworksched, &m->wsched); | ||
777 | |||
778 | return; | ||
779 | |||
780 | error: | ||
781 | p9_conn_cancel(m, err); | ||
782 | clear_bit(Rworksched, &m->wsched); | ||
783 | } | ||
784 | |||
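The loop above carves complete messages out of the receive buffer using 9P's framing rule: every message begins with a 4-byte little-endian size field that counts the whole message, size field included. A standalone sketch of that rule with a hypothetical helper (not part of this patch):

    #include <stdint.h>
    #include <stddef.h>

    /* Return the length of the first complete 9P message in buf, or 0 if
     * fewer than that many bytes have been received so far. */
    static uint32_t p9_first_msg_len(const unsigned char *buf, size_t have)
    {
            uint32_t n;

            if (have < 4)
                    return 0;
            n = (uint32_t)buf[0] | (uint32_t)buf[1] << 8 |
                (uint32_t)buf[2] << 16 | (uint32_t)buf[3] << 24;
            return have >= n ? n : 0;
    }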
785 | /** | ||
786 | * p9_send_request - send a 9P request | ||
787 | * The function may sleep until the request is scheduled for sending | ||
788 | * and may be interrupted. Returning does not guarantee that the request | ||
789 | * was sent successfully; on failure an error pointer is returned whose | ||
790 | * code can be retrieved with PTR_ERR(). | ||
791 | * | ||
792 | * @m: mux data | ||
793 | * @tc: request to be sent | ||
794 | * @cb: callback function to call when response is received | ||
795 | * @cba: parameter to pass to the callback function | ||
796 | */ | ||
797 | static struct p9_req *p9_send_request(struct p9_conn *m, | ||
798 | struct p9_fcall *tc, | ||
799 | p9_conn_req_callback cb, void *cba) | ||
800 | { | ||
801 | int n; | ||
802 | struct p9_req *req; | ||
803 | |||
804 | P9_DPRINTK(P9_DEBUG_MUX, "mux %p task %p tcall %p id %d\n", m, current, | ||
805 | tc, tc->id); | ||
806 | if (m->err < 0) | ||
807 | return ERR_PTR(m->err); | ||
808 | |||
809 | req = kmalloc(sizeof(struct p9_req), GFP_KERNEL); | ||
810 | if (!req) | ||
811 | return ERR_PTR(-ENOMEM); | ||
812 | |||
813 | if (tc->id == P9_TVERSION) | ||
814 | n = P9_NOTAG; | ||
815 | else | ||
816 | n = p9_mux_get_tag(m); | ||
817 | |||
818 | if (n < 0) | ||
819 | return ERR_PTR(-ENOMEM); | ||
820 | |||
821 | p9_set_tag(tc, n); | ||
822 | |||
823 | #ifdef CONFIG_NET_9P_DEBUG | ||
824 | if ((p9_debug_level&P9_DEBUG_FCALL) == P9_DEBUG_FCALL) { | ||
825 | char buf[150]; | ||
826 | |||
827 | p9_printfcall(buf, sizeof(buf), tc, m->extended); | ||
828 | printk(KERN_NOTICE "<<< %p %s\n", m, buf); | ||
829 | } | ||
830 | #endif | ||
831 | |||
832 | spin_lock_init(&req->lock); | ||
833 | req->tag = n; | ||
834 | req->tcall = tc; | ||
835 | req->rcall = NULL; | ||
836 | req->err = 0; | ||
837 | req->cb = cb; | ||
838 | req->cba = cba; | ||
839 | req->flush = None; | ||
840 | |||
841 | spin_lock(&m->lock); | ||
842 | list_add_tail(&req->req_list, &m->unsent_req_list); | ||
843 | spin_unlock(&m->lock); | ||
844 | |||
845 | if (test_and_clear_bit(Wpending, &m->wsched)) | ||
846 | n = POLLOUT; | ||
847 | else | ||
848 | n = p9_fd_poll(m->trans, NULL); | ||
849 | |||
850 | if (n & POLLOUT && !test_and_set_bit(Wworksched, &m->wsched)) | ||
851 | queue_work(p9_mux_wq, &m->wq); | ||
852 | |||
853 | return req; | ||
854 | } | ||
855 | |||
856 | static void p9_mux_free_request(struct p9_conn *m, struct p9_req *req) | ||
857 | { | ||
858 | p9_mux_put_tag(m, req->tag); | ||
859 | kfree(req); | ||
860 | } | ||
861 | |||
862 | static void p9_mux_flush_cb(struct p9_req *freq, void *a) | ||
863 | { | ||
864 | p9_conn_req_callback cb; | ||
865 | int tag; | ||
866 | struct p9_conn *m; | ||
867 | struct p9_req *req, *rreq, *rptr; | ||
868 | |||
869 | m = a; | ||
870 | P9_DPRINTK(P9_DEBUG_MUX, "mux %p tc %p rc %p err %d oldtag %d\n", m, | ||
871 | freq->tcall, freq->rcall, freq->err, | ||
872 | freq->tcall->params.tflush.oldtag); | ||
873 | |||
874 | spin_lock(&m->lock); | ||
875 | cb = NULL; | ||
876 | tag = freq->tcall->params.tflush.oldtag; | ||
877 | req = NULL; | ||
878 | list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) { | ||
879 | if (rreq->tag == tag) { | ||
880 | req = rreq; | ||
881 | list_del(&req->req_list); | ||
882 | break; | ||
883 | } | ||
884 | } | ||
885 | spin_unlock(&m->lock); | ||
886 | |||
887 | if (req) { | ||
888 | spin_lock(&req->lock); | ||
889 | req->flush = Flushed; | ||
890 | spin_unlock(&req->lock); | ||
891 | |||
892 | if (req->cb) | ||
893 | (*req->cb) (req, req->cba); | ||
894 | else | ||
895 | kfree(req->rcall); | ||
896 | |||
897 | wake_up(&m->equeue); | ||
898 | } | ||
899 | |||
900 | kfree(freq->tcall); | ||
901 | kfree(freq->rcall); | ||
902 | p9_mux_free_request(m, freq); | ||
903 | } | ||
904 | |||
905 | static int | ||
906 | p9_mux_flush_request(struct p9_conn *m, struct p9_req *req) | ||
907 | { | ||
908 | struct p9_fcall *fc; | ||
909 | struct p9_req *rreq, *rptr; | ||
910 | |||
911 | P9_DPRINTK(P9_DEBUG_MUX, "mux %p req %p tag %d\n", m, req, req->tag); | ||
912 | |||
913 | /* if a response was received for a request, do nothing */ | ||
914 | spin_lock(&req->lock); | ||
915 | if (req->rcall || req->err) { | ||
916 | spin_unlock(&req->lock); | ||
917 | P9_DPRINTK(P9_DEBUG_MUX, | ||
918 | "mux %p req %p response already received\n", m, req); | ||
919 | return 0; | ||
920 | } | ||
921 | |||
922 | req->flush = Flushing; | ||
923 | spin_unlock(&req->lock); | ||
924 | |||
925 | spin_lock(&m->lock); | ||
926 | /* if the request is not sent yet, just remove it from the list */ | ||
927 | list_for_each_entry_safe(rreq, rptr, &m->unsent_req_list, req_list) { | ||
928 | if (rreq->tag == req->tag) { | ||
929 | P9_DPRINTK(P9_DEBUG_MUX, | ||
930 | "mux %p req %p request is not sent yet\n", m, req); | ||
931 | list_del(&rreq->req_list); | ||
932 | req->flush = Flushed; | ||
933 | spin_unlock(&m->lock); | ||
934 | if (req->cb) | ||
935 | (*req->cb) (req, req->cba); | ||
936 | return 0; | ||
937 | } | ||
938 | } | ||
939 | spin_unlock(&m->lock); | ||
940 | |||
941 | clear_thread_flag(TIF_SIGPENDING); | ||
942 | fc = p9_create_tflush(req->tag); | ||
943 | p9_send_request(m, fc, p9_mux_flush_cb, m); | ||
944 | return 1; | ||
945 | } | ||
946 | |||
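Together with p9_mux_flush_cb() above, this implements the 9P flush handshake: the original request is marked Flushing before the Tflush is queued, so a racing response for the old tag stays on the request list and is handed to the flush callback rather than to the caller. A sketch of the caller's side, mirroring how p9_fd_rpc() below uses it:

    if (p9_mux_flush_request(m, req)) {
            /* a Tflush carrying req->tag as oldtag is now queued;
             * p9_mux_flush_cb() will complete req and free the Tflush */
    } else {
            /* a response or error already arrived; nothing was flushed */
    }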
947 | static void | ||
948 | p9_conn_rpc_cb(struct p9_req *req, void *a) | ||
949 | { | ||
950 | struct p9_mux_rpc *r; | ||
951 | |||
952 | P9_DPRINTK(P9_DEBUG_MUX, "req %p r %p\n", req, a); | ||
953 | r = a; | ||
954 | r->rcall = req->rcall; | ||
955 | r->err = req->err; | ||
956 | |||
957 | if (req->flush != None && !req->err) | ||
958 | r->err = -ERESTARTSYS; | ||
959 | |||
960 | wake_up(&r->wqueue); | ||
961 | } | ||
962 | |||
963 | /** | ||
964 | * p9_fd_rpc - sends a 9P request and waits until a response is available. | ||
965 | * The function can be interrupted. | ||
966 | * @t: transport structure | ||
967 | * @tc: request to be sent | ||
968 | * @rc: pointer where a pointer to the response is stored | ||
969 | */ | ||
970 | int | ||
971 | p9_fd_rpc(struct p9_trans *t, struct p9_fcall *tc, struct p9_fcall **rc) | ||
972 | { | ||
973 | struct p9_trans_fd *p = t->priv; | ||
974 | struct p9_conn *m = p->conn; | ||
975 | int err, sigpending; | ||
976 | unsigned long flags; | ||
977 | struct p9_req *req; | ||
978 | struct p9_mux_rpc r; | ||
979 | |||
980 | r.err = 0; | ||
981 | r.tcall = tc; | ||
982 | r.rcall = NULL; | ||
983 | r.m = m; | ||
984 | init_waitqueue_head(&r.wqueue); | ||
985 | |||
986 | if (rc) | ||
987 | *rc = NULL; | ||
988 | |||
989 | sigpending = 0; | ||
990 | if (signal_pending(current)) { | ||
991 | sigpending = 1; | ||
992 | clear_thread_flag(TIF_SIGPENDING); | ||
993 | } | ||
994 | |||
995 | req = p9_send_request(m, tc, p9_conn_rpc_cb, &r); | ||
996 | if (IS_ERR(req)) { | ||
997 | err = PTR_ERR(req); | ||
998 | P9_DPRINTK(P9_DEBUG_MUX, "error %d\n", err); | ||
999 | return err; | ||
1000 | } | ||
1001 | |||
1002 | err = wait_event_interruptible(r.wqueue, r.rcall != NULL || r.err < 0); | ||
1003 | if (r.err < 0) | ||
1004 | err = r.err; | ||
1005 | |||
1006 | if (err == -ERESTARTSYS && m->trans->status == Connected | ||
1007 | && m->err == 0) { | ||
1008 | if (p9_mux_flush_request(m, req)) { | ||
1009 | /* wait until we get the response to the flush message */ | ||
1010 | do { | ||
1011 | clear_thread_flag(TIF_SIGPENDING); | ||
1012 | err = wait_event_interruptible(r.wqueue, | ||
1013 | r.rcall || r.err); | ||
1014 | } while (!r.rcall && !r.err && err == -ERESTARTSYS && | ||
1015 | m->trans->status == Connected && !m->err); | ||
1016 | |||
1017 | err = -ERESTARTSYS; | ||
1018 | } | ||
1019 | sigpending = 1; | ||
1020 | } | ||
1021 | |||
1022 | if (sigpending) { | ||
1023 | spin_lock_irqsave(¤t->sighand->siglock, flags); | ||
1024 | recalc_sigpending(); | ||
1025 | spin_unlock_irqrestore(¤t->sighand->siglock, flags); | ||
1026 | } | ||
1027 | |||
1028 | if (rc) | ||
1029 | *rc = r.rcall; | ||
1030 | else | ||
1031 | kfree(r.rcall); | ||
1032 | |||
1033 | p9_mux_free_request(m, req); | ||
1034 | if (err > 0) | ||
1035 | err = -EIO; | ||
1036 | |||
1037 | return err; | ||
1038 | } | ||
1039 | |||
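With this reorganization the fd transport exposes a single rpc hook instead of separate read/write/poll hooks, and msize/dotu travel inside struct p9_trans. A sketch of how a caller such as net/9p/client.c is expected to drive it, assuming the request tc has already been built (illustrative, not taken from this patch):

    struct p9_fcall *rc = NULL;
    int err;

    err = trans->rpc(trans, tc, &rc);   /* resolves to p9_fd_rpc() here */
    if (!err) {
            /* ... interpret rc ... */
            kfree(rc);
    }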
1040 | #ifdef P9_NONBLOCK | ||
1041 | /** | ||
1042 | * p9_conn_rpcnb - sends a 9P request without waiting for the response | ||
1043 | * @m: mux data | ||
1044 | * @tc: request to be sent | ||
1045 | * @cb: callback function to be called when the response arrives | ||
1046 | * @a: value to pass to the callback function | ||
1047 | */ | ||
1048 | int p9_conn_rpcnb(struct p9_conn *m, struct p9_fcall *tc, | ||
1049 | p9_conn_req_callback cb, void *a) | ||
1050 | { | ||
1051 | int err; | ||
1052 | struct p9_req *req; | ||
1053 | |||
1054 | req = p9_send_request(m, tc, cb, a); | ||
1055 | if (IS_ERR(req)) { | ||
1056 | err = PTR_ERR(req); | ||
1057 | P9_DPRINTK(P9_DEBUG_MUX, "error %d\n", err); | ||
1058 | return PTR_ERR(req); | ||
1059 | } | ||
1060 | |||
1061 | P9_DPRINTK(P9_DEBUG_MUX, "mux %p tc %p tag %d\n", m, tc, req->tag); | ||
1062 | return 0; | ||
1063 | } | ||
1064 | #endif /* P9_NONBLOCK */ | ||
1065 | |||
1066 | /** | ||
1067 | * p9_conn_cancel - cancel all pending requests with the given error | ||
1068 | * @m: mux data | ||
1069 | * @err: error code | ||
1070 | */ | ||
1071 | void p9_conn_cancel(struct p9_conn *m, int err) | ||
1072 | { | ||
1073 | struct p9_req *req, *rtmp; | ||
1074 | LIST_HEAD(cancel_list); | ||
1075 | |||
1076 | P9_DPRINTK(P9_DEBUG_ERROR, "mux %p err %d\n", m, err); | ||
1077 | m->err = err; | ||
1078 | spin_lock(&m->lock); | ||
1079 | list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) { | ||
1080 | list_move(&req->req_list, &cancel_list); | ||
1081 | } | ||
1082 | list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) { | ||
1083 | list_move(&req->req_list, &cancel_list); | ||
1084 | } | ||
1085 | spin_unlock(&m->lock); | ||
1086 | |||
1087 | list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) { | ||
1088 | list_del(&req->req_list); | ||
1089 | if (!req->err) | ||
1090 | req->err = err; | ||
1091 | |||
1092 | if (req->cb) | ||
1093 | (*req->cb) (req, req->cba); | ||
1094 | else | ||
1095 | kfree(req->rcall); | ||
1096 | } | ||
1097 | |||
1098 | wake_up(&m->equeue); | ||
1099 | } | ||
1100 | |||
75 | /** | 1101 | /** |
76 | * v9fs_parse_options - parse mount options into session structure | 1102 | * v9fs_parse_options - parse mount options into session structure |
77 | * @options: options string passed from mount | 1103 | * @options: options string passed from mount |
@@ -268,7 +1294,7 @@ end: | |||
268 | } | 1294 | } |
269 | 1295 | ||
270 | /** | 1296 | /** |
271 | * p9_sock_close - shutdown socket | 1297 | * p9_fd_close - shutdown socket |
272 | * @trans: private socket structure | 1298 | * @trans: private socket structure |
273 | * | 1299 | * |
274 | */ | 1300 | */ |
@@ -284,6 +1310,8 @@ static void p9_fd_close(struct p9_trans *trans) | |||
284 | if (!ts) | 1310 | if (!ts) |
285 | return; | 1311 | return; |
286 | 1312 | ||
1313 | p9_conn_destroy(ts->conn); | ||
1314 | |||
287 | trans->status = Disconnected; | 1315 | trans->status = Disconnected; |
288 | if (ts->rd) | 1316 | if (ts->rd) |
289 | fput(ts->rd); | 1317 | fput(ts->rd); |
@@ -292,13 +1320,15 @@ static void p9_fd_close(struct p9_trans *trans) | |||
292 | kfree(ts); | 1320 | kfree(ts); |
293 | } | 1321 | } |
294 | 1322 | ||
295 | static struct p9_trans *p9_trans_create_tcp(const char *addr, char *args) | 1323 | static struct p9_trans * |
1324 | p9_trans_create_tcp(const char *addr, char *args, int msize, unsigned char dotu) | ||
296 | { | 1325 | { |
297 | int err; | 1326 | int err; |
298 | struct p9_trans *trans; | 1327 | struct p9_trans *trans; |
299 | struct socket *csocket; | 1328 | struct socket *csocket; |
300 | struct sockaddr_in sin_server; | 1329 | struct sockaddr_in sin_server; |
301 | struct p9_fd_opts opts; | 1330 | struct p9_fd_opts opts; |
1331 | struct p9_trans_fd *p; | ||
302 | 1332 | ||
303 | parse_opts(args, &opts); | 1333 | parse_opts(args, &opts); |
304 | 1334 | ||
@@ -306,11 +1336,10 @@ static struct p9_trans *p9_trans_create_tcp(const char *addr, char *args) | |||
306 | trans = kmalloc(sizeof(struct p9_trans), GFP_KERNEL); | 1336 | trans = kmalloc(sizeof(struct p9_trans), GFP_KERNEL); |
307 | if (!trans) | 1337 | if (!trans) |
308 | return ERR_PTR(-ENOMEM); | 1338 | return ERR_PTR(-ENOMEM); |
309 | 1339 | trans->msize = msize; | |
310 | trans->write = p9_fd_write; | 1340 | trans->extended = dotu; |
311 | trans->read = p9_fd_read; | 1341 | trans->rpc = p9_fd_rpc; |
312 | trans->close = p9_fd_close; | 1342 | trans->close = p9_fd_close; |
313 | trans->poll = p9_fd_poll; | ||
314 | 1343 | ||
315 | sin_server.sin_family = AF_INET; | 1344 | sin_server.sin_family = AF_INET; |
316 | sin_server.sin_addr.s_addr = in_aton(addr); | 1345 | sin_server.sin_addr.s_addr = in_aton(addr); |
@@ -337,6 +1366,14 @@ static struct p9_trans *p9_trans_create_tcp(const char *addr, char *args) | |||
337 | if (err < 0) | 1366 | if (err < 0) |
338 | goto error; | 1367 | goto error; |
339 | 1368 | ||
1369 | p = (struct p9_trans_fd *) trans->priv; | ||
1370 | p->conn = p9_conn_create(trans); | ||
1371 | if (IS_ERR(p->conn)) { | ||
1372 | err = PTR_ERR(p->conn); | ||
1373 | p->conn = NULL; | ||
1374 | goto error; | ||
1375 | } | ||
1376 | |||
340 | return trans; | 1377 | return trans; |
341 | 1378 | ||
342 | error: | 1379 | error: |
@@ -347,22 +1384,23 @@ error: | |||
347 | return ERR_PTR(err); | 1384 | return ERR_PTR(err); |
348 | } | 1385 | } |
349 | 1386 | ||
350 | static struct p9_trans *p9_trans_create_unix(const char *addr, char *args) | 1387 | static struct p9_trans * |
1388 | p9_trans_create_unix(const char *addr, char *args, int msize, | ||
1389 | unsigned char dotu) | ||
351 | { | 1390 | { |
352 | int err; | 1391 | int err; |
353 | struct socket *csocket; | 1392 | struct socket *csocket; |
354 | struct sockaddr_un sun_server; | 1393 | struct sockaddr_un sun_server; |
355 | struct p9_trans *trans; | 1394 | struct p9_trans *trans; |
1395 | struct p9_trans_fd *p; | ||
356 | 1396 | ||
357 | csocket = NULL; | 1397 | csocket = NULL; |
358 | trans = kmalloc(sizeof(struct p9_trans), GFP_KERNEL); | 1398 | trans = kmalloc(sizeof(struct p9_trans), GFP_KERNEL); |
359 | if (!trans) | 1399 | if (!trans) |
360 | return ERR_PTR(-ENOMEM); | 1400 | return ERR_PTR(-ENOMEM); |
361 | 1401 | ||
362 | trans->write = p9_fd_write; | 1402 | trans->rpc = p9_fd_rpc; |
363 | trans->read = p9_fd_read; | ||
364 | trans->close = p9_fd_close; | 1403 | trans->close = p9_fd_close; |
365 | trans->poll = p9_fd_poll; | ||
366 | 1404 | ||
367 | if (strlen(addr) > UNIX_PATH_MAX) { | 1405 | if (strlen(addr) > UNIX_PATH_MAX) { |
368 | P9_EPRINTK(KERN_ERR, "p9_trans_unix: address too long: %s\n", | 1406 | P9_EPRINTK(KERN_ERR, "p9_trans_unix: address too long: %s\n", |
@@ -387,6 +1425,16 @@ static struct p9_trans *p9_trans_create_unix(const char *addr, char *args) | |||
387 | if (err < 0) | 1425 | if (err < 0) |
388 | goto error; | 1426 | goto error; |
389 | 1427 | ||
1428 | trans->msize = msize; | ||
1429 | trans->extended = dotu; | ||
1430 | p = (struct p9_trans_fd *) trans->priv; | ||
1431 | p->conn = p9_conn_create(trans); | ||
1432 | if (IS_ERR(p->conn)) { | ||
1433 | err = PTR_ERR(p->conn); | ||
1434 | p->conn = NULL; | ||
1435 | goto error; | ||
1436 | } | ||
1437 | |||
390 | return trans; | 1438 | return trans; |
391 | 1439 | ||
392 | error: | 1440 | error: |
@@ -397,11 +1445,14 @@ error: | |||
397 | return ERR_PTR(err); | 1445 | return ERR_PTR(err); |
398 | } | 1446 | } |
399 | 1447 | ||
400 | static struct p9_trans *p9_trans_create_fd(const char *name, char *args) | 1448 | static struct p9_trans * |
1449 | p9_trans_create_fd(const char *name, char *args, int msize, | ||
1450 | unsigned char extended) | ||
401 | { | 1451 | { |
402 | int err; | 1452 | int err; |
403 | struct p9_trans *trans; | 1453 | struct p9_trans *trans; |
404 | struct p9_fd_opts opts; | 1454 | struct p9_fd_opts opts; |
1455 | struct p9_trans_fd *p; | ||
405 | 1456 | ||
406 | parse_opts(args, &opts); | 1457 | parse_opts(args, &opts); |
407 | 1458 | ||
@@ -414,15 +1465,23 @@ static struct p9_trans *p9_trans_create_fd(const char *name, char *args) | |||
414 | if (!trans) | 1465 | if (!trans) |
415 | return ERR_PTR(-ENOMEM); | 1466 | return ERR_PTR(-ENOMEM); |
416 | 1467 | ||
417 | trans->write = p9_fd_write; | 1468 | trans->rpc = p9_fd_rpc; |
418 | trans->read = p9_fd_read; | ||
419 | trans->close = p9_fd_close; | 1469 | trans->close = p9_fd_close; |
420 | trans->poll = p9_fd_poll; | ||
421 | 1470 | ||
422 | err = p9_fd_open(trans, opts.rfd, opts.wfd); | 1471 | err = p9_fd_open(trans, opts.rfd, opts.wfd); |
423 | if (err < 0) | 1472 | if (err < 0) |
424 | goto error; | 1473 | goto error; |
425 | 1474 | ||
1475 | trans->msize = msize; | ||
1476 | trans->extended = extended; | ||
1477 | p = (struct p9_trans_fd *) trans->priv; | ||
1478 | p->conn = p9_conn_create(trans); | ||
1479 | if (IS_ERR(p->conn)) { | ||
1480 | err = PTR_ERR(p->conn); | ||
1481 | p->conn = NULL; | ||
1482 | goto error; | ||
1483 | } | ||
1484 | |||
426 | return trans; | 1485 | return trans; |
427 | 1486 | ||
428 | error: | 1487 | error: |
@@ -453,6 +1512,12 @@ static struct p9_trans_module p9_fd_trans = { | |||
453 | 1512 | ||
454 | static int __init p9_trans_fd_init(void) | 1513 | static int __init p9_trans_fd_init(void) |
455 | { | 1514 | { |
1515 | int ret = p9_mux_global_init(); | ||
1516 | if (ret) { | ||
1517 | printk(KERN_WARNING "9p: starting mux failed\n"); | ||
1518 | return ret; | ||
1519 | } | ||
1520 | |||
456 | v9fs_register_trans(&p9_tcp_trans); | 1521 | v9fs_register_trans(&p9_tcp_trans); |
457 | v9fs_register_trans(&p9_unix_trans); | 1522 | v9fs_register_trans(&p9_unix_trans); |
458 | v9fs_register_trans(&p9_fd_trans); | 1523 | v9fs_register_trans(&p9_fd_trans); |
@@ -460,13 +1525,7 @@ static int __init p9_trans_fd_init(void) | |||
460 | return 1; | 1525 | return 1; |
461 | } | 1526 | } |
462 | 1527 | ||
463 | static void __exit p9_trans_fd_exit(void) { | ||
464 | printk(KERN_ERR "Removal of 9p transports not implemented\n"); | ||
465 | BUG(); | ||
466 | } | ||
467 | |||
468 | module_init(p9_trans_fd_init); | 1528 | module_init(p9_trans_fd_init); |
469 | module_exit(p9_trans_fd_exit); | ||
470 | 1529 | ||
471 | MODULE_AUTHOR("Latchesar Ionkov <lucho@ionkov.net>"); | 1530 | MODULE_AUTHOR("Latchesar Ionkov <lucho@ionkov.net>"); |
472 | MODULE_AUTHOR("Eric Van Hensbergen <ericvh@gmail.com>"); | 1531 | MODULE_AUTHOR("Eric Van Hensbergen <ericvh@gmail.com>"); |
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c index c957080966b4..0117b9fb8480 100644 --- a/net/9p/trans_virtio.c +++ b/net/9p/trans_virtio.c | |||
@@ -130,29 +130,6 @@ static unsigned int rest_of_page(void *data) | |||
130 | return PAGE_SIZE - ((unsigned long)data % PAGE_SIZE); | 130 | return PAGE_SIZE - ((unsigned long)data % PAGE_SIZE); |
131 | } | 131 | } |
132 | 132 | ||
133 | static int p9_virtio_write(struct p9_trans *trans, void *buf, int count) | ||
134 | { | ||
135 | /* Only use the rpc mechanism for now */ | ||
136 | return count; | ||
137 | } | ||
138 | |||
139 | static int p9_virtio_read(struct p9_trans *trans, void *buf, int count) | ||
140 | { | ||
141 | /* Only use the rpc mechanism for now */ | ||
142 | return 0; | ||
143 | } | ||
144 | |||
145 | /* The poll function is used by 9p transports to determine if there | ||
146 | * is there is activity available on a particular channel. In our case | ||
147 | * we use it to wait for a callback from the input routines. | ||
148 | */ | ||
149 | static unsigned int | ||
150 | p9_virtio_poll(struct p9_trans *trans, struct poll_table_struct *pt) | ||
151 | { | ||
152 | /* Only use the rpc mechanism for now */ | ||
153 | return 0; | ||
154 | } | ||
155 | |||
156 | static void p9_virtio_close(struct p9_trans *trans) | 133 | static void p9_virtio_close(struct p9_trans *trans) |
157 | { | 134 | { |
158 | struct virtio_chan *chan = trans->priv; | 135 | struct virtio_chan *chan = trans->priv; |
@@ -214,8 +191,7 @@ pack_sg_list(struct scatterlist *sg, int start, int limit, char *data, | |||
214 | } | 191 | } |
215 | 192 | ||
216 | static int | 193 | static int |
217 | p9_virtio_rpc(struct p9_trans *t, struct p9_fcall *tc, struct p9_fcall **rc, | 194 | p9_virtio_rpc(struct p9_trans *t, struct p9_fcall *tc, struct p9_fcall **rc) |
218 | int msize, int dotu) | ||
219 | { | 195 | { |
220 | int in, out; | 196 | int in, out; |
221 | int n, err, size; | 197 | int n, err, size; |
@@ -225,7 +201,7 @@ p9_virtio_rpc(struct p9_trans *t, struct p9_fcall *tc, struct p9_fcall **rc, | |||
225 | unsigned long flags; | 201 | unsigned long flags; |
226 | 202 | ||
227 | if (*rc == NULL) { | 203 | if (*rc == NULL) { |
228 | *rc = kmalloc(sizeof(struct p9_fcall) + msize, GFP_KERNEL); | 204 | *rc = kmalloc(sizeof(struct p9_fcall) + t->msize, GFP_KERNEL); |
229 | if (!*rc) | 205 | if (!*rc) |
230 | return -ENOMEM; | 206 | return -ENOMEM; |
231 | } | 207 | } |
@@ -248,7 +224,7 @@ p9_virtio_rpc(struct p9_trans *t, struct p9_fcall *tc, struct p9_fcall **rc, | |||
248 | P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio rpc tag %d\n", n); | 224 | P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio rpc tag %d\n", n); |
249 | 225 | ||
250 | out = pack_sg_list(chan->sg, 0, VIRTQUEUE_NUM, tc->sdata, tc->size); | 226 | out = pack_sg_list(chan->sg, 0, VIRTQUEUE_NUM, tc->sdata, tc->size); |
251 | in = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM-out, rdata, msize); | 227 | in = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM-out, rdata, t->msize); |
252 | 228 | ||
253 | req->status = REQ_STATUS_SENT; | 229 | req->status = REQ_STATUS_SENT; |
254 | 230 | ||
@@ -264,7 +240,7 @@ p9_virtio_rpc(struct p9_trans *t, struct p9_fcall *tc, struct p9_fcall **rc, | |||
264 | 240 | ||
265 | size = le32_to_cpu(*(__le32 *) rdata); | 241 | size = le32_to_cpu(*(__le32 *) rdata); |
266 | 242 | ||
267 | err = p9_deserialize_fcall(rdata, size, *rc, dotu); | 243 | err = p9_deserialize_fcall(rdata, size, *rc, t->extended); |
268 | if (err < 0) { | 244 | if (err < 0) { |
269 | P9_DPRINTK(P9_DEBUG_TRANS, | 245 | P9_DPRINTK(P9_DEBUG_TRANS, |
270 | "9p debug: virtio rpc deserialize returned %d\n", err); | 246 | "9p debug: virtio rpc deserialize returned %d\n", err); |
@@ -275,7 +251,7 @@ p9_virtio_rpc(struct p9_trans *t, struct p9_fcall *tc, struct p9_fcall **rc, | |||
275 | if ((p9_debug_level&P9_DEBUG_FCALL) == P9_DEBUG_FCALL) { | 251 | if ((p9_debug_level&P9_DEBUG_FCALL) == P9_DEBUG_FCALL) { |
276 | char buf[150]; | 252 | char buf[150]; |
277 | 253 | ||
278 | p9_printfcall(buf, sizeof(buf), *rc, dotu); | 254 | p9_printfcall(buf, sizeof(buf), *rc, t->extended); |
279 | printk(KERN_NOTICE ">>> %p %s\n", t, buf); | 255 | printk(KERN_NOTICE ">>> %p %s\n", t, buf); |
280 | } | 256 | } |
281 | #endif | 257 | #endif |
@@ -337,7 +313,9 @@ fail: | |||
337 | * alternate channels by matching devname versus a virtio_config entry. | 313 | * alternate channels by matching devname versus a virtio_config entry. |
338 | * We use a simple reference count mechanism to ensure that only a single | 314 | * We use a simple reference count mechanism to ensure that only a single |
339 | * mount has a channel open at a time. */ | 315 | * mount has a channel open at a time. */ |
340 | static struct p9_trans *p9_virtio_create(const char *devname, char *args) | 316 | static struct p9_trans * |
317 | p9_virtio_create(const char *devname, char *args, int msize, | ||
318 | unsigned char extended) | ||
341 | { | 319 | { |
342 | struct p9_trans *trans; | 320 | struct p9_trans *trans; |
343 | struct virtio_chan *chan = channels; | 321 | struct virtio_chan *chan = channels; |
@@ -374,11 +352,9 @@ static struct p9_trans *p9_virtio_create(const char *devname, char *args) | |||
374 | printk(KERN_ERR "9p: couldn't allocate transport\n"); | 352 | printk(KERN_ERR "9p: couldn't allocate transport\n"); |
375 | return ERR_PTR(-ENOMEM); | 353 | return ERR_PTR(-ENOMEM); |
376 | } | 354 | } |
377 | 355 | trans->extended = extended; | |
378 | trans->write = p9_virtio_write; | 356 | trans->msize = msize; |
379 | trans->read = p9_virtio_read; | ||
380 | trans->close = p9_virtio_close; | 357 | trans->close = p9_virtio_close; |
381 | trans->poll = p9_virtio_poll; | ||
382 | trans->rpc = p9_virtio_rpc; | 358 | trans->rpc = p9_virtio_rpc; |
383 | trans->priv = chan; | 359 | trans->priv = chan; |
384 | 360 | ||
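The virtio transport picks up the same contract: create() now receives msize and the extended flag and stores them in the returned p9_trans, and the per-call read/write/poll hooks are gone in favour of ->rpc. A sketch of how the client side is then expected to instantiate a transport, assuming the usual transport-module create field (names illustrative, not shown in this hunk):

    clnt->trans = clnt->trans_mod->create(dev_name, options,
                                          clnt->msize, clnt->dotu);
    if (IS_ERR(clnt->trans))
            err = PTR_ERR(clnt->trans);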