about summary refs log tree commit diff stats
path: root/net/9p
diff options
context:
space:
mode:
authorEric Van Hensbergen <ericvh@gmail.com>2008-10-13 19:45:21 -0400
committerEric Van Hensbergen <ericvh@gmail.com>2008-10-17 12:04:42 -0400
commit91b8534fa8f5e01f249b1bf8df0a2540053549ad (patch)
treefde6b3b63dad229108106553106995889b4f0fa7 /net/9p
parent1b0a763bdd5ed467d0e03b88e045000c749303fb (diff)
9p: make rpc code common and rework flush code
This code moves the rpc function to the common client base, reorganizes the flush code to be more simple and stable, and makes the necessary adjustments to the underlying transports to adapt to the new structure. This reduces the overall amount of code duplication between the transports and should make adding new transports more straightforward. Signed-off-by: Eric Van Hensbergen <ericvh@gmail.com>
Diffstat (limited to 'net/9p')
-rw-r--r--net/9p/client.c265
-rw-r--r--net/9p/trans_fd.c268
-rw-r--r--net/9p/trans_virtio.c85
3 files changed, 311 insertions, 307 deletions
diff --git a/net/9p/client.c b/net/9p/client.c
index f2d07ef9e6a4..29934febecdb 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -55,6 +55,9 @@ static const match_table_t tokens = {
55 {Opt_err, NULL}, 55 {Opt_err, NULL},
56}; 56};
57 57
58static int
59p9_client_rpc(struct p9_client *c, struct p9_fcall *tc, struct p9_fcall **rc);
60
58/** 61/**
59 * v9fs_parse_options - parse mount options into session structure 62 * v9fs_parse_options - parse mount options into session structure
60 * @options: options string passed from mount 63 * @options: options string passed from mount
@@ -269,6 +272,36 @@ static void p9_tag_cleanup(struct p9_client *c)
269} 272}
270 273
271/** 274/**
275 * p9_client_flush - flush (cancel) a request
276 * c: client state
277 * req: request to cancel
278 *
 279 * This sends a flush for a particular request and links
280 * the flush request to the original request. The current
281 * code only supports a single flush request although the protocol
282 * allows for multiple flush requests to be sent for a single request.
283 *
284 */
285
286static int p9_client_flush(struct p9_client *c, struct p9_req_t *req)
287{
288 struct p9_fcall *tc, *rc = NULL;
289 int err;
290
291 P9_DPRINTK(P9_DEBUG_9P, "client %p tag %d\n", c, req->tc->tag);
292
293 tc = p9_create_tflush(req->tc->tag);
294 if (IS_ERR(tc))
295 return PTR_ERR(tc);
296
297 err = p9_client_rpc(c, tc, &rc);
298
299 /* we don't free anything here because RPC isn't complete */
300
301 return err;
302}
303
304/**
272 * p9_free_req - free a request and clean-up as necessary 305 * p9_free_req - free a request and clean-up as necessary
273 * c: client state 306 * c: client state
274 * r: request to release 307 * r: request to release
@@ -289,6 +322,224 @@ void p9_free_req(struct p9_client *c, struct p9_req_t *r)
289 } 322 }
290} 323}
291 324
325/**
326 * p9_client_cb - call back from transport to client
327 * c: client state
328 * req: request received
329 *
330 */
331void p9_client_cb(struct p9_client *c, struct p9_req_t *req)
332{
333 struct p9_req_t *other_req;
334 unsigned long flags;
335
336 P9_DPRINTK(P9_DEBUG_MUX, ": %d\n", req->tc->tag);
337
338 if (req->status == REQ_STATUS_ERROR)
339 wake_up(req->wq);
340
341 if (req->tc->id == P9_TFLUSH) { /* flush receive path */
342 P9_DPRINTK(P9_DEBUG_MUX, "flush: %d\n", req->tc->tag);
343 spin_lock_irqsave(&c->lock, flags);
344 other_req = p9_tag_lookup(c, req->tc->params.tflush.oldtag);
345 if (other_req->flush_tag != req->tc->tag) /* stale flush */
346 spin_unlock_irqrestore(&c->lock, flags);
347 else {
348 BUG_ON(other_req->status != REQ_STATUS_FLSH);
349 other_req->status = REQ_STATUS_FLSHD;
350 spin_unlock_irqrestore(&c->lock, flags);
351 wake_up(other_req->wq);
352 }
353 p9_free_req(c, req);
354 } else { /* normal receive path */
355 P9_DPRINTK(P9_DEBUG_MUX, "normal: %d\n", req->tc->tag);
356 spin_lock_irqsave(&c->lock, flags);
357 if (req->status != REQ_STATUS_FLSHD)
358 req->status = REQ_STATUS_RCVD;
359 req->flush_tag = P9_NOTAG;
360 spin_unlock_irqrestore(&c->lock, flags);
361 wake_up(req->wq);
362 P9_DPRINTK(P9_DEBUG_MUX, "wakeup: %d\n", req->tc->tag);
363 }
364}
365EXPORT_SYMBOL(p9_client_cb);
366
367/**
368 * p9_client_rpc - issue a request and wait for a response
369 * @c: client session
370 * @tc: &p9_fcall request to transmit
 371 * @rc: &p9_fcall to put response into
372 *
373 * Returns 0 on success, error code on failure
374 */
375
376static int
377p9_client_rpc(struct p9_client *c, struct p9_fcall *tc, struct p9_fcall **rc)
378{
379 int tag, err, size;
380 char *rdata;
381 struct p9_req_t *req;
382 unsigned long flags;
383 int sigpending;
384 int flushed = 0;
385
386 P9_DPRINTK(P9_DEBUG_9P, "client %p tc %p rc %p\n", c, tc, rc);
387
388 if (c->status != Connected)
389 return -EIO;
390
391 if (signal_pending(current)) {
392 sigpending = 1;
393 clear_thread_flag(TIF_SIGPENDING);
394 } else
395 sigpending = 0;
396
397 tag = P9_NOTAG;
398 if (tc->id != P9_TVERSION) {
399 tag = p9_idpool_get(c->tagpool);
400 if (tag < 0)
401 return -ENOMEM;
402 }
403
404 req = p9_tag_alloc(c, tag);
405
406 /* if this is a flush request, backlink flush request now to
407 * avoid race conditions later. */
408 if (tc->id == P9_TFLUSH) {
409 struct p9_req_t *other_req =
410 p9_tag_lookup(c, tc->params.tflush.oldtag);
411 if (other_req->status == REQ_STATUS_FLSH)
412 other_req->flush_tag = tag;
413 }
414
415 p9_set_tag(tc, tag);
416
417 /*
418 * if client passed in a pre-allocated response fcall struct
419 * then we just use that, otherwise we allocate one.
420 */
421
422 if (rc == NULL)
423 req->rc = NULL;
424 else
425 req->rc = *rc;
426 if (req->rc == NULL) {
427 req->rc = kmalloc(sizeof(struct p9_fcall) + c->msize,
428 GFP_KERNEL);
429 if (!req->rc) {
430 err = -ENOMEM;
431 p9_idpool_put(tag, c->tagpool);
432 p9_free_req(c, req);
433 goto reterr;
434 }
435 *rc = req->rc;
436 }
437
438 rdata = (char *)req->rc+sizeof(struct p9_fcall);
439
440 req->tc = tc;
441 P9_DPRINTK(P9_DEBUG_9P, "request: tc: %p rc: %p\n", req->tc, req->rc);
442
443 err = c->trans_mod->request(c, req);
444 if (err < 0) {
445 c->status = Disconnected;
446 goto reterr;
447 }
448
449 /* if it was a flush we just transmitted, return our tag */
450 if (tc->id == P9_TFLUSH)
451 return 0;
452again:
453 P9_DPRINTK(P9_DEBUG_9P, "wait %p tag: %d\n", req->wq, tag);
454 err = wait_event_interruptible(*req->wq,
455 req->status >= REQ_STATUS_RCVD);
456 P9_DPRINTK(P9_DEBUG_9P, "wait %p tag: %d returned %d (flushed=%d)\n",
457 req->wq, tag, err, flushed);
458
459 if (req->status == REQ_STATUS_ERROR) {
460 P9_DPRINTK(P9_DEBUG_9P, "req_status error %d\n", req->t_err);
461 err = req->t_err;
462 } else if (err == -ERESTARTSYS && flushed) {
463 P9_DPRINTK(P9_DEBUG_9P, "flushed - going again\n");
464 goto again;
465 } else if (req->status == REQ_STATUS_FLSHD) {
466 P9_DPRINTK(P9_DEBUG_9P, "flushed - erestartsys\n");
467 err = -ERESTARTSYS;
468 }
469
470 if ((err == -ERESTARTSYS) && (c->status == Connected) && (!flushed)) {
471 P9_DPRINTK(P9_DEBUG_9P, "flushing\n");
472 spin_lock_irqsave(&c->lock, flags);
473 if (req->status == REQ_STATUS_SENT)
474 req->status = REQ_STATUS_FLSH;
475 spin_unlock_irqrestore(&c->lock, flags);
476 sigpending = 1;
477 flushed = 1;
478 clear_thread_flag(TIF_SIGPENDING);
479
480 if (c->trans_mod->cancel(c, req)) {
481 err = p9_client_flush(c, req);
482 if (err == 0)
483 goto again;
484 }
485 }
486
487 if (sigpending) {
488 spin_lock_irqsave(&current->sighand->siglock, flags);
489 recalc_sigpending();
490 spin_unlock_irqrestore(&current->sighand->siglock, flags);
491 }
492
493 if (err < 0)
494 goto reterr;
495
496 size = le32_to_cpu(*(__le32 *) rdata);
497
498 err = p9_deserialize_fcall(rdata, size, req->rc, c->dotu);
499 if (err < 0) {
500 P9_DPRINTK(P9_DEBUG_9P,
501 "9p debug: client rpc deserialize returned %d\n", err);
502 goto reterr;
503 }
504
505#ifdef CONFIG_NET_9P_DEBUG
506 if ((p9_debug_level&P9_DEBUG_FCALL) == P9_DEBUG_FCALL) {
507 char buf[150];
508
509 p9_printfcall(buf, sizeof(buf), req->rc, c->dotu);
510 printk(KERN_NOTICE ">>> %p %s\n", c, buf);
511 }
512#endif
513
514 if (req->rc->id == P9_RERROR) {
515 int ecode = req->rc->params.rerror.errno;
516 struct p9_str *ename = &req->rc->params.rerror.error;
517
518 P9_DPRINTK(P9_DEBUG_MUX, "Rerror %.*s\n", ename->len,
519 ename->str);
520
521 if (c->dotu)
522 err = -ecode;
523
524 if (!err) {
525 err = p9_errstr2errno(ename->str, ename->len);
526
527 /* string match failed */
528 if (!err) {
529 PRINT_FCALL_ERROR("unknown error", req->rc);
530 err = -ESERVERFAULT;
531 }
532 }
533 } else
534 err = 0;
535
536reterr:
537 p9_free_req(c, req);
538
539 P9_DPRINTK(P9_DEBUG_9P, "returning %d\n", err);
540 return err;
541}
542
292static struct p9_fid *p9_fid_create(struct p9_client *clnt) 543static struct p9_fid *p9_fid_create(struct p9_client *clnt)
293{ 544{
294 int err; 545 int err;
@@ -339,20 +590,6 @@ static void p9_fid_destroy(struct p9_fid *fid)
339 kfree(fid); 590 kfree(fid);
340} 591}
341 592
342/**
343 * p9_client_rpc - sends 9P request and waits until a response is available.
344 * The function can be interrupted.
345 * @c: client data
346 * @tc: request to be sent
347 * @rc: pointer where a pointer to the response is stored
348 */
349int
350p9_client_rpc(struct p9_client *c, struct p9_fcall *tc,
351 struct p9_fcall **rc)
352{
353 return c->trans_mod->rpc(c, tc, rc);
354}
355
356struct p9_client *p9_client_create(const char *dev_name, char *options) 593struct p9_client *p9_client_create(const char *dev_name, char *options)
357{ 594{
358 int err, n; 595 int err, n;
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index 627e3f097fc5..6bfc013f8b6f 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -174,44 +174,6 @@ static void p9_mux_poll_stop(struct p9_conn *m)
174 spin_unlock_irqrestore(&p9_poll_lock, flags); 174 spin_unlock_irqrestore(&p9_poll_lock, flags);
175} 175}
176 176
177static void p9_conn_rpc_cb(struct p9_client *, struct p9_req_t *);
178
179static void p9_mux_flush_cb(struct p9_client *client, struct p9_req_t *freq)
180{
181 struct p9_conn *m = client->trans;
182 struct p9_req_t *req;
183
184 P9_DPRINTK(P9_DEBUG_MUX, "mux %p tc %p rc %p err %d oldtag %d\n", m,
185 freq->tc, freq->rc, freq->t_err,
186 freq->tc->params.tflush.oldtag);
187
188 req = p9_tag_lookup(client, freq->tc->params.tflush.oldtag);
189 if (req) {
190 req->status = REQ_STATUS_FLSHD;
191 list_del(&req->req_list);
192 p9_conn_rpc_cb(client, req);
193 }
194
195 p9_free_req(client, freq);
196}
197
198static void p9_conn_rpc_cb(struct p9_client *client, struct p9_req_t *req)
199{
200 P9_DPRINTK(P9_DEBUG_MUX, "req %p\n", req);
201
202 if (req->tc->id == P9_TFLUSH) { /* flush callback */
203 P9_DPRINTK(P9_DEBUG_MUX, "flush req %p\n", req);
204 p9_mux_flush_cb(client, req);
205 } else { /* normal wakeup path */
206 P9_DPRINTK(P9_DEBUG_MUX, "normal req %p\n", req);
207 if (!req->t_err && (req->status == REQ_STATUS_FLSHD ||
208 req->status == REQ_STATUS_FLSH))
209 req->t_err = -ERESTARTSYS;
210
211 wake_up(req->wq);
212 }
213}
214
215/** 177/**
216 * p9_conn_cancel - cancel all pending requests with error 178 * p9_conn_cancel - cancel all pending requests with error
217 * @m: mux data 179 * @m: mux data
@@ -222,11 +184,12 @@ static void p9_conn_rpc_cb(struct p9_client *client, struct p9_req_t *req)
222void p9_conn_cancel(struct p9_conn *m, int err) 184void p9_conn_cancel(struct p9_conn *m, int err)
223{ 185{
224 struct p9_req_t *req, *rtmp; 186 struct p9_req_t *req, *rtmp;
187 unsigned long flags;
225 LIST_HEAD(cancel_list); 188 LIST_HEAD(cancel_list);
226 189
227 P9_DPRINTK(P9_DEBUG_ERROR, "mux %p err %d\n", m, err); 190 P9_DPRINTK(P9_DEBUG_ERROR, "mux %p err %d\n", m, err);
228 m->err = err; 191 m->err = err;
229 spin_lock(&m->client->lock); 192 spin_lock_irqsave(&m->client->lock, flags);
230 list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) { 193 list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) {
231 req->status = REQ_STATUS_ERROR; 194 req->status = REQ_STATUS_ERROR;
232 if (!req->t_err) 195 if (!req->t_err)
@@ -239,44 +202,12 @@ void p9_conn_cancel(struct p9_conn *m, int err)
239 req->t_err = err; 202 req->t_err = err;
240 list_move(&req->req_list, &cancel_list); 203 list_move(&req->req_list, &cancel_list);
241 } 204 }
242 spin_unlock(&m->client->lock); 205 spin_unlock_irqrestore(&m->client->lock, flags);
243 206
244 list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) { 207 list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) {
245 list_del(&req->req_list); 208 list_del(&req->req_list);
246 p9_conn_rpc_cb(m->client, req); 209 P9_DPRINTK(P9_DEBUG_ERROR, "call back req %p\n", req);
247 } 210 p9_client_cb(m->client, req);
248}
249
250static void process_request(struct p9_conn *m, struct p9_req_t *req)
251{
252 int ecode;
253 struct p9_str *ename;
254
255 if (!req->t_err && req->rc->id == P9_RERROR) {
256 ecode = req->rc->params.rerror.errno;
257 ename = &req->rc->params.rerror.error;
258
259 P9_DPRINTK(P9_DEBUG_MUX, "Rerror %.*s\n", ename->len,
260 ename->str);
261
262 if (m->client->dotu)
263 req->t_err = -ecode;
264
265 if (!req->t_err) {
266 req->t_err = p9_errstr2errno(ename->str, ename->len);
267
268 /* string match failed */
269 if (!req->t_err) {
270 PRINT_FCALL_ERROR("unknown error", req->rc);
271 req->t_err = -ESERVERFAULT;
272 }
273 }
274 } else if (req->tc && req->rc->id != req->tc->id + 1) {
275 P9_DPRINTK(P9_DEBUG_ERROR,
276 "fcall mismatch: expected %d, got %d\n",
277 req->tc->id + 1, req->rc->id);
278 if (!req->t_err)
279 req->t_err = -EIO;
280 } 211 }
281} 212}
282 213
@@ -421,41 +352,13 @@ static void p9_read_work(struct work_struct *work)
421 /* not an else because some packets (like clunk) have no payload */ 352 /* not an else because some packets (like clunk) have no payload */
422 if ((m->req) && (m->rpos == m->rsize)) { /* packet is read in */ 353 if ((m->req) && (m->rpos == m->rsize)) { /* packet is read in */
423 P9_DPRINTK(P9_DEBUG_MUX, "got new packet\n"); 354 P9_DPRINTK(P9_DEBUG_MUX, "got new packet\n");
424 m->rbuf = (char *)m->req->rc + sizeof(struct p9_fcall);
425 err = p9_deserialize_fcall(m->rbuf, m->rsize, m->req->rc,
426 m->client->dotu);
427 if (err < 0) {
428 m->req = NULL;
429 goto error;
430 }
431
432#ifdef CONFIG_NET_9P_DEBUG
433 if ((p9_debug_level&P9_DEBUG_FCALL) == P9_DEBUG_FCALL) {
434 char buf[150];
435
436 p9_printfcall(buf, sizeof(buf), m->req->rc,
437 m->client->dotu);
438 printk(KERN_NOTICE ">>> %p %s\n", m, buf);
439 }
440#endif
441 355
442 P9_DPRINTK(P9_DEBUG_MUX, "mux %p fcall id %d tag %d\n", m, 356 list_del(&m->req->req_list);
443 m->req->rc->id, m->req->rc->tag); 357 p9_client_cb(m->client, m->req);
444 358
445 m->rbuf = NULL; 359 m->rbuf = NULL;
446 m->rpos = 0; 360 m->rpos = 0;
447 m->rsize = 0; 361 m->rsize = 0;
448
449 if (m->req->status != REQ_STATUS_FLSH) {
450 list_del(&m->req->req_list);
451 m->req->status = REQ_STATUS_RCVD;
452 }
453
454 process_request(m, m->req);
455
456 if (m->req->status != REQ_STATUS_FLSH)
457 p9_conn_rpc_cb(m->client, m->req);
458
459 m->req = NULL; 362 m->req = NULL;
460 } 363 }
461 364
@@ -741,57 +644,41 @@ static void p9_poll_mux(struct p9_conn *m)
741} 644}
742 645
743/** 646/**
744 * p9_send_request - send 9P request 647 * p9_fd_request - send 9P request
745 * The function can sleep until the request is scheduled for sending. 648 * The function can sleep until the request is scheduled for sending.
746 * The function can be interrupted. Return from the function is not 649 * The function can be interrupted. Return from the function is not
747 * a guarantee that the request is sent successfully. Can return errors 650 * a guarantee that the request is sent successfully.
748 * that can be retrieved by PTR_ERR macros.
749 * 651 *
750 * @m: mux data 652 * @client: client instance
751 * @tc: request to be sent 653 * @req: request to be sent
752 * 654 *
753 */ 655 */
754 656
755static struct p9_req_t *p9_send_request(struct p9_conn *m, struct p9_fcall *tc) 657static int p9_fd_request(struct p9_client *client, struct p9_req_t *req)
756{ 658{
757 int tag;
758 int n; 659 int n;
759 struct p9_req_t *req; 660 struct p9_trans_fd *ts = client->trans;
661 struct p9_conn *m = ts->conn;
760 662
761 P9_DPRINTK(P9_DEBUG_MUX, "mux %p task %p tcall %p id %d\n", m, current, 663 P9_DPRINTK(P9_DEBUG_MUX, "mux %p task %p tcall %p id %d\n", m, current,
762 tc, tc->id); 664 req->tc, req->tc->id);
763 if (m->err < 0) 665 if (m->err < 0)
764 return ERR_PTR(m->err); 666 return m->err;
765
766 tag = P9_NOTAG;
767 if (tc->id != P9_TVERSION) {
768 tag = p9_idpool_get(m->client->tagpool);
769 if (tag < 0)
770 return ERR_PTR(-ENOMEM);
771 }
772
773 p9_set_tag(tc, tag);
774
775 req = p9_tag_alloc(m->client, tag);
776 667
777#ifdef CONFIG_NET_9P_DEBUG 668#ifdef CONFIG_NET_9P_DEBUG
778 if ((p9_debug_level&P9_DEBUG_FCALL) == P9_DEBUG_FCALL) { 669 if ((p9_debug_level&P9_DEBUG_FCALL) == P9_DEBUG_FCALL) {
779 char buf[150]; 670 char buf[150];
780 671
781 p9_printfcall(buf, sizeof(buf), tc, m->client->dotu); 672 p9_printfcall(buf, sizeof(buf), req->tc, client->dotu);
782 printk(KERN_NOTICE "<<< %p %s\n", m, buf); 673 printk(KERN_NOTICE "<<< %p %s\n", m, buf);
783 } 674 }
784#endif 675#endif
785 676
786 req->tag = tag;
787 req->tc = tc;
788 req->rc = NULL;
789 req->t_err = 0;
790 req->status = REQ_STATUS_UNSENT; 677 req->status = REQ_STATUS_UNSENT;
791 678
792 spin_lock(&m->client->lock); 679 spin_lock(&client->lock);
793 list_add_tail(&req->req_list, &m->unsent_req_list); 680 list_add_tail(&req->req_list, &m->unsent_req_list);
794 spin_unlock(&m->client->lock); 681 spin_unlock(&client->lock);
795 682
796 if (test_and_clear_bit(Wpending, &m->wsched)) 683 if (test_and_clear_bit(Wpending, &m->wsched))
797 n = POLLOUT; 684 n = POLLOUT;
@@ -801,17 +688,20 @@ static struct p9_req_t *p9_send_request(struct p9_conn *m, struct p9_fcall *tc)
801 if (n & POLLOUT && !test_and_set_bit(Wworksched, &m->wsched)) 688 if (n & POLLOUT && !test_and_set_bit(Wworksched, &m->wsched))
802 queue_work(p9_mux_wq, &m->wq); 689 queue_work(p9_mux_wq, &m->wq);
803 690
804 return req; 691 return 0;
805} 692}
806 693
807static int 694static int p9_fd_cancel(struct p9_client *client, struct p9_req_t *req)
808p9_mux_flush_request(struct p9_conn *m, struct p9_req_t *req)
809{ 695{
810 struct p9_fcall *fc; 696 struct p9_trans_fd *ts = client->trans;
811 struct p9_req_t *rreq, *rptr; 697 struct p9_conn *m = ts->conn;
812 698
813 P9_DPRINTK(P9_DEBUG_MUX, "mux %p req %p tag %d\n", m, req, req->tag); 699 P9_DPRINTK(P9_DEBUG_MUX, "mux %p req %p tag %d\n", m, req, req->tag);
814 700
701 spin_lock(&client->lock);
702 list_del(&req->req_list);
703 spin_unlock(&client->lock);
704
815 /* if a response was received for a request, do nothing */ 705 /* if a response was received for a request, do nothing */
816 if (req->rc || req->t_err) { 706 if (req->rc || req->t_err) {
817 P9_DPRINTK(P9_DEBUG_MUX, 707 P9_DPRINTK(P9_DEBUG_MUX,
@@ -819,104 +709,15 @@ p9_mux_flush_request(struct p9_conn *m, struct p9_req_t *req)
819 return 0; 709 return 0;
820 } 710 }
821 711
822 req->status = REQ_STATUS_FLSH; 712 if (req->status == REQ_STATUS_UNSENT) {
823 713 req->status = REQ_STATUS_FLSHD;
824 spin_lock(&m->client->lock); 714 return 0;
825 /* if the request is not sent yet, just remove it from the list */
826 list_for_each_entry_safe(rreq, rptr, &m->unsent_req_list, req_list) {
827 if (rreq->tag == req->tag) {
828 P9_DPRINTK(P9_DEBUG_MUX,
829 "mux %p req %p request is not sent yet\n", m, req);
830 list_del(&rreq->req_list);
831 req->status = REQ_STATUS_FLSHD;
832 spin_unlock(&m->client->lock);
833 p9_conn_rpc_cb(m->client, req);
834 return 0;
835 }
836 } 715 }
837 spin_unlock(&m->client->lock);
838 716
839 clear_thread_flag(TIF_SIGPENDING);
840 fc = p9_create_tflush(req->tag);
841 p9_send_request(m, fc);
842 return 1; 717 return 1;
843} 718}
844 719
845/** 720/**
846 * p9_fd_rpc- sends 9P request and waits until a response is available.
847 * The function can be interrupted.
848 * @client: client instance
849 * @tc: request to be sent
850 * @rc: pointer where a pointer to the response is stored
851 *
852 */
853
854int
855p9_fd_rpc(struct p9_client *client, struct p9_fcall *tc, struct p9_fcall **rc)
856{
857 struct p9_trans_fd *p = client->trans;
858 struct p9_conn *m = p->conn;
859 int err, sigpending;
860 unsigned long flags;
861 struct p9_req_t *req;
862
863 if (rc)
864 *rc = NULL;
865
866 sigpending = 0;
867 if (signal_pending(current)) {
868 sigpending = 1;
869 clear_thread_flag(TIF_SIGPENDING);
870 }
871
872 req = p9_send_request(m, tc);
873 if (IS_ERR(req)) {
874 err = PTR_ERR(req);
875 P9_DPRINTK(P9_DEBUG_MUX, "error %d\n", err);
876 return err;
877 }
878
879 err = wait_event_interruptible(*req->wq, req->rc != NULL ||
880 req->t_err < 0);
881 if (req->t_err < 0)
882 err = req->t_err;
883
884 if (err == -ERESTARTSYS && client->status == Connected
885 && m->err == 0) {
886 if (p9_mux_flush_request(m, req)) {
887 /* wait until we get response of the flush message */
888 do {
889 clear_thread_flag(TIF_SIGPENDING);
890 err = wait_event_interruptible(*req->wq,
891 req->rc || req->t_err);
892 } while (!req->rc && !req->t_err &&
893 err == -ERESTARTSYS &&
894 client->status == Connected && !m->err);
895
896 err = -ERESTARTSYS;
897 }
898 sigpending = 1;
899 }
900
901 if (sigpending) {
902 spin_lock_irqsave(&current->sighand->siglock, flags);
903 recalc_sigpending();
904 spin_unlock_irqrestore(&current->sighand->siglock, flags);
905 }
906
907 if (rc)
908 *rc = req->rc;
909 else
910 kfree(req->rc);
911
912 p9_free_req(client, req);
913 if (err > 0)
914 err = -EIO;
915
916 return err;
917}
918
919/**
920 * parse_options - parse mount options into session structure 721 * parse_options - parse mount options into session structure
921 * @options: options string passed from mount 722 * @options: options string passed from mount
922 * @opts: transport-specific structure to parse options into 723 * @opts: transport-specific structure to parse options into
@@ -1243,7 +1044,8 @@ static struct p9_trans_module p9_tcp_trans = {
1243 .def = 1, 1044 .def = 1,
1244 .create = p9_fd_create_tcp, 1045 .create = p9_fd_create_tcp,
1245 .close = p9_fd_close, 1046 .close = p9_fd_close,
1246 .rpc = p9_fd_rpc, 1047 .request = p9_fd_request,
1048 .cancel = p9_fd_cancel,
1247 .owner = THIS_MODULE, 1049 .owner = THIS_MODULE,
1248}; 1050};
1249 1051
@@ -1253,7 +1055,8 @@ static struct p9_trans_module p9_unix_trans = {
1253 .def = 0, 1055 .def = 0,
1254 .create = p9_fd_create_unix, 1056 .create = p9_fd_create_unix,
1255 .close = p9_fd_close, 1057 .close = p9_fd_close,
1256 .rpc = p9_fd_rpc, 1058 .request = p9_fd_request,
1059 .cancel = p9_fd_cancel,
1257 .owner = THIS_MODULE, 1060 .owner = THIS_MODULE,
1258}; 1061};
1259 1062
@@ -1263,7 +1066,8 @@ static struct p9_trans_module p9_fd_trans = {
1263 .def = 0, 1066 .def = 0,
1264 .create = p9_fd_create, 1067 .create = p9_fd_create,
1265 .close = p9_fd_close, 1068 .close = p9_fd_close,
1266 .rpc = p9_fd_rpc, 1069 .request = p9_fd_request,
1070 .cancel = p9_fd_cancel,
1267 .owner = THIS_MODULE, 1071 .owner = THIS_MODULE,
1268}; 1072};
1269 1073
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index e18de14c30d5..2d7781ec663b 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -126,17 +126,16 @@ static void req_done(struct virtqueue *vq)
126 struct virtio_chan *chan = vq->vdev->priv; 126 struct virtio_chan *chan = vq->vdev->priv;
127 struct p9_fcall *rc; 127 struct p9_fcall *rc;
128 unsigned int len; 128 unsigned int len;
129 unsigned long flags;
130 struct p9_req_t *req; 129 struct p9_req_t *req;
131 130
132 spin_lock_irqsave(&chan->lock, flags); 131 P9_DPRINTK(P9_DEBUG_TRANS, ": request done\n");
132
133 while ((rc = chan->vq->vq_ops->get_buf(chan->vq, &len)) != NULL) { 133 while ((rc = chan->vq->vq_ops->get_buf(chan->vq, &len)) != NULL) {
134 P9_DPRINTK(P9_DEBUG_TRANS, ": rc %p\n", rc);
135 P9_DPRINTK(P9_DEBUG_TRANS, ": lookup tag %d\n", rc->tag);
134 req = p9_tag_lookup(chan->client, rc->tag); 136 req = p9_tag_lookup(chan->client, rc->tag);
135 req->status = REQ_STATUS_RCVD; 137 p9_client_cb(chan->client, req);
136 wake_up(req->wq);
137 } 138 }
138 /* In case queue is stopped waiting for more buffers. */
139 spin_unlock_irqrestore(&chan->lock, flags);
140} 139}
141 140
142/** 141/**
@@ -173,8 +172,14 @@ pack_sg_list(struct scatterlist *sg, int start, int limit, char *data,
173 return index-start; 172 return index-start;
174} 173}
175 174
175/* We don't currently allow canceling of virtio requests */
176static int p9_virtio_cancel(struct p9_client *client, struct p9_req_t *req)
177{
178 return 1;
179}
180
176/** 181/**
177 * p9_virtio_rpc - issue a request and wait for a response 182 * p9_virtio_request - issue a request
178 * @t: transport state 183 * @t: transport state
179 * @tc: &p9_fcall request to transmit 184 * @tc: &p9_fcall request to transmit
 180 * @rc: &p9_fcall to put response into 185 */
@@ -182,44 +187,22 @@ pack_sg_list(struct scatterlist *sg, int start, int limit, char *data,
182 */ 187 */
183 188
184static int 189static int
185p9_virtio_rpc(struct p9_client *c, struct p9_fcall *tc, struct p9_fcall **rc) 190p9_virtio_request(struct p9_client *client, struct p9_req_t *req)
186{ 191{
187 int in, out; 192 int in, out;
188 int n, err, size; 193 struct virtio_chan *chan = client->trans;
189 struct virtio_chan *chan = c->trans; 194 char *rdata = (char *)req->rc+sizeof(struct p9_fcall);
190 char *rdata;
191 struct p9_req_t *req;
192 unsigned long flags;
193
194 if (*rc == NULL) {
195 *rc = kmalloc(sizeof(struct p9_fcall) + c->msize, GFP_KERNEL);
196 if (!*rc)
197 return -ENOMEM;
198 }
199
200 rdata = (char *)*rc+sizeof(struct p9_fcall);
201
202 n = P9_NOTAG;
203 if (tc->id != P9_TVERSION) {
204 n = p9_idpool_get(c->tagpool);
205 if (n < 0)
206 return -ENOMEM;
207 }
208
209 spin_lock_irqsave(&chan->lock, flags);
210 req = p9_tag_alloc(c, n);
211 spin_unlock_irqrestore(&chan->lock, flags);
212
213 p9_set_tag(tc, n);
214 195
215 P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio rpc tag %d\n", n); 196 P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio request\n");
216 197
217 out = pack_sg_list(chan->sg, 0, VIRTQUEUE_NUM, tc->sdata, tc->size); 198 out = pack_sg_list(chan->sg, 0, VIRTQUEUE_NUM, req->tc->sdata,
218 in = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM-out, rdata, c->msize); 199 req->tc->size);
200 in = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM-out, rdata,
201 client->msize);
219 202
220 req->status = REQ_STATUS_SENT; 203 req->status = REQ_STATUS_SENT;
221 204
222 if (chan->vq->vq_ops->add_buf(chan->vq, chan->sg, out, in, tc)) { 205 if (chan->vq->vq_ops->add_buf(chan->vq, chan->sg, out, in, req->tc)) {
223 P9_DPRINTK(P9_DEBUG_TRANS, 206 P9_DPRINTK(P9_DEBUG_TRANS,
224 "9p debug: virtio rpc add_buf returned failure"); 207 "9p debug: virtio rpc add_buf returned failure");
225 return -EIO; 208 return -EIO;
@@ -227,28 +210,7 @@ p9_virtio_rpc(struct p9_client *c, struct p9_fcall *tc, struct p9_fcall **rc)
227 210
228 chan->vq->vq_ops->kick(chan->vq); 211 chan->vq->vq_ops->kick(chan->vq);
229 212
230 wait_event(*req->wq, req->status == REQ_STATUS_RCVD); 213 P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio request kicked\n");
231
232 size = le32_to_cpu(*(__le32 *) rdata);
233
234 err = p9_deserialize_fcall(rdata, size, *rc, c->dotu);
235 if (err < 0) {
236 P9_DPRINTK(P9_DEBUG_TRANS,
237 "9p debug: virtio rpc deserialize returned %d\n", err);
238 return err;
239 }
240
241#ifdef CONFIG_NET_9P_DEBUG
242 if ((p9_debug_level&P9_DEBUG_FCALL) == P9_DEBUG_FCALL) {
243 char buf[150];
244
245 p9_printfcall(buf, sizeof(buf), *rc, c->dotu);
246 printk(KERN_NOTICE ">>> %p %s\n", c, buf);
247 }
248#endif
249
250 p9_free_req(c, req);
251
252 return 0; 214 return 0;
253} 215}
254 216
@@ -394,7 +356,8 @@ static struct p9_trans_module p9_virtio_trans = {
394 .name = "virtio", 356 .name = "virtio",
395 .create = p9_virtio_create, 357 .create = p9_virtio_create,
396 .close = p9_virtio_close, 358 .close = p9_virtio_close,
397 .rpc = p9_virtio_rpc, 359 .request = p9_virtio_request,
360 .cancel = p9_virtio_cancel,
398 .maxsize = PAGE_SIZE*16, 361 .maxsize = PAGE_SIZE*16,
399 .def = 0, 362 .def = 0,
400 .owner = THIS_MODULE, 363 .owner = THIS_MODULE,