author     Latchesar Ionkov <lucho@ionkov.net>             2009-04-05 17:28:59 -0400
committer  Eric Van Hensbergen <ericvh@vTrogdor.(none)>    2009-04-05 17:54:53 -0400
commit     1bab88b2310998de18b32529a27ea835d164254a
tree       fed704a8b3b4ce93d8cd25462ccc9186dec84b7a /net/9p/client.c
parent     742b11a7ec60faa25d76c95c268041ab215c25ad
net/9p: handle correctly interrupted 9P requests
Currently the 9p code crashes when an operation is interrupted, for
example when the user presses ^C while reading from a file.
This patch fixes the code responsible for interrupting and flushing
9P operations.
Signed-off-by: Latchesar Ionkov <lucho@ionkov.net>
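
The commit message is brief, so here is a small self-contained sketch of the control flow this patch leaves in p9_client_rpc() when the waiting process is interrupted: the transport is asked to cancel the request, p9_client_flush() sends a TFLUSH when cancellation is not possible, and if the reply arrived in the meantime the interruption is not reported as an error. Everything below is a userspace stand-in written for illustration, not kernel code; only the overall flow follows the patch, and the names are borrowed from net/9p/client.c.

/*
 * Userspace model of the interrupted-request path introduced by this
 * patch.  Types and helpers are stand-ins for illustration only.
 */
#include <stdio.h>

enum req_status {
	REQ_STATUS_SENT,	/* request on the wire, no reply yet */
	REQ_STATUS_RCVD,	/* reply already received */
};

struct p9_req {
	enum req_status status;
};

/* Stand-in for c->trans_mod->cancel(): in this sketch the request has
 * always reached the server, so it cannot simply be pulled back and a
 * TFLUSH is required (nonzero return). */
static int transport_cancel(struct p9_req *req)
{
	(void)req;
	return 1;
}

/* Stand-in for p9_client_flush(): with this patch it sends the TFLUSH,
 * drops the old request from the pending list if no reply came in, and
 * frees the flush request itself before returning. */
static void client_flush(struct p9_req *req)
{
	(void)req;
	printf("TFLUSH sent for interrupted request\n");
}

/* Models the tail of p9_client_rpc() after wait_event_interruptible()
 * came back early because the caller was signalled. */
static int handle_interrupt(struct p9_req *req)
{
	int err = -512;		/* stands in for -ERESTARTSYS */

	if (transport_cancel(req))
		client_flush(req);

	/* if we received the response anyway, don't signal error */
	if (req->status == REQ_STATUS_RCVD)
		err = 0;

	return err;
}

int main(void)
{
	struct p9_req pending = { .status = REQ_STATUS_SENT };
	struct p9_req done = { .status = REQ_STATUS_RCVD };

	printf("interrupted before reply: err %d\n", handle_interrupt(&pending));
	printf("interrupted after reply:  err %d\n", handle_interrupt(&done));
	return 0;
}

Compiled as an ordinary C program (e.g. cc sketch.c), it reports an error only for the request whose reply never arrived, which is the behaviour the hunks below introduce.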
Diffstat (limited to 'net/9p/client.c')
-rw-r--r-- | net/9p/client.c | 74
1 file changed, 19 insertions(+), 55 deletions(-)
diff --git a/net/9p/client.c b/net/9p/client.c
index 781d89a952e4..dd43a8289b0d 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -203,7 +203,6 @@ static struct p9_req_t *p9_tag_alloc(struct p9_client *c, u16 tag)
 	p9pdu_reset(req->tc);
 	p9pdu_reset(req->rc);
 
-	req->flush_tag = 0;
 	req->tc->tag = tag-1;
 	req->status = REQ_STATUS_ALLOC;
 
@@ -324,35 +323,9 @@ static void p9_free_req(struct p9_client *c, struct p9_req_t *r)
  */
 void p9_client_cb(struct p9_client *c, struct p9_req_t *req)
 {
-	struct p9_req_t *other_req;
-	unsigned long flags;
-
 	P9_DPRINTK(P9_DEBUG_MUX, " tag %d\n", req->tc->tag);
-
-	if (req->status == REQ_STATUS_ERROR)
-		wake_up(req->wq);
-
-	if (req->flush_tag) {			/* flush receive path */
-		P9_DPRINTK(P9_DEBUG_9P, "<<< RFLUSH %d\n", req->tc->tag);
-		spin_lock_irqsave(&c->lock, flags);
-		other_req = p9_tag_lookup(c, req->flush_tag);
-		if (other_req->status != REQ_STATUS_FLSH) /* stale flush */
-			spin_unlock_irqrestore(&c->lock, flags);
-		else {
-			other_req->status = REQ_STATUS_FLSHD;
-			spin_unlock_irqrestore(&c->lock, flags);
-			wake_up(other_req->wq);
-		}
-		p9_free_req(c, req);
-	} else {				/* normal receive path */
-		P9_DPRINTK(P9_DEBUG_MUX, "normal: tag %d\n", req->tc->tag);
-		spin_lock_irqsave(&c->lock, flags);
-		if (req->status != REQ_STATUS_FLSHD)
-			req->status = REQ_STATUS_RCVD;
-		spin_unlock_irqrestore(&c->lock, flags);
-		wake_up(req->wq);
-		P9_DPRINTK(P9_DEBUG_MUX, "wakeup: %d\n", req->tc->tag);
-	}
+	wake_up(req->wq);
+	P9_DPRINTK(P9_DEBUG_MUX, "wakeup: %d\n", req->tc->tag);
 }
 EXPORT_SYMBOL(p9_client_cb);
 
@@ -486,9 +459,15 @@ static int p9_client_flush(struct p9_client *c, struct p9_req_t *oldreq)
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
-	req->flush_tag = oldtag;
 
-	/* we don't free anything here because RPC isn't complete */
+	/* if we haven't received a response for oldreq,
+	   remove it from the list. */
+	spin_lock(&c->lock);
+	if (oldreq->status == REQ_STATUS_FLSH)
+		list_del(&oldreq->req_list);
+	spin_unlock(&c->lock);
+
+	p9_free_req(c, req);
 	return 0;
 }
 
@@ -509,7 +488,6 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...)
 	struct p9_req_t *req;
 	unsigned long flags;
 	int sigpending;
-	int flushed = 0;
 
 	P9_DPRINTK(P9_DEBUG_MUX, "client %p op %d\n", c, type);
 
@@ -546,42 +524,28 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...)
 		goto reterr;
 	}
 
-	/* if it was a flush we just transmitted, return our tag */
-	if (type == P9_TFLUSH)
-		return req;
-again:
 	P9_DPRINTK(P9_DEBUG_MUX, "wait %p tag: %d\n", req->wq, tag);
 	err = wait_event_interruptible(*req->wq,
 						req->status >= REQ_STATUS_RCVD);
-	P9_DPRINTK(P9_DEBUG_MUX, "wait %p tag: %d returned %d (flushed=%d)\n",
-						req->wq, tag, err, flushed);
+	P9_DPRINTK(P9_DEBUG_MUX, "wait %p tag: %d returned %d\n",
+						req->wq, tag, err);
 
 	if (req->status == REQ_STATUS_ERROR) {
 		P9_DPRINTK(P9_DEBUG_ERROR, "req_status error %d\n", req->t_err);
 		err = req->t_err;
-	} else if (err == -ERESTARTSYS && flushed) {
-		P9_DPRINTK(P9_DEBUG_MUX, "flushed - going again\n");
-		goto again;
-	} else if (req->status == REQ_STATUS_FLSHD) {
-		P9_DPRINTK(P9_DEBUG_MUX, "flushed - erestartsys\n");
-		err = -ERESTARTSYS;
 	}
 
-	if ((err == -ERESTARTSYS) && (c->status == Connected) && (!flushed)) {
+	if ((err == -ERESTARTSYS) && (c->status == Connected)) {
 		P9_DPRINTK(P9_DEBUG_MUX, "flushing\n");
-		spin_lock_irqsave(&c->lock, flags);
-		if (req->status == REQ_STATUS_SENT)
-			req->status = REQ_STATUS_FLSH;
-		spin_unlock_irqrestore(&c->lock, flags);
 		sigpending = 1;
-		flushed = 1;
 		clear_thread_flag(TIF_SIGPENDING);
 
-		if (c->trans_mod->cancel(c, req)) {
-			err = p9_client_flush(c, req);
-			if (err == 0)
-				goto again;
-		}
+		if (c->trans_mod->cancel(c, req))
+			p9_client_flush(c, req);
+
+		/* if we received the response anyway, don't signal error */
+		if (req->status == REQ_STATUS_RCVD)
+			err = 0;
 	}
 
 	if (sigpending) {
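
A note on the other half of the change: after this patch the receive callback p9_client_cb() does no flush bookkeeping at all; the transport records the final request status and the callback merely wakes the sleeper, which then decides in p9_client_rpc() whether a flush is needed. The following is a minimal pthreads model of that wake-up contract, with invented names, using a mutex and condition variable as a stand-in for the kernel wait queue.

/*
 * Model of the publish-then-wake contract between the completion side
 * (p9_client_cb in the kernel) and the waiting side (p9_client_rpc).
 * Build with: cc -pthread model.c
 */
#include <pthread.h>
#include <stdio.h>

enum { STATUS_SENT, STATUS_RCVD };

struct req {
	int status;
	pthread_mutex_t lock;
	pthread_cond_t wq;		/* stands in for req->wq */
};

/* Completion path: equivalent in spirit to the new two-line callback. */
static void *complete(void *arg)
{
	struct req *r = arg;

	pthread_mutex_lock(&r->lock);
	r->status = STATUS_RCVD;	/* publish the final status first... */
	pthread_cond_signal(&r->wq);	/* ...then wake the waiter */
	pthread_mutex_unlock(&r->lock);
	return NULL;
}

int main(void)
{
	static struct req r = {
		.status = STATUS_SENT,
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.wq = PTHREAD_COND_INITIALIZER,
	};
	pthread_t t;

	pthread_create(&t, NULL, complete, &r);

	/* Waiting side: counterpart of wait_event_interruptible(). */
	pthread_mutex_lock(&r.lock);
	while (r.status < STATUS_RCVD)
		pthread_cond_wait(&r.wq, &r.lock);
	pthread_mutex_unlock(&r.lock);

	printf("request completed, status %d\n", r.status);
	pthread_join(t, NULL);
	return 0;
}

The waiter returns only once the completion thread has published the status, mirroring the condition req->status >= REQ_STATUS_RCVD that p9_client_rpc() waits on in the patched code.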