author	Latchesar Ionkov <lucho@ionkov.net>	2006-05-15 12:44:21 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-05-15 14:20:56 -0400
commit	41e5a6ac80c600e1f8bda0a4871f0b797e097d78
tree	e2796bac0d285751d027eff931e31c0842669788 /fs/9p/mux.c
parent	343f1fe6f2e3fb4912db241e639b0721c2e14f2e
[PATCH] v9fs: signal handling fixes
Multiple races can happen when v9fs is interrupted by a signal and a Tflush
message is sent to the server. After v9fs sends Tflush, it doesn't wait until
it receives Rflush, and possibly the response to the original message. This
behavior may confuse v9fs about which fids are allocated by the file server.

This patch fixes the races and the fid allocation.

Signed-off-by: Latchesar Ionkov <lucho@ionkov.net>
Cc: Eric Van Hensbergen <ericvh@hera.kernel.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
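The ordering rule the patch enforces is easier to see outside the kernel: an interrupted request may only be flushed if no response has arrived yet, it is then marked Flushing so a late reply is not dispatched twice, and its tag is retired only after the Rflush (or the original reply) has been handled. Below is a rough, self-contained userspace C sketch of that state machine; the demo_* names and the struct are illustrative only and are not part of the v9fs code.

	#include <stdio.h>

	/* Simplified model of the per-request flush states introduced by the patch. */
	enum req_state { NONE, FLUSHING, FLUSHED };

	struct demo_req {
		int tag;
		int have_response;	/* models req->rcall || req->err */
		enum req_state flush;	/* models req->flush */
	};

	/*
	 * Models the role of v9fs_mux_flush_request(): returns 1 when a Tflush
	 * has to be sent and the caller must wait for its Rflush; returns 0 when
	 * the original response already arrived, so no flush (and no wait) is needed.
	 */
	static int demo_flush_request(struct demo_req *req)
	{
		if (req->have_response)
			return 0;	/* reply raced with the signal: keep it */

		req->flush = FLUSHING;
		printf("send Tflush oldtag=%d and wait for Rflush\n", req->tag);
		return 1;
	}

	/* Models the Rflush callback: only now may the tag be retired and reused. */
	static void demo_flush_done(struct demo_req *req)
	{
		req->flush = FLUSHED;
		printf("Rflush for tag %d received, tag can be retired\n", req->tag);
	}

	int main(void)
	{
		struct demo_req req = { .tag = 7, .have_response = 0, .flush = NONE };

		if (demo_flush_request(&req))
			demo_flush_done(&req);	/* in v9fs this runs from the mux callback */

		return 0;
	}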
Diffstat (limited to 'fs/9p/mux.c')
-rw-r--r--	fs/9p/mux.c	222
1 file changed, 131 insertions(+), 91 deletions(-)
diff --git a/fs/9p/mux.c b/fs/9p/mux.c
index 3e5b124a7212..f4407eb276c7 100644
--- a/fs/9p/mux.c
+++ b/fs/9p/mux.c
@@ -50,15 +50,23 @@ enum {
 	Wpending = 8,		/* can write */
 };
 
+enum {
+	None,
+	Flushing,
+	Flushed,
+};
+
 struct v9fs_mux_poll_task;
 
 struct v9fs_req {
+	spinlock_t lock;
 	int tag;
 	struct v9fs_fcall *tcall;
 	struct v9fs_fcall *rcall;
 	int err;
 	v9fs_mux_req_callback cb;
 	void *cba;
+	int flush;
 	struct list_head req_list;
 };
 
@@ -96,8 +104,8 @@ struct v9fs_mux_poll_task {
 
 struct v9fs_mux_rpc {
 	struct v9fs_mux_data *m;
-	struct v9fs_req *req;
 	int err;
+	struct v9fs_fcall *tcall;
 	struct v9fs_fcall *rcall;
 	wait_queue_head_t wqueue;
 };
@@ -524,10 +532,9 @@ again:
 
 static void process_request(struct v9fs_mux_data *m, struct v9fs_req *req)
 {
-	int ecode, tag;
+	int ecode;
 	struct v9fs_str *ename;
 
-	tag = req->tag;
 	if (!req->err && req->rcall->id == RERROR) {
 		ecode = req->rcall->params.rerror.errno;
 		ename = &req->rcall->params.rerror.error;
@@ -553,23 +560,6 @@ static void process_request(struct v9fs_mux_data *m, struct v9fs_req *req)
 		if (!req->err)
 			req->err = -EIO;
 	}
-
-	if (req->err == ERREQFLUSH)
-		return;
-
-	if (req->cb) {
-		dprintk(DEBUG_MUX, "calling callback tcall %p rcall %p\n",
-			req->tcall, req->rcall);
-
-		(*req->cb) (req->cba, req->tcall, req->rcall, req->err);
-		req->cb = NULL;
-	} else
-		kfree(req->rcall);
-
-	v9fs_mux_put_tag(m, tag);
-
-	wake_up(&m->equeue);
-	kfree(req);
 }
 
 /**
@@ -669,17 +659,26 @@ static void v9fs_read_work(void *a)
 	list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) {
 		if (rreq->tag == rcall->tag) {
 			req = rreq;
-			req->rcall = rcall;
-			list_del(&req->req_list);
-			spin_unlock(&m->lock);
-			process_request(m, req);
+			if (req->flush != Flushing)
+				list_del(&req->req_list);
 			break;
 		}
-
 	}
+	spin_unlock(&m->lock);
 
-	if (!req) {
-		spin_unlock(&m->lock);
+	if (req) {
+		req->rcall = rcall;
+		process_request(m, req);
+
+		if (req->flush != Flushing) {
+			if (req->cb)
+				(*req->cb) (req, req->cba);
+			else
+				kfree(req->rcall);
+
+			wake_up(&m->equeue);
+		}
+	} else {
 		if (err >= 0 && rcall->id != RFLUSH)
 			dprintk(DEBUG_ERROR,
 				"unexpected response mux %p id %d tag %d\n",
@@ -746,7 +745,6 @@ static struct v9fs_req *v9fs_send_request(struct v9fs_mux_data *m,
 		return ERR_PTR(-ENOMEM);
 
 	v9fs_set_tag(tc, n);
-
 	if ((v9fs_debug_level&DEBUG_FCALL) == DEBUG_FCALL) {
 		char buf[150];
 
@@ -754,12 +752,14 @@ static struct v9fs_req *v9fs_send_request(struct v9fs_mux_data *m,
 		printk(KERN_NOTICE "<<< %p %s\n", m, buf);
 	}
 
+	spin_lock_init(&req->lock);
 	req->tag = n;
 	req->tcall = tc;
 	req->rcall = NULL;
 	req->err = 0;
 	req->cb = cb;
 	req->cba = cba;
+	req->flush = None;
 
 	spin_lock(&m->lock);
 	list_add_tail(&req->req_list, &m->unsent_req_list);
@@ -776,72 +776,108 @@ static struct v9fs_req *v9fs_send_request(struct v9fs_mux_data *m,
 	return req;
 }
 
-static void v9fs_mux_flush_cb(void *a, struct v9fs_fcall *tc,
-	struct v9fs_fcall *rc, int err)
+static void v9fs_mux_free_request(struct v9fs_mux_data *m, struct v9fs_req *req)
+{
+	v9fs_mux_put_tag(m, req->tag);
+	kfree(req);
+}
+
+static void v9fs_mux_flush_cb(struct v9fs_req *freq, void *a)
 {
 	v9fs_mux_req_callback cb;
 	int tag;
 	struct v9fs_mux_data *m;
-	struct v9fs_req *req, *rptr;
+	struct v9fs_req *req, *rreq, *rptr;
 
 	m = a;
-	dprintk(DEBUG_MUX, "mux %p tc %p rc %p err %d oldtag %d\n", m, tc,
-		rc, err, tc->params.tflush.oldtag);
+	dprintk(DEBUG_MUX, "mux %p tc %p rc %p err %d oldtag %d\n", m,
+		freq->tcall, freq->rcall, freq->err,
+		freq->tcall->params.tflush.oldtag);
 
 	spin_lock(&m->lock);
 	cb = NULL;
-	tag = tc->params.tflush.oldtag;
-	list_for_each_entry_safe(req, rptr, &m->req_list, req_list) {
-		if (req->tag == tag) {
+	tag = freq->tcall->params.tflush.oldtag;
+	req = NULL;
+	list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) {
+		if (rreq->tag == tag) {
+			req = rreq;
 			list_del(&req->req_list);
-			if (req->cb) {
-				cb = req->cb;
-				req->cb = NULL;
-				spin_unlock(&m->lock);
-				(*cb) (req->cba, req->tcall, req->rcall,
-					req->err);
-			}
-			kfree(req);
-			wake_up(&m->equeue);
 			break;
 		}
 	}
+	spin_unlock(&m->lock);
 
-	if (!cb)
-		spin_unlock(&m->lock);
+	if (req) {
+		spin_lock(&req->lock);
+		req->flush = Flushed;
+		spin_unlock(&req->lock);
+
+		if (req->cb)
+			(*req->cb) (req, req->cba);
+		else
+			kfree(req->rcall);
+
+		wake_up(&m->equeue);
+	}
 
-	v9fs_mux_put_tag(m, tag);
-	kfree(tc);
-	kfree(rc);
+	kfree(freq->tcall);
+	kfree(freq->rcall);
+	v9fs_mux_free_request(m, freq);
 }
 
-static void
+static int
 v9fs_mux_flush_request(struct v9fs_mux_data *m, struct v9fs_req *req)
 {
 	struct v9fs_fcall *fc;
+	struct v9fs_req *rreq, *rptr;
 
 	dprintk(DEBUG_MUX, "mux %p req %p tag %d\n", m, req, req->tag);
 
+	/* if a response was received for a request, do nothing */
+	spin_lock(&req->lock);
+	if (req->rcall || req->err) {
+		spin_unlock(&req->lock);
+		dprintk(DEBUG_MUX, "mux %p req %p response already received\n", m, req);
+		return 0;
+	}
+
+	req->flush = Flushing;
+	spin_unlock(&req->lock);
+
+	spin_lock(&m->lock);
+	/* if the request is not sent yet, just remove it from the list */
+	list_for_each_entry_safe(rreq, rptr, &m->unsent_req_list, req_list) {
+		if (rreq->tag == req->tag) {
+			dprintk(DEBUG_MUX, "mux %p req %p request is not sent yet\n", m, req);
+			list_del(&rreq->req_list);
+			req->flush = Flushed;
+			spin_unlock(&m->lock);
+			if (req->cb)
+				(*req->cb) (req, req->cba);
+			return 0;
+		}
+	}
+	spin_unlock(&m->lock);
+
+	clear_thread_flag(TIF_SIGPENDING);
 	fc = v9fs_create_tflush(req->tag);
 	v9fs_send_request(m, fc, v9fs_mux_flush_cb, m);
+	return 1;
 }
 
 static void
-v9fs_mux_rpc_cb(void *a, struct v9fs_fcall *tc, struct v9fs_fcall *rc, int err)
+v9fs_mux_rpc_cb(struct v9fs_req *req, void *a)
 {
 	struct v9fs_mux_rpc *r;
 
-	if (err == ERREQFLUSH) {
-		kfree(rc);
-		dprintk(DEBUG_MUX, "err req flush\n");
-		return;
-	}
-
+	dprintk(DEBUG_MUX, "req %p r %p\n", req, a);
 	r = a;
-	dprintk(DEBUG_MUX, "mux %p req %p tc %p rc %p err %d\n", r->m, r->req,
-		tc, rc, err);
-	r->rcall = rc;
-	r->err = err;
+	r->rcall = req->rcall;
+	r->err = req->err;
+
+	if (req->flush!=None && !req->err)
+		r->err = -ERESTARTSYS;
+
 	wake_up(&r->wqueue);
 }
 
@@ -856,12 +892,13 @@ int
 v9fs_mux_rpc(struct v9fs_mux_data *m, struct v9fs_fcall *tc,
 	     struct v9fs_fcall **rc)
 {
-	int err;
+	int err, sigpending;
 	unsigned long flags;
 	struct v9fs_req *req;
 	struct v9fs_mux_rpc r;
 
 	r.err = 0;
+	r.tcall = tc;
 	r.rcall = NULL;
 	r.m = m;
 	init_waitqueue_head(&r.wqueue);
@@ -869,48 +906,50 @@ v9fs_mux_rpc(struct v9fs_mux_data *m, struct v9fs_fcall *tc,
 	if (rc)
 		*rc = NULL;
 
+	sigpending = 0;
+	if (signal_pending(current)) {
+		sigpending = 1;
+		clear_thread_flag(TIF_SIGPENDING);
+	}
+
 	req = v9fs_send_request(m, tc, v9fs_mux_rpc_cb, &r);
 	if (IS_ERR(req)) {
 		err = PTR_ERR(req);
 		dprintk(DEBUG_MUX, "error %d\n", err);
-		return PTR_ERR(req);
+		return err;
 	}
 
-	r.req = req;
-	dprintk(DEBUG_MUX, "mux %p tc %p tag %d rpc %p req %p\n", m, tc,
-		req->tag, &r, req);
 	err = wait_event_interruptible(r.wqueue, r.rcall != NULL || r.err < 0);
 	if (r.err < 0)
 		err = r.err;
 
 	if (err == -ERESTARTSYS && m->trans->status == Connected && m->err == 0) {
-		spin_lock(&m->lock);
-		req->tcall = NULL;
-		req->err = ERREQFLUSH;
-		spin_unlock(&m->lock);
+		if (v9fs_mux_flush_request(m, req)) {
+			/* wait until we get response of the flush message */
+			do {
+				clear_thread_flag(TIF_SIGPENDING);
+				err = wait_event_interruptible(r.wqueue,
+					r.rcall || r.err);
+			} while (!r.rcall && !r.err && err==-ERESTARTSYS &&
+				m->trans->status==Connected && !m->err);
+		}
+		sigpending = 1;
+	}
 
-	clear_thread_flag(TIF_SIGPENDING);
-	v9fs_mux_flush_request(m, req);
+	if (sigpending) {
 		spin_lock_irqsave(&current->sighand->siglock, flags);
 		recalc_sigpending();
 		spin_unlock_irqrestore(&current->sighand->siglock, flags);
 	}
 
-	if (!err) {
-		if (r.rcall)
-			dprintk(DEBUG_MUX, "got response id %d tag %d\n",
-				r.rcall->id, r.rcall->tag);
-
-		if (rc)
-			*rc = r.rcall;
-		else
-			kfree(r.rcall);
-	} else {
-		kfree(r.rcall);
-		dprintk(DEBUG_MUX, "got error %d\n", err);
-		if (err > 0)
-			err = -EIO;
-	}
+	if (rc)
+		*rc = r.rcall;
+	else
+		kfree(r.rcall);
+
+	v9fs_mux_free_request(m, req);
+	if (err > 0)
+		err = -EIO;
 
 	return err;
 }
@@ -951,12 +990,15 @@ void v9fs_mux_cancel(struct v9fs_mux_data *m, int err)
 	struct v9fs_req *req, *rtmp;
 	LIST_HEAD(cancel_list);
 
-	dprintk(DEBUG_MUX, "mux %p err %d\n", m, err);
+	dprintk(DEBUG_ERROR, "mux %p err %d\n", m, err);
 	m->err = err;
 	spin_lock(&m->lock);
 	list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) {
 		list_move(&req->req_list, &cancel_list);
 	}
+	list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) {
+		list_move(&req->req_list, &cancel_list);
+	}
 	spin_unlock(&m->lock);
 
 	list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) {
@@ -965,11 +1007,9 @@ void v9fs_mux_cancel(struct v9fs_mux_data *m, int err)
 		req->err = err;
 
 		if (req->cb)
-			(*req->cb) (req->cba, req->tcall, req->rcall, req->err);
+			(*req->cb) (req, req->cba);
 		else
 			kfree(req->rcall);
-
-		kfree(req);
 	}
 
 	wake_up(&m->equeue);