Diffstat (limited to 'fs')
-rw-r--r--  fs/9p/fcall.c          |  21
-rw-r--r--  fs/9p/mux.c            | 222
-rw-r--r--  fs/9p/mux.h            |   4
-rw-r--r--  fs/9p/vfs_file.c       |  13
-rw-r--r--  fs/9p/vfs_inode.c      |  19
-rw-r--r--  fs/Makefile            |   2
-rw-r--r--  fs/autofs4/autofs_i.h  |   5
-rw-r--r--  fs/autofs4/root.c      |  10
-rw-r--r--  fs/autofs4/waitq.c     |  77
-rw-r--r--  fs/binfmt_flat.c       |  30
-rw-r--r--  fs/bio.c               |   3
-rw-r--r--  fs/compat.c            | 177
-rw-r--r--  fs/configfs/dir.c      | 137
-rw-r--r--  fs/exportfs/expfs.c    |   2
-rw-r--r--  fs/inotify.c           |   9
-rw-r--r--  fs/jffs2/nodelist.c    |   6
-rw-r--r--  fs/jfs/jfs_metapage.c  |  20
-rw-r--r--  fs/namespace.c         |   7
-rw-r--r--  fs/nfsd/export.c       |   4
-rw-r--r--  fs/nfsd/vfs.c          |   7
-rw-r--r--  fs/ocfs2/aops.c        |  46
-rw-r--r--  fs/ocfs2/aops.h        |   4
-rw-r--r--  fs/ocfs2/extent_map.c  |   6
-rw-r--r--  fs/ocfs2/file.c        |  86
-rw-r--r--  fs/ocfs2/journal.c     |   8
-rw-r--r--  fs/ocfs2/uptodate.c    |   4
-rw-r--r--  fs/ocfs2/vote.c        |   6
-rw-r--r--  fs/open.c              |   1
-rw-r--r--  fs/partitions/check.c  |   3
-rw-r--r--  fs/smbfs/dir.c         |   5
-rw-r--r--  fs/smbfs/request.c     |   4
31 files changed, 576 insertions, 372 deletions
diff --git a/fs/9p/fcall.c b/fs/9p/fcall.c
index 71742ba150c4..6f2617820a4e 100644
--- a/fs/9p/fcall.c
+++ b/fs/9p/fcall.c
@@ -98,23 +98,20 @@ v9fs_t_attach(struct v9fs_session_info *v9ses, char *uname, char *aname,
 static void v9fs_t_clunk_cb(void *a, struct v9fs_fcall *tc,
 struct v9fs_fcall *rc, int err)
 {
-int fid;
+int fid, id;
 struct v9fs_session_info *v9ses;

-if (err)
-return;
-
+id = 0;
 fid = tc->params.tclunk.fid;
-kfree(tc);
-
-if (!rc)
-return;
-
-v9ses = a;
-if (rc->id == RCLUNK)
-v9fs_put_idpool(fid, &v9ses->fidpool);
+if (rc)
+id = rc->id;

+kfree(tc);
 kfree(rc);
+if (id == RCLUNK) {
+v9ses = a;
+v9fs_put_idpool(fid, &v9ses->fidpool);
+}
 }

 /**
diff --git a/fs/9p/mux.c b/fs/9p/mux.c
index 3e5b124a7212..f4407eb276c7 100644
--- a/fs/9p/mux.c
+++ b/fs/9p/mux.c
@@ -50,15 +50,23 @@ enum {
 Wpending = 8, /* can write */
 };

+enum {
+None,
+Flushing,
+Flushed,
+};
+
 struct v9fs_mux_poll_task;

 struct v9fs_req {
+spinlock_t lock;
 int tag;
 struct v9fs_fcall *tcall;
 struct v9fs_fcall *rcall;
 int err;
 v9fs_mux_req_callback cb;
 void *cba;
+int flush;
 struct list_head req_list;
 };

@@ -96,8 +104,8 @@ struct v9fs_mux_poll_task {

 struct v9fs_mux_rpc {
 struct v9fs_mux_data *m;
-struct v9fs_req *req;
 int err;
+struct v9fs_fcall *tcall;
 struct v9fs_fcall *rcall;
 wait_queue_head_t wqueue;
 };
@@ -524,10 +532,9 @@ again:

 static void process_request(struct v9fs_mux_data *m, struct v9fs_req *req)
 {
-int ecode, tag;
+int ecode;
 struct v9fs_str *ename;

-tag = req->tag;
 if (!req->err && req->rcall->id == RERROR) {
 ecode = req->rcall->params.rerror.errno;
 ename = &req->rcall->params.rerror.error;
@@ -553,23 +560,6 @@ static void process_request(struct v9fs_mux_data *m, struct v9fs_req *req)
 if (!req->err)
 req->err = -EIO;
 }
-
-if (req->err == ERREQFLUSH)
-return;
-
-if (req->cb) {
-dprintk(DEBUG_MUX, "calling callback tcall %p rcall %p\n",
-req->tcall, req->rcall);
-
-(*req->cb) (req->cba, req->tcall, req->rcall, req->err);
-req->cb = NULL;
-} else
-kfree(req->rcall);
-
-v9fs_mux_put_tag(m, tag);
-
-wake_up(&m->equeue);
-kfree(req);
 }

 /**
@@ -669,17 +659,26 @@ static void v9fs_read_work(void *a)
 list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) {
 if (rreq->tag == rcall->tag) {
 req = rreq;
-req->rcall = rcall;
+if (req->flush != Flushing)
 list_del(&req->req_list);
-spin_unlock(&m->lock);
-process_request(m, req);
 break;
 }
-
 }
+spin_unlock(&m->lock);

-if (!req) {
-spin_unlock(&m->lock);
+if (req) {
+req->rcall = rcall;
+process_request(m, req);
+
+if (req->flush != Flushing) {
+if (req->cb)
+(*req->cb) (req, req->cba);
+else
+kfree(req->rcall);
+
+wake_up(&m->equeue);
+}
+} else {
 if (err >= 0 && rcall->id != RFLUSH)
 dprintk(DEBUG_ERROR,
 "unexpected response mux %p id %d tag %d\n",
@@ -746,7 +745,6 @@ static struct v9fs_req *v9fs_send_request(struct v9fs_mux_data *m,
 return ERR_PTR(-ENOMEM);

 v9fs_set_tag(tc, n);
-
 if ((v9fs_debug_level&DEBUG_FCALL) == DEBUG_FCALL) {
 char buf[150];

@@ -754,12 +752,14 @@ static struct v9fs_req *v9fs_send_request(struct v9fs_mux_data *m,
 printk(KERN_NOTICE "<<< %p %s\n", m, buf);
 }

+spin_lock_init(&req->lock);
 req->tag = n;
 req->tcall = tc;
 req->rcall = NULL;
 req->err = 0;
 req->cb = cb;
 req->cba = cba;
+req->flush = None;

 spin_lock(&m->lock);
 list_add_tail(&req->req_list, &m->unsent_req_list);
@@ -776,72 +776,108 @@ static struct v9fs_req *v9fs_send_request(struct v9fs_mux_data *m,
 return req;
 }

-static void v9fs_mux_flush_cb(void *a, struct v9fs_fcall *tc,
-struct v9fs_fcall *rc, int err)
+static void v9fs_mux_free_request(struct v9fs_mux_data *m, struct v9fs_req *req)
+{
+v9fs_mux_put_tag(m, req->tag);
+kfree(req);
+}
+
+static void v9fs_mux_flush_cb(struct v9fs_req *freq, void *a)
 {
 v9fs_mux_req_callback cb;
 int tag;
 struct v9fs_mux_data *m;
-struct v9fs_req *req, *rptr;
+struct v9fs_req *req, *rreq, *rptr;

 m = a;
-dprintk(DEBUG_MUX, "mux %p tc %p rc %p err %d oldtag %d\n", m, tc,
-rc, err, tc->params.tflush.oldtag);
+dprintk(DEBUG_MUX, "mux %p tc %p rc %p err %d oldtag %d\n", m,
+freq->tcall, freq->rcall, freq->err,
+freq->tcall->params.tflush.oldtag);

 spin_lock(&m->lock);
 cb = NULL;
-tag = tc->params.tflush.oldtag;
-list_for_each_entry_safe(req, rptr, &m->req_list, req_list) {
-if (req->tag == tag) {
+tag = freq->tcall->params.tflush.oldtag;
+req = NULL;
+list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) {
+if (rreq->tag == tag) {
+req = rreq;
 list_del(&req->req_list);
-if (req->cb) {
-cb = req->cb;
-req->cb = NULL;
-spin_unlock(&m->lock);
-(*cb) (req->cba, req->tcall, req->rcall,
-req->err);
-}
-kfree(req);
-wake_up(&m->equeue);
 break;
 }
 }
+spin_unlock(&m->lock);

-if (!cb)
-spin_unlock(&m->lock);
+if (req) {
+spin_lock(&req->lock);
+req->flush = Flushed;
+spin_unlock(&req->lock);
+
+if (req->cb)
+(*req->cb) (req, req->cba);
+else
+kfree(req->rcall);
+
+wake_up(&m->equeue);
+}

-v9fs_mux_put_tag(m, tag);
-kfree(tc);
-kfree(rc);
+kfree(freq->tcall);
+kfree(freq->rcall);
+v9fs_mux_free_request(m, freq);
 }

-static void
+static int
 v9fs_mux_flush_request(struct v9fs_mux_data *m, struct v9fs_req *req)
 {
 struct v9fs_fcall *fc;
+struct v9fs_req *rreq, *rptr;

 dprintk(DEBUG_MUX, "mux %p req %p tag %d\n", m, req, req->tag);

+/* if a response was received for a request, do nothing */
+spin_lock(&req->lock);
+if (req->rcall || req->err) {
+spin_unlock(&req->lock);
+dprintk(DEBUG_MUX, "mux %p req %p response already received\n", m, req);
+return 0;
+}
+
+req->flush = Flushing;
+spin_unlock(&req->lock);
+
+spin_lock(&m->lock);
+/* if the request is not sent yet, just remove it from the list */
+list_for_each_entry_safe(rreq, rptr, &m->unsent_req_list, req_list) {
+if (rreq->tag == req->tag) {
+dprintk(DEBUG_MUX, "mux %p req %p request is not sent yet\n", m, req);
+list_del(&rreq->req_list);
+req->flush = Flushed;
+spin_unlock(&m->lock);
+if (req->cb)
+(*req->cb) (req, req->cba);
+return 0;
+}
+}
+spin_unlock(&m->lock);
+
+clear_thread_flag(TIF_SIGPENDING);
 fc = v9fs_create_tflush(req->tag);
 v9fs_send_request(m, fc, v9fs_mux_flush_cb, m);
+return 1;
 }

 static void
-v9fs_mux_rpc_cb(void *a, struct v9fs_fcall *tc, struct v9fs_fcall *rc, int err)
+v9fs_mux_rpc_cb(struct v9fs_req *req, void *a)
 {
 struct v9fs_mux_rpc *r;

-if (err == ERREQFLUSH) {
-kfree(rc);
-dprintk(DEBUG_MUX, "err req flush\n");
-return;
-}
-
+dprintk(DEBUG_MUX, "req %p r %p\n", req, a);
 r = a;
-dprintk(DEBUG_MUX, "mux %p req %p tc %p rc %p err %d\n", r->m, r->req,
-tc, rc, err);
-r->rcall = rc;
-r->err = err;
+r->rcall = req->rcall;
+r->err = req->err;
+
+if (req->flush!=None && !req->err)
+r->err = -ERESTARTSYS;
+
 wake_up(&r->wqueue);
 }

@@ -856,12 +892,13 @@ int
 v9fs_mux_rpc(struct v9fs_mux_data *m, struct v9fs_fcall *tc,
 struct v9fs_fcall **rc)
 {
-int err;
+int err, sigpending;
 unsigned long flags;
 struct v9fs_req *req;
 struct v9fs_mux_rpc r;

 r.err = 0;
+r.tcall = tc;
 r.rcall = NULL;
 r.m = m;
 init_waitqueue_head(&r.wqueue);
@@ -869,48 +906,50 @@ v9fs_mux_rpc(struct v9fs_mux_data *m, struct v9fs_fcall *tc,
 if (rc)
 *rc = NULL;

+sigpending = 0;
+if (signal_pending(current)) {
+sigpending = 1;
+clear_thread_flag(TIF_SIGPENDING);
+}
+
 req = v9fs_send_request(m, tc, v9fs_mux_rpc_cb, &r);
 if (IS_ERR(req)) {
 err = PTR_ERR(req);
 dprintk(DEBUG_MUX, "error %d\n", err);
-return PTR_ERR(req);
+return err;
 }

-r.req = req;
-dprintk(DEBUG_MUX, "mux %p tc %p tag %d rpc %p req %p\n", m, tc,
-req->tag, &r, req);
 err = wait_event_interruptible(r.wqueue, r.rcall != NULL || r.err < 0);
 if (r.err < 0)
 err = r.err;

 if (err == -ERESTARTSYS && m->trans->status == Connected && m->err == 0) {
-spin_lock(&m->lock);
-req->tcall = NULL;
-req->err = ERREQFLUSH;
-spin_unlock(&m->lock);
+if (v9fs_mux_flush_request(m, req)) {
+/* wait until we get response of the flush message */
+do {
+clear_thread_flag(TIF_SIGPENDING);
+err = wait_event_interruptible(r.wqueue,
+r.rcall || r.err);
+} while (!r.rcall && !r.err && err==-ERESTARTSYS &&
+m->trans->status==Connected && !m->err);
+}
+sigpending = 1;
+}

-clear_thread_flag(TIF_SIGPENDING);
-v9fs_mux_flush_request(m, req);
+if (sigpending) {
 spin_lock_irqsave(&current->sighand->siglock, flags);
 recalc_sigpending();
 spin_unlock_irqrestore(&current->sighand->siglock, flags);
 }

-if (!err) {
-if (r.rcall)
-dprintk(DEBUG_MUX, "got response id %d tag %d\n",
-r.rcall->id, r.rcall->tag);
-
-if (rc)
-*rc = r.rcall;
-else
-kfree(r.rcall);
-} else {
+if (rc)
+*rc = r.rcall;
+else
 kfree(r.rcall);
-dprintk(DEBUG_MUX, "got error %d\n", err);
-if (err > 0)
-err = -EIO;
-}
+
+v9fs_mux_free_request(m, req);
+if (err > 0)
+err = -EIO;

 return err;
 }
@@ -951,12 +990,15 @@ void v9fs_mux_cancel(struct v9fs_mux_data *m, int err)
 struct v9fs_req *req, *rtmp;
 LIST_HEAD(cancel_list);

-dprintk(DEBUG_MUX, "mux %p err %d\n", m, err);
+dprintk(DEBUG_ERROR, "mux %p err %d\n", m, err);
 m->err = err;
 spin_lock(&m->lock);
 list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) {
 list_move(&req->req_list, &cancel_list);
 }
+list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) {
+list_move(&req->req_list, &cancel_list);
+}
 spin_unlock(&m->lock);

 list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) {
@@ -965,11 +1007,9 @@ void v9fs_mux_cancel(struct v9fs_mux_data *m, int err)
 req->err = err;

 if (req->cb)
-(*req->cb) (req->cba, req->tcall, req->rcall, req->err);
+(*req->cb) (req, req->cba);
 else
 kfree(req->rcall);
-
-kfree(req);
 }

 wake_up(&m->equeue);
diff --git a/fs/9p/mux.h b/fs/9p/mux.h
index e90bfd32ea42..fb10c50186a1 100644
--- a/fs/9p/mux.h
+++ b/fs/9p/mux.h
@@ -24,6 +24,7 @@
 */

 struct v9fs_mux_data;
+struct v9fs_req;

 /**
 * v9fs_mux_req_callback - callback function that is called when the
@@ -36,8 +37,7 @@ struct v9fs_mux_data;
 * @rc - response call
 * @err - error code (non-zero if error occured)
 */
-typedef void (*v9fs_mux_req_callback)(void *a, struct v9fs_fcall *tc,
-struct v9fs_fcall *rc, int err);
+typedef void (*v9fs_mux_req_callback)(struct v9fs_req *req, void *a);

 int v9fs_mux_global_init(void);
 void v9fs_mux_global_exit(void);
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index 083dcfcd158e..1a8e46084f0e 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -72,11 +72,17 @@ int v9fs_file_open(struct inode *inode, struct file *file)
 return -ENOSPC;
 }

-err = v9fs_t_walk(v9ses, vfid->fid, fid, NULL, NULL);
+err = v9fs_t_walk(v9ses, vfid->fid, fid, NULL, &fcall);
 if (err < 0) {
 dprintk(DEBUG_ERROR, "rewalk didn't work\n");
-goto put_fid;
+if (fcall && fcall->id == RWALK)
+goto clunk_fid;
+else {
+v9fs_put_idpool(fid, &v9ses->fidpool);
+goto free_fcall;
+}
 }
+kfree(fcall);

 /* TODO: do special things for O_EXCL, O_NOFOLLOW, O_SYNC */
 /* translate open mode appropriately */
@@ -109,8 +115,7 @@ int v9fs_file_open(struct inode *inode, struct file *file)
 clunk_fid:
 v9fs_t_clunk(v9ses, fid);

-put_fid:
-v9fs_put_idpool(fid, &v9ses->fidpool);
+free_fcall:
 kfree(fcall);

 return err;
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 133db366d306..2cb87ba4b1c1 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -270,7 +270,10 @@ v9fs_create(struct v9fs_session_info *v9ses, u32 pfid, char *name, u32 perm,
 err = v9fs_t_walk(v9ses, pfid, fid, NULL, &fcall);
 if (err < 0) {
 PRINT_FCALL_ERROR("clone error", fcall);
-goto put_fid;
+if (fcall && fcall->id == RWALK)
+goto clunk_fid;
+else
+goto put_fid;
 }
 kfree(fcall);

@@ -322,6 +325,9 @@ v9fs_clone_walk(struct v9fs_session_info *v9ses, u32 fid, struct dentry *dentry)
 &fcall);

 if (err < 0) {
+if (fcall && fcall->id == RWALK)
+goto clunk_fid;
+
 PRINT_FCALL_ERROR("walk error", fcall);
 v9fs_put_idpool(nfid, &v9ses->fidpool);
 goto error;
@@ -640,19 +646,26 @@ static struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
 }

 result = v9fs_t_walk(v9ses, dirfidnum, newfid,
-(char *)dentry->d_name.name, NULL);
+(char *)dentry->d_name.name, &fcall);
+
 if (result < 0) {
-v9fs_put_idpool(newfid, &v9ses->fidpool);
+if (fcall && fcall->id == RWALK)
+v9fs_t_clunk(v9ses, newfid);
+else
+v9fs_put_idpool(newfid, &v9ses->fidpool);
+
 if (result == -ENOENT) {
 d_add(dentry, NULL);
 dprintk(DEBUG_VFS,
 "Return negative dentry %p count %d\n",
 dentry, atomic_read(&dentry->d_count));
+kfree(fcall);
 return NULL;
 }
 dprintk(DEBUG_ERROR, "walk error:%d\n", result);
 goto FreeFcall;
 }
+kfree(fcall);

 result = v9fs_t_stat(v9ses, newfid, &fcall);
 if (result < 0) {
diff --git a/fs/Makefile b/fs/Makefile
index 2c22e282c777..c731d2c0f409 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -45,6 +45,7 @@ obj-$(CONFIG_DNOTIFY) += dnotify.o
 obj-$(CONFIG_PROC_FS) += proc/
 obj-y += partitions/
 obj-$(CONFIG_SYSFS) += sysfs/
+obj-$(CONFIG_CONFIGFS_FS) += configfs/
 obj-y += devpts/

 obj-$(CONFIG_PROFILING) += dcookies.o
@@ -101,6 +102,5 @@ obj-$(CONFIG_BEFS_FS) += befs/
 obj-$(CONFIG_HOSTFS) += hostfs/
 obj-$(CONFIG_HPPFS) += hppfs/
 obj-$(CONFIG_DEBUG_FS) += debugfs/
-obj-$(CONFIG_CONFIGFS_FS) += configfs/
 obj-$(CONFIG_OCFS2_FS) += ocfs2/
 obj-$(CONFIG_GFS2_FS) += gfs2/
diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h
index 57c4903614e5..d6603d02304c 100644
--- a/fs/autofs4/autofs_i.h
+++ b/fs/autofs4/autofs_i.h
@@ -74,8 +74,8 @@ struct autofs_wait_queue {
 struct autofs_wait_queue *next;
 autofs_wqt_t wait_queue_token;
 /* We use the following to see what we are waiting for */
-int hash;
-int len;
+unsigned int hash;
+unsigned int len;
 char *name;
 u32 dev;
 u64 ino;
@@ -85,7 +85,6 @@ struct autofs_wait_queue {
 pid_t tgid;
 /* This is for status reporting upon return */
 int status;
-atomic_t notify;
 atomic_t wait_ctr;
 };

diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
index 84e030c8ddd0..5100f984783f 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs4/root.c
@@ -327,6 +327,7 @@ static int try_to_fill_dentry(struct dentry *dentry, int flags)
 static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
 {
 struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
+struct autofs_info *ino = autofs4_dentry_ino(dentry);
 int oz_mode = autofs4_oz_mode(sbi);
 unsigned int lookup_type;
 int status;
@@ -340,13 +341,8 @@ static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
 if (oz_mode || !lookup_type)
 goto done;

-/*
-* If a request is pending wait for it.
-* If it's a mount then it won't be expired till at least
-* a liitle later and if it's an expire then we might need
-* to mount it again.
-*/
-if (autofs4_ispending(dentry)) {
+/* If an expire request is pending wait for it. */
+if (ino && (ino->flags & AUTOFS_INF_EXPIRING)) {
 DPRINTK("waiting for active request %p name=%.*s",
 dentry, dentry->d_name.len, dentry->d_name.name);

diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
index 142ab6aa2aa1..ce103e7b0bc3 100644
--- a/fs/autofs4/waitq.c
+++ b/fs/autofs4/waitq.c
@@ -189,14 +189,30 @@ static int autofs4_getpath(struct autofs_sb_info *sbi,
 return len;
 }

+static struct autofs_wait_queue *
+autofs4_find_wait(struct autofs_sb_info *sbi,
+char *name, unsigned int hash, unsigned int len)
+{
+struct autofs_wait_queue *wq;
+
+for (wq = sbi->queues; wq; wq = wq->next) {
+if (wq->hash == hash &&
+wq->len == len &&
+wq->name && !memcmp(wq->name, name, len))
+break;
+}
+return wq;
+}
+
 int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
 enum autofs_notify notify)
 {
+struct autofs_info *ino;
 struct autofs_wait_queue *wq;
 char *name;
 unsigned int len = 0;
 unsigned int hash = 0;
-int status;
+int status, type;

 /* In catatonic mode, we don't wait for nobody */
 if (sbi->catatonic)
@@ -223,21 +239,41 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
 return -EINTR;
 }

-for (wq = sbi->queues ; wq ; wq = wq->next) {
-if (wq->hash == dentry->d_name.hash &&
-wq->len == len &&
-wq->name && !memcmp(wq->name, name, len))
-break;
-}
+wq = autofs4_find_wait(sbi, name, hash, len);
+ino = autofs4_dentry_ino(dentry);
+if (!wq && ino && notify == NFY_NONE) {
+/*
+* Either we've betean the pending expire to post it's
+* wait or it finished while we waited on the mutex.
+* So we need to wait till either, the wait appears
+* or the expire finishes.
+*/
+
+while (ino->flags & AUTOFS_INF_EXPIRING) {
+mutex_unlock(&sbi->wq_mutex);
+schedule_timeout_interruptible(HZ/10);
+if (mutex_lock_interruptible(&sbi->wq_mutex)) {
+kfree(name);
+return -EINTR;
+}
+wq = autofs4_find_wait(sbi, name, hash, len);
+if (wq)
+break;
+}

-if (!wq) {
-/* Can't wait for an expire if there's no mount */
-if (notify == NFY_NONE && !d_mountpoint(dentry)) {
+/*
+* Not ideal but the status has already gone. Of the two
+* cases where we wait on NFY_NONE neither depend on the
+* return status of the wait.
+*/
+if (!wq) {
 kfree(name);
 mutex_unlock(&sbi->wq_mutex);
-return -ENOENT;
+return 0;
 }
+}

+if (!wq) {
 /* Create a new wait queue */
 wq = kmalloc(sizeof(struct autofs_wait_queue),GFP_KERNEL);
 if (!wq) {
@@ -263,20 +299,7 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
 wq->tgid = current->tgid;
 wq->status = -EINTR; /* Status return if interrupted */
 atomic_set(&wq->wait_ctr, 2);
-atomic_set(&wq->notify, 1);
-mutex_unlock(&sbi->wq_mutex);
-} else {
-atomic_inc(&wq->wait_ctr);
 mutex_unlock(&sbi->wq_mutex);
-kfree(name);
-DPRINTK("existing wait id = 0x%08lx, name = %.*s, nfy=%d",
-(unsigned long) wq->wait_queue_token, wq->len, wq->name, notify);
-}
-
-if (notify != NFY_NONE && atomic_read(&wq->notify)) {
-int type;
-
-atomic_dec(&wq->notify);

 if (sbi->version < 5) {
 if (notify == NFY_MOUNT)
@@ -299,6 +322,12 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,

 /* autofs4_notify_daemon() may block */
 autofs4_notify_daemon(sbi, wq, type);
+} else {
+atomic_inc(&wq->wait_ctr);
+mutex_unlock(&sbi->wq_mutex);
+kfree(name);
+DPRINTK("existing wait id = 0x%08lx, name = %.*s, nfy=%d",
+(unsigned long) wq->wait_queue_token, wq->len, wq->name, notify);
 }

 /* wq->name is NULL if and only if the lock is already released */
diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
index 69f44dcdb0b4..b1c902e319c1 100644
--- a/fs/binfmt_flat.c
+++ b/fs/binfmt_flat.c
@@ -428,7 +428,6 @@ static int load_flat_file(struct linux_binprm * bprm,
 loff_t fpos;
 unsigned long start_code, end_code;
 int ret;
-int exec_fileno;

 hdr = ((struct flat_hdr *) bprm->buf); /* exec-header */
 inode = bprm->file->f_dentry->d_inode;
@@ -502,21 +501,12 @@ static int load_flat_file(struct linux_binprm * bprm,
 goto err;
 }

-/* check file descriptor */
-exec_fileno = get_unused_fd();
-if (exec_fileno < 0) {
-ret = -EMFILE;
-goto err;
-}
-get_file(bprm->file);
-fd_install(exec_fileno, bprm->file);
-
 /* Flush all traces of the currently running executable */
 if (id == 0) {
 result = flush_old_exec(bprm);
 if (result) {
 ret = result;
-goto err_close;
+goto err;
 }

 /* OK, This is the point of no return */
@@ -548,7 +538,7 @@ static int load_flat_file(struct linux_binprm * bprm,
 textpos = (unsigned long) -ENOMEM;
 printk("Unable to mmap process text, errno %d\n", (int)-textpos);
 ret = textpos;
-goto err_close;
+goto err;
 }

 down_write(&current->mm->mmap_sem);
@@ -564,7 +554,7 @@ static int load_flat_file(struct linux_binprm * bprm,
 (int)-datapos);
 do_munmap(current->mm, textpos, text_len);
 ret = realdatastart;
-goto err_close;
+goto err;
 }
 datapos = realdatastart + MAX_SHARED_LIBS * sizeof(unsigned long);

@@ -587,7 +577,7 @@ static int load_flat_file(struct linux_binprm * bprm,
 do_munmap(current->mm, textpos, text_len);
 do_munmap(current->mm, realdatastart, data_len + extra);
 ret = result;
-goto err_close;
+goto err;
 }

 reloc = (unsigned long *) (datapos+(ntohl(hdr->reloc_start)-text_len));
@@ -606,7 +596,7 @@ static int load_flat_file(struct linux_binprm * bprm,
 printk("Unable to allocate RAM for process text/data, errno %d\n",
 (int)-textpos);
 ret = textpos;
-goto err_close;
+goto err;
 }

 realdatastart = textpos + ntohl(hdr->data_start);
@@ -652,7 +642,7 @@ static int load_flat_file(struct linux_binprm * bprm,
 do_munmap(current->mm, textpos, text_len + data_len + extra +
 MAX_SHARED_LIBS * sizeof(unsigned long));
 ret = result;
-goto err_close;
+goto err;
 }
 }

@@ -717,7 +707,7 @@ static int load_flat_file(struct linux_binprm * bprm,
 addr = calc_reloc(*rp, libinfo, id, 0);
 if (addr == RELOC_FAILED) {
 ret = -ENOEXEC;
-goto err_close;
+goto err;
 }
 *rp = addr;
 }
@@ -747,7 +737,7 @@ static int load_flat_file(struct linux_binprm * bprm,
 rp = (unsigned long *) calc_reloc(addr, libinfo, id, 1);
 if (rp == (unsigned long *)RELOC_FAILED) {
 ret = -ENOEXEC;
-goto err_close;
+goto err;
 }

 /* Get the pointer's value. */
@@ -762,7 +752,7 @@ static int load_flat_file(struct linux_binprm * bprm,
 addr = calc_reloc(addr, libinfo, id, 0);
 if (addr == RELOC_FAILED) {
 ret = -ENOEXEC;
-goto err_close;
+goto err;
 }

 /* Write back the relocated pointer. */
@@ -783,8 +773,6 @@ static int load_flat_file(struct linux_binprm * bprm,
 stack_len);

 return 0;
-err_close:
-sys_close(exec_fileno);
 err:
 return ret;
 }
diff --git a/fs/bio.c b/fs/bio.c
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -1116,6 +1116,9 @@ struct bio_pair *bio_split(struct bio *bi, mempool_t *pool, int first_sectors)
 bp->bio1.bi_io_vec = &bp->bv1;
 bp->bio2.bi_io_vec = &bp->bv2;

+bp->bio1.bi_max_vecs = 1;
+bp->bio2.bi_max_vecs = 1;
+
 bp->bio1.bi_end_io = bio_pair_end_1;
 bp->bio2.bi_end_io = bio_pair_end_2;

diff --git a/fs/compat.c b/fs/compat.c
index 970888aad843..b1f64786a613 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -1913,7 +1913,7 @@ asmlinkage long compat_sys_ppoll(struct pollfd __user *ufds,
 }

 if (sigmask) {
-if (sigsetsize |= sizeof(compat_sigset_t))
+if (sigsetsize != sizeof(compat_sigset_t))
 return -EINVAL;
 if (copy_from_user(&ss32, sigmask, sizeof(ss32)))
 return -EFAULT;
@@ -2030,109 +2030,115 @@ union compat_nfsctl_res {
 struct knfsd_fh cr32_getfs;
 };

-static int compat_nfs_svc_trans(struct nfsctl_arg *karg, struct compat_nfsctl_arg __user *arg)
+static int compat_nfs_svc_trans(struct nfsctl_arg *karg,
+struct compat_nfsctl_arg __user *arg)
 {
-int err;
-
-err = access_ok(VERIFY_READ, &arg->ca32_svc, sizeof(arg->ca32_svc));
-err |= get_user(karg->ca_version, &arg->ca32_version);
-err |= __get_user(karg->ca_svc.svc_port, &arg->ca32_svc.svc32_port);
-err |= __get_user(karg->ca_svc.svc_nthreads, &arg->ca32_svc.svc32_nthreads);
-return (err) ? -EFAULT : 0;
+if (!access_ok(VERIFY_READ, &arg->ca32_svc, sizeof(arg->ca32_svc)) ||
+get_user(karg->ca_version, &arg->ca32_version) ||
+__get_user(karg->ca_svc.svc_port, &arg->ca32_svc.svc32_port) ||
+__get_user(karg->ca_svc.svc_nthreads,
+&arg->ca32_svc.svc32_nthreads))
+return -EFAULT;
+return 0;
 }

-static int compat_nfs_clnt_trans(struct nfsctl_arg *karg, struct compat_nfsctl_arg __user *arg)
-{
-int err;
-
-err = access_ok(VERIFY_READ, &arg->ca32_client, sizeof(arg->ca32_client));
-err |= get_user(karg->ca_version, &arg->ca32_version);
-err |= __copy_from_user(&karg->ca_client.cl_ident[0],
+static int compat_nfs_clnt_trans(struct nfsctl_arg *karg,
+struct compat_nfsctl_arg __user *arg)
+{
+if (!access_ok(VERIFY_READ, &arg->ca32_client,
+sizeof(arg->ca32_client)) ||
+get_user(karg->ca_version, &arg->ca32_version) ||
+__copy_from_user(&karg->ca_client.cl_ident[0],
 &arg->ca32_client.cl32_ident[0],
-NFSCLNT_IDMAX);
-err |= __get_user(karg->ca_client.cl_naddr, &arg->ca32_client.cl32_naddr);
-err |= __copy_from_user(&karg->ca_client.cl_addrlist[0],
-&arg->ca32_client.cl32_addrlist[0],
-(sizeof(struct in_addr) * NFSCLNT_ADDRMAX));
-err |= __get_user(karg->ca_client.cl_fhkeytype,
-&arg->ca32_client.cl32_fhkeytype);
-err |= __get_user(karg->ca_client.cl_fhkeylen,
-&arg->ca32_client.cl32_fhkeylen);
-err |= __copy_from_user(&karg->ca_client.cl_fhkey[0],
-&arg->ca32_client.cl32_fhkey[0],
-NFSCLNT_KEYMAX);
+NFSCLNT_IDMAX) ||
+__get_user(karg->ca_client.cl_naddr,
+&arg->ca32_client.cl32_naddr) ||
+__copy_from_user(&karg->ca_client.cl_addrlist[0],
+&arg->ca32_client.cl32_addrlist[0],
+(sizeof(struct in_addr) * NFSCLNT_ADDRMAX)) ||
+__get_user(karg->ca_client.cl_fhkeytype,
+&arg->ca32_client.cl32_fhkeytype) ||
+__get_user(karg->ca_client.cl_fhkeylen,
+&arg->ca32_client.cl32_fhkeylen) ||
+__copy_from_user(&karg->ca_client.cl_fhkey[0],
+&arg->ca32_client.cl32_fhkey[0],
+NFSCLNT_KEYMAX))
+return -EFAULT;

-return (err) ? -EFAULT : 0;
+return 0;
 }

-static int compat_nfs_exp_trans(struct nfsctl_arg *karg, struct compat_nfsctl_arg __user *arg)
-{
-int err;
-
-err = access_ok(VERIFY_READ, &arg->ca32_export, sizeof(arg->ca32_export));
-err |= get_user(karg->ca_version, &arg->ca32_version);
-err |= __copy_from_user(&karg->ca_export.ex_client[0],
+static int compat_nfs_exp_trans(struct nfsctl_arg *karg,
+struct compat_nfsctl_arg __user *arg)
+{
+if (!access_ok(VERIFY_READ, &arg->ca32_export,
+sizeof(arg->ca32_export)) ||
+get_user(karg->ca_version, &arg->ca32_version) ||
+__copy_from_user(&karg->ca_export.ex_client[0],
 &arg->ca32_export.ex32_client[0],
-NFSCLNT_IDMAX);
-err |= __copy_from_user(&karg->ca_export.ex_path[0],
+NFSCLNT_IDMAX) ||
+__copy_from_user(&karg->ca_export.ex_path[0],
 &arg->ca32_export.ex32_path[0],
-NFS_MAXPATHLEN);
-err |= __get_user(karg->ca_export.ex_dev,
-&arg->ca32_export.ex32_dev);
-err |= __get_user(karg->ca_export.ex_ino,
-&arg->ca32_export.ex32_ino);
-err |= __get_user(karg->ca_export.ex_flags,
-&arg->ca32_export.ex32_flags);
-err |= __get_user(karg->ca_export.ex_anon_uid,
-&arg->ca32_export.ex32_anon_uid);
-err |= __get_user(karg->ca_export.ex_anon_gid,
-&arg->ca32_export.ex32_anon_gid);
+NFS_MAXPATHLEN) ||
+__get_user(karg->ca_export.ex_dev,
+&arg->ca32_export.ex32_dev) ||
+__get_user(karg->ca_export.ex_ino,
+&arg->ca32_export.ex32_ino) ||
+__get_user(karg->ca_export.ex_flags,
+&arg->ca32_export.ex32_flags) ||
+__get_user(karg->ca_export.ex_anon_uid,
+&arg->ca32_export.ex32_anon_uid) ||
+__get_user(karg->ca_export.ex_anon_gid,
+&arg->ca32_export.ex32_anon_gid))
+return -EFAULT;
 SET_UID(karg->ca_export.ex_anon_uid, karg->ca_export.ex_anon_uid);
 SET_GID(karg->ca_export.ex_anon_gid, karg->ca_export.ex_anon_gid);

-return (err) ? -EFAULT : 0;
+return 0;
 }

-static int compat_nfs_getfd_trans(struct nfsctl_arg *karg, struct compat_nfsctl_arg __user *arg)
-{
-int err;
-
-err = access_ok(VERIFY_READ, &arg->ca32_getfd, sizeof(arg->ca32_getfd));
-err |= get_user(karg->ca_version, &arg->ca32_version);
-err |= __copy_from_user(&karg->ca_getfd.gd_addr,
+static int compat_nfs_getfd_trans(struct nfsctl_arg *karg,
+struct compat_nfsctl_arg __user *arg)
+{
+if (!access_ok(VERIFY_READ, &arg->ca32_getfd,
+sizeof(arg->ca32_getfd)) ||
+get_user(karg->ca_version, &arg->ca32_version) ||
+__copy_from_user(&karg->ca_getfd.gd_addr,
 &arg->ca32_getfd.gd32_addr,
-(sizeof(struct sockaddr)));
-err |= __copy_from_user(&karg->ca_getfd.gd_path,
+(sizeof(struct sockaddr))) ||
+__copy_from_user(&karg->ca_getfd.gd_path,
 &arg->ca32_getfd.gd32_path,
-(NFS_MAXPATHLEN+1));
-err |= __get_user(karg->ca_getfd.gd_version,
-&arg->ca32_getfd.gd32_version);
+(NFS_MAXPATHLEN+1)) ||
+__get_user(karg->ca_getfd.gd_version,
+&arg->ca32_getfd.gd32_version))
+return -EFAULT;

-return (err) ? -EFAULT : 0;
+return 0;
 }

-static int compat_nfs_getfs_trans(struct nfsctl_arg *karg, struct compat_nfsctl_arg __user *arg)
+static int compat_nfs_getfs_trans(struct nfsctl_arg *karg,
+struct compat_nfsctl_arg __user *arg)
 {
-int err;
-
-err = access_ok(VERIFY_READ, &arg->ca32_getfs, sizeof(arg->ca32_getfs));
-err |= get_user(karg->ca_version, &arg->ca32_version);
-err |= __copy_from_user(&karg->ca_getfs.gd_addr,
-&arg->ca32_getfs.gd32_addr,
-(sizeof(struct sockaddr)));
-err |= __copy_from_user(&karg->ca_getfs.gd_path,
-&arg->ca32_getfs.gd32_path,
-(NFS_MAXPATHLEN+1));
-err |= __get_user(karg->ca_getfs.gd_maxlen,
-&arg->ca32_getfs.gd32_maxlen);
+if (!access_ok(VERIFY_READ,&arg->ca32_getfs,sizeof(arg->ca32_getfs)) ||
+get_user(karg->ca_version, &arg->ca32_version) ||
+__copy_from_user(&karg->ca_getfs.gd_addr,
+&arg->ca32_getfs.gd32_addr,
+(sizeof(struct sockaddr))) ||
+__copy_from_user(&karg->ca_getfs.gd_path,
+&arg->ca32_getfs.gd32_path,
+(NFS_MAXPATHLEN+1)) ||
+__get_user(karg->ca_getfs.gd_maxlen,
+&arg->ca32_getfs.gd32_maxlen))
+return -EFAULT;

-return (err) ? -EFAULT : 0;
+return 0;
 }

 /* This really doesn't need translations, we are only passing
 * back a union which contains opaque nfs file handle data.
 */
-static int compat_nfs_getfh_res_trans(union nfsctl_res *kres, union compat_nfsctl_res __user *res)
+static int compat_nfs_getfh_res_trans(union nfsctl_res *kres,
+union compat_nfsctl_res __user *res)
 {
 int err;

@@ -2141,8 +2147,9 @@ static int compat_nfs_getfh_res_trans(union nfsctl_res *kres, union compat_nfsct
 return (err) ? -EFAULT : 0;
 }

-asmlinkage long compat_sys_nfsservctl(int cmd, struct compat_nfsctl_arg __user *arg,
-union compat_nfsctl_res __user *res)
+asmlinkage long compat_sys_nfsservctl(int cmd,
+struct compat_nfsctl_arg __user *arg,
+union compat_nfsctl_res __user *res)
 {
 struct nfsctl_arg *karg;
 union nfsctl_res *kres;
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index 5638c8f9362f..5f952187fc53 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -505,13 +505,15 @@ static int populate_groups(struct config_group *group)
 int i;

 if (group->default_groups) {
-/* FYI, we're faking mkdir here
+/*
+* FYI, we're faking mkdir here
 * I'm not sure we need this semaphore, as we're called
 * from our parent's mkdir. That holds our parent's
 * i_mutex, so afaik lookup cannot continue through our
 * parent to find us, let alone mess with our tree.
 * That said, taking our i_mutex is closer to mkdir
-* emulation, and shouldn't hurt. */
+* emulation, and shouldn't hurt.
+*/
 mutex_lock(&dentry->d_inode->i_mutex);

 for (i = 0; group->default_groups[i]; i++) {
@@ -546,20 +548,34 @@ static void unlink_obj(struct config_item *item)

 item->ci_group = NULL;
 item->ci_parent = NULL;
+
+/* Drop the reference for ci_entry */
 config_item_put(item);

+/* Drop the reference for ci_parent */
 config_group_put(group);
 }
 }

 static void link_obj(struct config_item *parent_item, struct config_item *item)
 {
-/* Parent seems redundant with group, but it makes certain
-* traversals much nicer. */
+/*
+* Parent seems redundant with group, but it makes certain
+* traversals much nicer.
+*/
 item->ci_parent = parent_item;
+
+/*
+* We hold a reference on the parent for the child's ci_parent
+* link.
+*/
 item->ci_group = config_group_get(to_config_group(parent_item));
 list_add_tail(&item->ci_entry, &item->ci_group->cg_children);

+/*
+* We hold a reference on the child for ci_entry on the parent's
+* cg_children
+*/
 config_item_get(item);
 }

@@ -684,6 +700,10 @@ static void client_drop_item(struct config_item *parent_item,
 type = parent_item->ci_type;
 BUG_ON(!type);

+/*
+* If ->drop_item() exists, it is responsible for the
+* config_item_put().
+*/
 if (type->ct_group_ops && type->ct_group_ops->drop_item)
 type->ct_group_ops->drop_item(to_config_group(parent_item),
 item);
@@ -694,23 +714,28 @@ static void client_drop_item(struct config_item *parent_item,

 static int configfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
 {
-int ret;
+int ret, module_got = 0;
 struct config_group *group;
 struct config_item *item;
 struct config_item *parent_item;
 struct configfs_subsystem *subsys;
 struct configfs_dirent *sd;
 struct config_item_type *type;
-struct module *owner;
+struct module *owner = NULL;
 char *name;

-if (dentry->d_parent == configfs_sb->s_root)
-return -EPERM;
+if (dentry->d_parent == configfs_sb->s_root) {
+ret = -EPERM;
+goto out;
+}

 sd = dentry->d_parent->d_fsdata;
-if (!(sd->s_type & CONFIGFS_USET_DIR))
-return -EPERM;
+if (!(sd->s_type & CONFIGFS_USET_DIR)) {
+ret = -EPERM;
+goto out;
+}

+/* Get a working ref for the duration of this function */
 parent_item = configfs_get_config_item(dentry->d_parent);
 type = parent_item->ci_type;
 subsys = to_config_group(parent_item)->cg_subsys;
@@ -719,15 +744,16 @@ static int configfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
 if (!type || !type->ct_group_ops ||
 (!type->ct_group_ops->make_group &&
 !type->ct_group_ops->make_item)) {
-config_item_put(parent_item);
-return -EPERM; /* What lack-of-mkdir returns */
+ret = -EPERM; /* Lack-of-mkdir returns -EPERM */
+goto out_put;
 }

 name = kmalloc(dentry->d_name.len + 1, GFP_KERNEL);
 if (!name) {
-config_item_put(parent_item);
-return -ENOMEM;
+ret = -ENOMEM;
+goto out_put;
 }
+
 snprintf(name, dentry->d_name.len + 1, "%s", dentry->d_name.name);

 down(&subsys->su_sem);
@@ -748,40 +774,67 @@ static int configfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)

 kfree(name);
 if (!item) {
-config_item_put(parent_item);
-return -ENOMEM;
+/*
+* If item == NULL, then link_obj() was never called.
+* There are no extra references to clean up.
+*/
+ret = -ENOMEM;
+goto out_put;
 }

-ret = -EINVAL;
+/*
+* link_obj() has been called (via link_group() for groups).
+* From here on out, errors must clean that up.
+*/
+
 type = item->ci_type;
-if (type) {
-owner = type->ct_owner;
-if (try_module_get(owner)) {
-if (group) {
-ret = configfs_attach_group(parent_item,
-item,
-dentry);
-} else {
-ret = configfs_attach_item(parent_item,
-item,
-dentry);
-}
+if (!type) {
+ret = -EINVAL;
+goto out_unlink;
+}

-if (ret) {
-down(&subsys->su_sem);
-if (group)
-unlink_group(group);
-else
-unlink_obj(item);
-client_drop_item(parent_item, item);
-up(&subsys->su_sem);
+owner = type->ct_owner;
+if (!try_module_get(owner)) {
+ret = -EINVAL;
+goto out_unlink;
+}

-config_item_put(parent_item);
-module_put(owner);
-}
-}
+/*
+* I hate doing it this way, but if there is
+* an error, module_put() probably should
+* happen after any cleanup.
+*/
+module_got = 1;
+
+if (group)
+ret = configfs_attach_group(parent_item, item, dentry);
+else
+ret = configfs_attach_item(parent_item, item, dentry);
+
+out_unlink:
+if (ret) {
+/* Tear down everything we built up */
+down(&subsys->su_sem);
+if (group)
+unlink_group(group);
+else
+unlink_obj(item);
+client_drop_item(parent_item, item);
+up(&subsys->su_sem);
+
+if (module_got)
+module_put(owner);
 }

+out_put:
+/*
+* link_obj()/link_group() took a reference from child->parent,
+* so the parent is safely pinned. We can drop our working
+* reference.
+*/
+config_item_put(parent_item);
+
+out:
 return ret;
 }

@@ -801,6 +854,7 @@ static int configfs_rmdir(struct inode *dir, struct dentry *dentry) | |||
801 | if (sd->s_type & CONFIGFS_USET_DEFAULT) | 854 | if (sd->s_type & CONFIGFS_USET_DEFAULT) |
802 | return -EPERM; | 855 | return -EPERM; |
803 | 856 | ||
857 | /* Get a working ref until we have the child */ | ||
804 | parent_item = configfs_get_config_item(dentry->d_parent); | 858 | parent_item = configfs_get_config_item(dentry->d_parent); |
805 | subsys = to_config_group(parent_item)->cg_subsys; | 859 | subsys = to_config_group(parent_item)->cg_subsys; |
806 | BUG_ON(!subsys); | 860 | BUG_ON(!subsys); |
@@ -817,6 +871,7 @@ static int configfs_rmdir(struct inode *dir, struct dentry *dentry) | |||
817 | return ret; | 871 | return ret; |
818 | } | 872 | } |
819 | 873 | ||
874 | /* Get a working ref for the duration of this function */ | ||
820 | item = configfs_get_config_item(dentry); | 875 | item = configfs_get_config_item(dentry); |
821 | 876 | ||
822 | /* Drop reference from above, item already holds one. */ | 877 | /* Drop reference from above, item already holds one. */ |
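
The reworked configfs_mkdir() above routes every failure through the out_unlink/out_put/out labels so the working reference on the parent item and the module reference are each dropped exactly once. Below is a minimal user-space sketch of that unwind pattern; the helpers (link_child, try_get_module, attach_child, put_parent) are purely illustrative stand-ins, not configfs APIs.

    #include <stdio.h>
    #include <errno.h>

    /* Illustrative stand-ins, not configfs APIs. */
    static int  link_child(void)      { return 0; }           /* reversible setup        */
    static void unlink_child(void)    { }                     /* undo of the setup       */
    static int  try_get_module(void)  { return 1; }           /* may fail                */
    static void put_module(void)      { }
    static int  attach_child(void)    { return -EINVAL; }     /* force the error path    */
    static void put_parent(void)      { }                     /* drop the working ref    */

    static int make_child(void)
    {
        int ret, module_got = 0;

        ret = link_child();
        if (ret)
            goto out_put;                 /* nothing else to unwind yet */

        if (!try_get_module()) {
            ret = -EINVAL;
            goto out_unlink;
        }
        module_got = 1;

        ret = attach_child();

    out_unlink:
        if (ret) {
            unlink_child();               /* tear down everything built up */
            if (module_got)
                put_module();             /* only drop what was actually taken */
        }
    out_put:
        put_parent();                     /* the working ref is always dropped */
        return ret;
    }

    int main(void)
    {
        printf("make_child() = %d\n", make_child());
        return 0;
    }

The module_got flag mirrors the hunk above: module_put() only runs on error paths reached after try_module_get() succeeded.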
diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c index b06b54f1bbbb..4c39009350f3 100644 --- a/fs/exportfs/expfs.c +++ b/fs/exportfs/expfs.c | |||
@@ -102,7 +102,7 @@ find_exported_dentry(struct super_block *sb, void *obj, void *parent, | |||
102 | if (acceptable(context, result)) | 102 | if (acceptable(context, result)) |
103 | return result; | 103 | return result; |
104 | if (S_ISDIR(result->d_inode->i_mode)) { | 104 | if (S_ISDIR(result->d_inode->i_mode)) { |
105 | /* there is no other dentry, so fail */ | 105 | err = -EACCES; |
106 | goto err_result; | 106 | goto err_result; |
107 | } | 107 | } |
108 | 108 | ||
diff --git a/fs/inotify.c b/fs/inotify.c index 1f50302849c5..732ec4bd5774 100644 --- a/fs/inotify.c +++ b/fs/inotify.c | |||
@@ -848,7 +848,11 @@ static int inotify_release(struct inode *ignored, struct file *file) | |||
848 | inode = watch->inode; | 848 | inode = watch->inode; |
849 | mutex_lock(&inode->inotify_mutex); | 849 | mutex_lock(&inode->inotify_mutex); |
850 | mutex_lock(&dev->mutex); | 850 | mutex_lock(&dev->mutex); |
851 | remove_watch_no_event(watch, dev); | 851 | |
852 | /* make sure we didn't race with another list removal */ | ||
853 | if (likely(idr_find(&dev->idr, watch->wd))) | ||
854 | remove_watch_no_event(watch, dev); | ||
855 | |||
852 | mutex_unlock(&dev->mutex); | 856 | mutex_unlock(&dev->mutex); |
853 | mutex_unlock(&inode->inotify_mutex); | 857 | mutex_unlock(&inode->inotify_mutex); |
854 | put_inotify_watch(watch); | 858 | put_inotify_watch(watch); |
@@ -890,8 +894,7 @@ static int inotify_ignore(struct inotify_device *dev, s32 wd) | |||
890 | mutex_lock(&dev->mutex); | 894 | mutex_lock(&dev->mutex); |
891 | 895 | ||
892 | /* make sure that we did not race */ | 896 | /* make sure that we did not race */ |
893 | watch = idr_find(&dev->idr, wd); | 897 | if (likely(idr_find(&dev->idr, wd) == watch)) |
894 | if (likely(watch)) | ||
895 | remove_watch(watch, dev); | 898 | remove_watch(watch, dev); |
896 | 899 | ||
897 | mutex_unlock(&dev->mutex); | 900 | mutex_unlock(&dev->mutex); |
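
Both inotify hunks above make the removal paths re-check, under dev->mutex, that the watch is still present in the idr before removing it, so two racing teardowns cannot both perform the removal. A small user-space analog of that check-then-remove-under-the-lock pattern, with a plain array standing in for the idr:

    #include <pthread.h>
    #include <stdio.h>

    #define MAX_WD 16

    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
    static void *table[MAX_WD];              /* stand-in for the idr */

    /* Remove wd only if it is still registered; returns 1 if we removed it. */
    static int remove_if_present(int wd)
    {
        int removed = 0;

        pthread_mutex_lock(&table_lock);
        if (wd >= 0 && wd < MAX_WD && table[wd]) {   /* re-check under the lock */
            table[wd] = NULL;                        /* the actual removal */
            removed = 1;
        }
        pthread_mutex_unlock(&table_lock);
        return removed;
    }

    int main(void)
    {
        int dummy = 42;

        table[3] = &dummy;
        printf("first removal:  %d\n", remove_if_present(3));  /* 1: we won the race */
        printf("second removal: %d\n", remove_if_present(3));  /* 0: already gone    */
        return 0;
    }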
diff --git a/fs/jffs2/nodelist.c b/fs/jffs2/nodelist.c index d4d0c41490cd..1d46677afd17 100644 --- a/fs/jffs2/nodelist.c +++ b/fs/jffs2/nodelist.c | |||
@@ -438,7 +438,8 @@ static int check_node_data(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info | |||
438 | if (c->mtd->point) { | 438 | if (c->mtd->point) { |
439 | err = c->mtd->point(c->mtd, ofs, len, &retlen, &buffer); | 439 | err = c->mtd->point(c->mtd, ofs, len, &retlen, &buffer); |
440 | if (!err && retlen < tn->csize) { | 440 | if (!err && retlen < tn->csize) { |
441 | JFFS2_WARNING("MTD point returned len too short: %u instead of %u.\n", retlen, tn->csize); | 441 | JFFS2_WARNING("MTD point returned len too short: %zu " |
442 | "instead of %u.\n", retlen, tn->csize); | ||
442 | c->mtd->unpoint(c->mtd, buffer, ofs, len); | 443 | c->mtd->unpoint(c->mtd, buffer, ofs, len); |
443 | } else if (err) | 444 | } else if (err) |
444 | JFFS2_WARNING("MTD point failed: error code %d.\n", err); | 445 | JFFS2_WARNING("MTD point failed: error code %d.\n", err); |
@@ -461,7 +462,8 @@ static int check_node_data(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info | |||
461 | } | 462 | } |
462 | 463 | ||
463 | if (retlen != len) { | 464 | if (retlen != len) { |
464 | JFFS2_ERROR("short read at %#08x: %d instead of %d.\n", ofs, retlen, len); | 465 | JFFS2_ERROR("short read at %#08x: %zd instead of %d.\n", |
466 | ofs, retlen, len); | ||
465 | err = -EIO; | 467 | err = -EIO; |
466 | goto free_out; | 468 | goto free_out; |
467 | } | 469 | } |
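
The jffs2 hunks switch the printed lengths to %zu/%zd, the format specifiers that match size_t/ssize_t on every architecture (plain %u/%d only happen to line up where size_t is 32 bits). A trivial standalone illustration:

    #include <stdio.h>
    #include <sys/types.h>   /* ssize_t */

    int main(void)
    {
        size_t  retlen = 512;     /* e.g. the length an MTD read reports back */
        ssize_t sret   = -5;      /* a signed length that may carry an error  */

        /* %zu / %zd match size_t / ssize_t portably. */
        printf("read %zu bytes (signed form: %zd)\n", retlen, sret);
        return 0;
    }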
diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c index f28696f235c4..2b220dd6b4e7 100644 --- a/fs/jfs/jfs_metapage.c +++ b/fs/jfs/jfs_metapage.c | |||
@@ -542,7 +542,7 @@ add_failed: | |||
542 | static int metapage_releasepage(struct page *page, gfp_t gfp_mask) | 542 | static int metapage_releasepage(struct page *page, gfp_t gfp_mask) |
543 | { | 543 | { |
544 | struct metapage *mp; | 544 | struct metapage *mp; |
545 | int busy = 0; | 545 | int ret = 1; |
546 | unsigned int offset; | 546 | unsigned int offset; |
547 | 547 | ||
548 | for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) { | 548 | for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) { |
@@ -552,30 +552,20 @@ static int metapage_releasepage(struct page *page, gfp_t gfp_mask) | |||
552 | continue; | 552 | continue; |
553 | 553 | ||
554 | jfs_info("metapage_releasepage: mp = 0x%p", mp); | 554 | jfs_info("metapage_releasepage: mp = 0x%p", mp); |
555 | if (mp->count || mp->nohomeok) { | 555 | if (mp->count || mp->nohomeok || |
556 | test_bit(META_dirty, &mp->flag)) { | ||
556 | jfs_info("count = %ld, nohomeok = %d", mp->count, | 557 | jfs_info("count = %ld, nohomeok = %d", mp->count, |
557 | mp->nohomeok); | 558 | mp->nohomeok); |
558 | busy = 1; | 559 | ret = 0; |
559 | continue; | 560 | continue; |
560 | } | 561 | } |
561 | wait_on_page_writeback(page); | ||
562 | //WARN_ON(test_bit(META_dirty, &mp->flag)); | ||
563 | if (test_bit(META_dirty, &mp->flag)) { | ||
564 | dump_mem("dirty mp in metapage_releasepage", mp, | ||
565 | sizeof(struct metapage)); | ||
566 | dump_mem("page", page, sizeof(struct page)); | ||
567 | dump_stack(); | ||
568 | } | ||
569 | if (mp->lsn) | 562 | if (mp->lsn) |
570 | remove_from_logsync(mp); | 563 | remove_from_logsync(mp); |
571 | remove_metapage(page, mp); | 564 | remove_metapage(page, mp); |
572 | INCREMENT(mpStat.pagefree); | 565 | INCREMENT(mpStat.pagefree); |
573 | free_metapage(mp); | 566 | free_metapage(mp); |
574 | } | 567 | } |
575 | if (busy) | 568 | return ret; |
576 | return -1; | ||
577 | |||
578 | return 0; | ||
579 | } | 569 | } |
580 | 570 | ||
581 | static void metapage_invalidatepage(struct page *page, unsigned long offset) | 571 | static void metapage_invalidatepage(struct page *page, unsigned long offset) |
diff --git a/fs/namespace.c b/fs/namespace.c index 2c5f1f80bdc2..bf478addb852 100644 --- a/fs/namespace.c +++ b/fs/namespace.c | |||
@@ -899,13 +899,11 @@ static int do_change_type(struct nameidata *nd, int flag) | |||
899 | /* | 899 | /* |
900 | * do loopback mount. | 900 | * do loopback mount. |
901 | */ | 901 | */ |
902 | static int do_loopback(struct nameidata *nd, char *old_name, unsigned long flags, int mnt_flags) | 902 | static int do_loopback(struct nameidata *nd, char *old_name, int recurse) |
903 | { | 903 | { |
904 | struct nameidata old_nd; | 904 | struct nameidata old_nd; |
905 | struct vfsmount *mnt = NULL; | 905 | struct vfsmount *mnt = NULL; |
906 | int recurse = flags & MS_REC; | ||
907 | int err = mount_is_safe(nd); | 906 | int err = mount_is_safe(nd); |
908 | |||
909 | if (err) | 907 | if (err) |
910 | return err; | 908 | return err; |
911 | if (!old_name || !*old_name) | 909 | if (!old_name || !*old_name) |
@@ -939,7 +937,6 @@ static int do_loopback(struct nameidata *nd, char *old_name, unsigned long flags | |||
939 | spin_unlock(&vfsmount_lock); | 937 | spin_unlock(&vfsmount_lock); |
940 | release_mounts(&umount_list); | 938 | release_mounts(&umount_list); |
941 | } | 939 | } |
942 | mnt->mnt_flags = mnt_flags; | ||
943 | 940 | ||
944 | out: | 941 | out: |
945 | up_write(&namespace_sem); | 942 | up_write(&namespace_sem); |
@@ -1353,7 +1350,7 @@ long do_mount(char *dev_name, char *dir_name, char *type_page, | |||
1353 | retval = do_remount(&nd, flags & ~MS_REMOUNT, mnt_flags, | 1350 | retval = do_remount(&nd, flags & ~MS_REMOUNT, mnt_flags, |
1354 | data_page); | 1351 | data_page); |
1355 | else if (flags & MS_BIND) | 1352 | else if (flags & MS_BIND) |
1356 | retval = do_loopback(&nd, dev_name, flags, mnt_flags); | 1353 | retval = do_loopback(&nd, dev_name, flags & MS_REC); |
1357 | else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE)) | 1354 | else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE)) |
1358 | retval = do_change_type(&nd, flags); | 1355 | retval = do_change_type(&nd, flags); |
1359 | else if (flags & MS_MOVE) | 1356 | else if (flags & MS_MOVE) |
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c index 4e0578121d9a..3eec30000f3f 100644 --- a/fs/nfsd/export.c +++ b/fs/nfsd/export.c | |||
@@ -1066,9 +1066,11 @@ exp_pseudoroot(struct auth_domain *clp, struct svc_fh *fhp, | |||
1066 | rv = nfserr_perm; | 1066 | rv = nfserr_perm; |
1067 | else if (IS_ERR(exp)) | 1067 | else if (IS_ERR(exp)) |
1068 | rv = nfserrno(PTR_ERR(exp)); | 1068 | rv = nfserrno(PTR_ERR(exp)); |
1069 | else | 1069 | else { |
1070 | rv = fh_compose(fhp, exp, | 1070 | rv = fh_compose(fhp, exp, |
1071 | fsid_key->ek_dentry, NULL); | 1071 | fsid_key->ek_dentry, NULL); |
1072 | exp_put(exp); | ||
1073 | } | ||
1072 | cache_put(&fsid_key->h, &svc_expkey_cache); | 1074 | cache_put(&fsid_key->h, &svc_expkey_cache); |
1073 | return rv; | 1075 | return rv; |
1074 | } | 1076 | } |
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index 6aa92d0e6876..1d65f13f458c 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c | |||
@@ -1922,11 +1922,10 @@ nfsd_set_posix_acl(struct svc_fh *fhp, int type, struct posix_acl *acl) | |||
1922 | value = kmalloc(size, GFP_KERNEL); | 1922 | value = kmalloc(size, GFP_KERNEL); |
1923 | if (!value) | 1923 | if (!value) |
1924 | return -ENOMEM; | 1924 | return -ENOMEM; |
1925 | size = posix_acl_to_xattr(acl, value, size); | 1925 | error = posix_acl_to_xattr(acl, value, size); |
1926 | if (size < 0) { | 1926 | if (error < 0) |
1927 | error = size; | ||
1928 | goto getout; | 1927 | goto getout; |
1929 | } | 1928 | size = error; |
1930 | } else | 1929 | } else |
1931 | size = 0; | 1930 | size = 0; |
1932 | 1931 | ||
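
The nfsd hunk keeps posix_acl_to_xattr()'s return in the signed error variable, tests it, and only then treats it as a size, presumably because the destination size variable is unsigned and a negative return would otherwise be lost. A small sketch of that failure mode and of the fixed shape; serialize() is an illustrative stand-in, not a kernel API:

    #include <stdio.h>

    /* Stand-in for a helper that returns a byte count or a negative errno. */
    static int serialize(char *buf, size_t buflen)
    {
        (void)buf;
        (void)buflen;
        return -22;                      /* pretend it failed with -EINVAL */
    }

    int main(void)
    {
        char buf[64];
        size_t size;
        int error;

        /* Broken shape: assigning the negative return straight into an
         * unsigned size turns the failure into a huge length. */
        size = (size_t)serialize(buf, sizeof(buf));
        printf("size after direct assignment: %zu (error lost)\n", size);

        /* Fixed shape, as in the hunk above: keep the signed value,
         * check it, and only then hand it over as a size. */
        error = serialize(buf, sizeof(buf));
        if (error < 0) {
            printf("propagating error %d\n", error);
            return 1;
        }
        size = (size_t)error;
        printf("size: %zu\n", size);
        return 0;
    }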
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index 0d858d0b25be..47152bf9a7f2 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c | |||
@@ -276,13 +276,29 @@ static int ocfs2_writepage(struct page *page, struct writeback_control *wbc) | |||
276 | return ret; | 276 | return ret; |
277 | } | 277 | } |
278 | 278 | ||
279 | /* This can also be called from ocfs2_write_zero_page() which has done | ||
280 | * it's own cluster locking. */ | ||
281 | int ocfs2_prepare_write_nolock(struct inode *inode, struct page *page, | ||
282 | unsigned from, unsigned to) | ||
283 | { | ||
284 | int ret; | ||
285 | |||
286 | down_read(&OCFS2_I(inode)->ip_alloc_sem); | ||
287 | |||
288 | ret = block_prepare_write(page, from, to, ocfs2_get_block); | ||
289 | |||
290 | up_read(&OCFS2_I(inode)->ip_alloc_sem); | ||
291 | |||
292 | return ret; | ||
293 | } | ||
294 | |||
279 | /* | 295 | /* |
280 | * ocfs2_prepare_write() can be an outer-most ocfs2 call when it is called | 296 | * ocfs2_prepare_write() can be an outer-most ocfs2 call when it is called |
281 | * from loopback. It must be able to perform its own locking around | 297 | * from loopback. It must be able to perform its own locking around |
282 | * ocfs2_get_block(). | 298 | * ocfs2_get_block(). |
283 | */ | 299 | */ |
284 | int ocfs2_prepare_write(struct file *file, struct page *page, | 300 | static int ocfs2_prepare_write(struct file *file, struct page *page, |
285 | unsigned from, unsigned to) | 301 | unsigned from, unsigned to) |
286 | { | 302 | { |
287 | struct inode *inode = page->mapping->host; | 303 | struct inode *inode = page->mapping->host; |
288 | int ret; | 304 | int ret; |
@@ -295,11 +311,7 @@ int ocfs2_prepare_write(struct file *file, struct page *page, | |||
295 | goto out; | 311 | goto out; |
296 | } | 312 | } |
297 | 313 | ||
298 | down_read(&OCFS2_I(inode)->ip_alloc_sem); | 314 | ret = ocfs2_prepare_write_nolock(inode, page, from, to); |
299 | |||
300 | ret = block_prepare_write(page, from, to, ocfs2_get_block); | ||
301 | |||
302 | up_read(&OCFS2_I(inode)->ip_alloc_sem); | ||
303 | 315 | ||
304 | ocfs2_meta_unlock(inode, 0); | 316 | ocfs2_meta_unlock(inode, 0); |
305 | out: | 317 | out: |
@@ -625,11 +637,31 @@ static ssize_t ocfs2_direct_IO(int rw, | |||
625 | int ret; | 637 | int ret; |
626 | 638 | ||
627 | mlog_entry_void(); | 639 | mlog_entry_void(); |
640 | |||
641 | /* | ||
642 | * We get PR data locks even for O_DIRECT. This allows | ||
643 | * concurrent O_DIRECT I/O but doesn't let O_DIRECT with | ||
644 | * extending and buffered zeroing writes race. If they did | ||
645 | * race then the buffered zeroing could be written back after | ||
646 | * the O_DIRECT I/O. It's one thing to tell people not to mix | ||
647 | * buffered and O_DIRECT writes, but expecting them to | ||
648 | * understand that file extension is also an implicit buffered | ||
649 | * write is too much. By getting the PR we force writeback of | ||
650 | * the buffered zeroing before proceeding. | ||
651 | */ | ||
652 | ret = ocfs2_data_lock(inode, 0); | ||
653 | if (ret < 0) { | ||
654 | mlog_errno(ret); | ||
655 | goto out; | ||
656 | } | ||
657 | ocfs2_data_unlock(inode, 0); | ||
658 | |||
628 | ret = blockdev_direct_IO_no_locking(rw, iocb, inode, | 659 | ret = blockdev_direct_IO_no_locking(rw, iocb, inode, |
629 | inode->i_sb->s_bdev, iov, offset, | 660 | inode->i_sb->s_bdev, iov, offset, |
630 | nr_segs, | 661 | nr_segs, |
631 | ocfs2_direct_IO_get_blocks, | 662 | ocfs2_direct_IO_get_blocks, |
632 | ocfs2_dio_end_io); | 663 | ocfs2_dio_end_io); |
664 | out: | ||
633 | mlog_exit(ret); | 665 | mlog_exit(ret); |
634 | return ret; | 666 | return ret; |
635 | } | 667 | } |
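
The aops.c change above splits the prepare_write path into a locking wrapper and a _nolock helper, so ocfs2_write_zero_page(), which already holds its own cluster lock, can call the helper directly. A user-space sketch of that wrapper/_nolock split, using a pthread rwlock purely as a stand-in for the cluster lock and illustrative function names:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t cluster_lock = PTHREAD_RWLOCK_INITIALIZER;

    /* Core work; the caller is expected to already hold cluster_lock. */
    static int prepare_write_nolock(int page)
    {
        printf("preparing page %d (lock held by caller)\n", page);
        return 0;
    }

    /* Outer entry point: takes the lock, then defers to the _nolock helper. */
    static int prepare_write(int page)
    {
        int ret;

        pthread_rwlock_rdlock(&cluster_lock);
        ret = prepare_write_nolock(page);
        pthread_rwlock_unlock(&cluster_lock);
        return ret;
    }

    int main(void)
    {
        /* Ordinary caller goes through the locking wrapper. */
        prepare_write(1);

        /* A caller that already holds the lock calls the _nolock variant. */
        pthread_rwlock_rdlock(&cluster_lock);
        prepare_write_nolock(2);
        pthread_rwlock_unlock(&cluster_lock);
        return 0;
    }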
diff --git a/fs/ocfs2/aops.h b/fs/ocfs2/aops.h index d40456d509a0..e88c3f0b8fa9 100644 --- a/fs/ocfs2/aops.h +++ b/fs/ocfs2/aops.h | |||
@@ -22,8 +22,8 @@ | |||
22 | #ifndef OCFS2_AOPS_H | 22 | #ifndef OCFS2_AOPS_H |
23 | #define OCFS2_AOPS_H | 23 | #define OCFS2_AOPS_H |
24 | 24 | ||
25 | int ocfs2_prepare_write(struct file *file, struct page *page, | 25 | int ocfs2_prepare_write_nolock(struct inode *inode, struct page *page, |
26 | unsigned from, unsigned to); | 26 | unsigned from, unsigned to); |
27 | 27 | ||
28 | struct ocfs2_journal_handle *ocfs2_start_walk_page_trans(struct inode *inode, | 28 | struct ocfs2_journal_handle *ocfs2_start_walk_page_trans(struct inode *inode, |
29 | struct page *page, | 29 | struct page *page, |
diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c index 4601fc256f11..1a5c69071df6 100644 --- a/fs/ocfs2/extent_map.c +++ b/fs/ocfs2/extent_map.c | |||
@@ -569,7 +569,7 @@ static int ocfs2_extent_map_insert(struct inode *inode, | |||
569 | 569 | ||
570 | ret = -ENOMEM; | 570 | ret = -ENOMEM; |
571 | ctxt.new_ent = kmem_cache_alloc(ocfs2_em_ent_cachep, | 571 | ctxt.new_ent = kmem_cache_alloc(ocfs2_em_ent_cachep, |
572 | GFP_KERNEL); | 572 | GFP_NOFS); |
573 | if (!ctxt.new_ent) { | 573 | if (!ctxt.new_ent) { |
574 | mlog_errno(ret); | 574 | mlog_errno(ret); |
575 | return ret; | 575 | return ret; |
@@ -583,14 +583,14 @@ static int ocfs2_extent_map_insert(struct inode *inode, | |||
583 | if (ctxt.need_left && !ctxt.left_ent) { | 583 | if (ctxt.need_left && !ctxt.left_ent) { |
584 | ctxt.left_ent = | 584 | ctxt.left_ent = |
585 | kmem_cache_alloc(ocfs2_em_ent_cachep, | 585 | kmem_cache_alloc(ocfs2_em_ent_cachep, |
586 | GFP_KERNEL); | 586 | GFP_NOFS); |
587 | if (!ctxt.left_ent) | 587 | if (!ctxt.left_ent) |
588 | break; | 588 | break; |
589 | } | 589 | } |
590 | if (ctxt.need_right && !ctxt.right_ent) { | 590 | if (ctxt.need_right && !ctxt.right_ent) { |
591 | ctxt.right_ent = | 591 | ctxt.right_ent = |
592 | kmem_cache_alloc(ocfs2_em_ent_cachep, | 592 | kmem_cache_alloc(ocfs2_em_ent_cachep, |
593 | GFP_KERNEL); | 593 | GFP_NOFS); |
594 | if (!ctxt.right_ent) | 594 | if (!ctxt.right_ent) |
595 | break; | 595 | break; |
596 | } | 596 | } |
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 581eb451a41a..a9559c874530 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c | |||
@@ -613,7 +613,8 @@ leave: | |||
613 | 613 | ||
614 | /* Some parts of this taken from generic_cont_expand, which turned out | 614 | /* Some parts of this taken from generic_cont_expand, which turned out |
615 | * to be too fragile to do exactly what we need without us having to | 615 | * to be too fragile to do exactly what we need without us having to |
616 | * worry about recursive locking in ->commit_write(). */ | 616 | * worry about recursive locking in ->prepare_write() and |
617 | * ->commit_write(). */ | ||
617 | static int ocfs2_write_zero_page(struct inode *inode, | 618 | static int ocfs2_write_zero_page(struct inode *inode, |
618 | u64 size) | 619 | u64 size) |
619 | { | 620 | { |
@@ -641,7 +642,7 @@ static int ocfs2_write_zero_page(struct inode *inode, | |||
641 | goto out; | 642 | goto out; |
642 | } | 643 | } |
643 | 644 | ||
644 | ret = ocfs2_prepare_write(NULL, page, offset, offset); | 645 | ret = ocfs2_prepare_write_nolock(inode, page, offset, offset); |
645 | if (ret < 0) { | 646 | if (ret < 0) { |
646 | mlog_errno(ret); | 647 | mlog_errno(ret); |
647 | goto out_unlock; | 648 | goto out_unlock; |
@@ -695,13 +696,26 @@ out: | |||
695 | return ret; | 696 | return ret; |
696 | } | 697 | } |
697 | 698 | ||
699 | /* | ||
700 | * A tail_to_skip value > 0 indicates that we're being called from | ||
701 | * ocfs2_file_aio_write(). This has the following implications: | ||
702 | * | ||
703 | * - we don't want to update i_size | ||
704 | * - di_bh will be NULL, which is fine because it's only used in the | ||
705 | * case where we want to update i_size. | ||
706 | * - ocfs2_zero_extend() will then only be filling the hole created | ||
707 | * between i_size and the start of the write. | ||
708 | */ | ||
698 | static int ocfs2_extend_file(struct inode *inode, | 709 | static int ocfs2_extend_file(struct inode *inode, |
699 | struct buffer_head *di_bh, | 710 | struct buffer_head *di_bh, |
700 | u64 new_i_size) | 711 | u64 new_i_size, |
712 | size_t tail_to_skip) | ||
701 | { | 713 | { |
702 | int ret = 0; | 714 | int ret = 0; |
703 | u32 clusters_to_add; | 715 | u32 clusters_to_add; |
704 | 716 | ||
717 | BUG_ON(!tail_to_skip && !di_bh); | ||
718 | |||
705 | /* setattr sometimes calls us like this. */ | 719 | /* setattr sometimes calls us like this. */ |
706 | if (new_i_size == 0) | 720 | if (new_i_size == 0) |
707 | goto out; | 721 | goto out; |
@@ -714,27 +728,44 @@ static int ocfs2_extend_file(struct inode *inode, | |||
714 | OCFS2_I(inode)->ip_clusters; | 728 | OCFS2_I(inode)->ip_clusters; |
715 | 729 | ||
716 | if (clusters_to_add) { | 730 | if (clusters_to_add) { |
717 | ret = ocfs2_extend_allocation(inode, clusters_to_add); | 731 | /* |
732 | * protect the pages that ocfs2_zero_extend is going to | ||
733 | * be pulling into the page cache. We do this before the | ||
734 | * metadata extend so that we don't get into the situation | ||
735 | * where we've extended the metadata but can't get the data | ||
736 | * lock to zero. | ||
737 | */ | ||
738 | ret = ocfs2_data_lock(inode, 1); | ||
718 | if (ret < 0) { | 739 | if (ret < 0) { |
719 | mlog_errno(ret); | 740 | mlog_errno(ret); |
720 | goto out; | 741 | goto out; |
721 | } | 742 | } |
722 | 743 | ||
723 | ret = ocfs2_zero_extend(inode, new_i_size); | 744 | ret = ocfs2_extend_allocation(inode, clusters_to_add); |
724 | if (ret < 0) { | 745 | if (ret < 0) { |
725 | mlog_errno(ret); | 746 | mlog_errno(ret); |
726 | goto out; | 747 | goto out_unlock; |
727 | } | 748 | } |
728 | } | ||
729 | 749 | ||
730 | /* No allocation required, we just use this helper to | 750 | ret = ocfs2_zero_extend(inode, (u64)new_i_size - tail_to_skip); |
731 | * do a trivial update of i_size. */ | 751 | if (ret < 0) { |
732 | ret = ocfs2_simple_size_update(inode, di_bh, new_i_size); | 752 | mlog_errno(ret); |
733 | if (ret < 0) { | 753 | goto out_unlock; |
734 | mlog_errno(ret); | 754 | } |
735 | goto out; | 755 | } |
756 | |||
757 | if (!tail_to_skip) { | ||
758 | /* We're being called from ocfs2_setattr() which wants | ||
759 | * us to update i_size */ | ||
760 | ret = ocfs2_simple_size_update(inode, di_bh, new_i_size); | ||
761 | if (ret < 0) | ||
762 | mlog_errno(ret); | ||
736 | } | 763 | } |
737 | 764 | ||
765 | out_unlock: | ||
766 | if (clusters_to_add) /* this is the only case in which we lock */ | ||
767 | ocfs2_data_unlock(inode, 1); | ||
768 | |||
738 | out: | 769 | out: |
739 | return ret; | 770 | return ret; |
740 | } | 771 | } |
@@ -793,7 +824,7 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr) | |||
793 | if (i_size_read(inode) > attr->ia_size) | 824 | if (i_size_read(inode) > attr->ia_size) |
794 | status = ocfs2_truncate_file(inode, bh, attr->ia_size); | 825 | status = ocfs2_truncate_file(inode, bh, attr->ia_size); |
795 | else | 826 | else |
796 | status = ocfs2_extend_file(inode, bh, attr->ia_size); | 827 | status = ocfs2_extend_file(inode, bh, attr->ia_size, 0); |
797 | if (status < 0) { | 828 | if (status < 0) { |
798 | if (status != -ENOSPC) | 829 | if (status != -ENOSPC) |
799 | mlog_errno(status); | 830 | mlog_errno(status); |
@@ -1049,21 +1080,12 @@ static ssize_t ocfs2_file_aio_write(struct kiocb *iocb, | |||
1049 | if (!clusters) | 1080 | if (!clusters) |
1050 | break; | 1081 | break; |
1051 | 1082 | ||
1052 | ret = ocfs2_extend_allocation(inode, clusters); | 1083 | ret = ocfs2_extend_file(inode, NULL, newsize, count); |
1053 | if (ret < 0) { | 1084 | if (ret < 0) { |
1054 | if (ret != -ENOSPC) | 1085 | if (ret != -ENOSPC) |
1055 | mlog_errno(ret); | 1086 | mlog_errno(ret); |
1056 | goto out; | 1087 | goto out; |
1057 | } | 1088 | } |
1058 | |||
1059 | /* Fill any holes which would've been created by this | ||
1060 | * write. If we're O_APPEND, this will wind up | ||
1061 | * (correctly) being a noop. */ | ||
1062 | ret = ocfs2_zero_extend(inode, (u64) newsize - count); | ||
1063 | if (ret < 0) { | ||
1064 | mlog_errno(ret); | ||
1065 | goto out; | ||
1066 | } | ||
1067 | break; | 1089 | break; |
1068 | } | 1090 | } |
1069 | 1091 | ||
@@ -1146,6 +1168,22 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb, | |||
1146 | ocfs2_iocb_set_rw_locked(iocb); | 1168 | ocfs2_iocb_set_rw_locked(iocb); |
1147 | } | 1169 | } |
1148 | 1170 | ||
1171 | /* | ||
1172 | * We're fine letting folks race truncates and extending | ||
1173 | * writes with read across the cluster, just like they can | ||
1174 | * locally. Hence no rw_lock during read. | ||
1175 | * | ||
1176 | * Take and drop the metadata lock to update inode fields | ||
1177 | * like i_size. This gives the checks down in | ||
1178 | * generic_file_aio_read() a chance of actually working. | ||
1179 | */ | ||
1180 | ret = ocfs2_meta_lock(inode, NULL, NULL, 0); | ||
1181 | if (ret < 0) { | ||
1182 | mlog_errno(ret); | ||
1183 | goto bail; | ||
1184 | } | ||
1185 | ocfs2_meta_unlock(inode, 0); | ||
1186 | |||
1149 | ret = generic_file_aio_read(iocb, buf, count, iocb->ki_pos); | 1187 | ret = generic_file_aio_read(iocb, buf, count, iocb->ki_pos); |
1150 | if (ret == -EINVAL) | 1188 | if (ret == -EINVAL) |
1151 | mlog(ML_ERROR, "generic_file_aio_read returned -EINVAL\n"); | 1189 | mlog(ML_ERROR, "generic_file_aio_read returned -EINVAL\n"); |
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c index 6a610ae53583..eebc3cfa6be8 100644 --- a/fs/ocfs2/journal.c +++ b/fs/ocfs2/journal.c | |||
@@ -117,7 +117,7 @@ struct ocfs2_journal_handle *ocfs2_alloc_handle(struct ocfs2_super *osb) | |||
117 | { | 117 | { |
118 | struct ocfs2_journal_handle *retval = NULL; | 118 | struct ocfs2_journal_handle *retval = NULL; |
119 | 119 | ||
120 | retval = kcalloc(1, sizeof(*retval), GFP_KERNEL); | 120 | retval = kcalloc(1, sizeof(*retval), GFP_NOFS); |
121 | if (!retval) { | 121 | if (!retval) { |
122 | mlog(ML_ERROR, "Failed to allocate memory for journal " | 122 | mlog(ML_ERROR, "Failed to allocate memory for journal " |
123 | "handle!\n"); | 123 | "handle!\n"); |
@@ -870,9 +870,11 @@ static int ocfs2_force_read_journal(struct inode *inode) | |||
870 | if (p_blocks > CONCURRENT_JOURNAL_FILL) | 870 | if (p_blocks > CONCURRENT_JOURNAL_FILL) |
871 | p_blocks = CONCURRENT_JOURNAL_FILL; | 871 | p_blocks = CONCURRENT_JOURNAL_FILL; |
872 | 872 | ||
873 | /* We are reading journal data which should not | ||
874 | * be put in the uptodate cache */ | ||
873 | status = ocfs2_read_blocks(OCFS2_SB(inode->i_sb), | 875 | status = ocfs2_read_blocks(OCFS2_SB(inode->i_sb), |
874 | p_blkno, p_blocks, bhs, 0, | 876 | p_blkno, p_blocks, bhs, 0, |
875 | inode); | 877 | NULL); |
876 | if (status < 0) { | 878 | if (status < 0) { |
877 | mlog_errno(status); | 879 | mlog_errno(status); |
878 | goto bail; | 880 | goto bail; |
@@ -982,7 +984,7 @@ static void ocfs2_queue_recovery_completion(struct ocfs2_journal *journal, | |||
982 | { | 984 | { |
983 | struct ocfs2_la_recovery_item *item; | 985 | struct ocfs2_la_recovery_item *item; |
984 | 986 | ||
985 | item = kmalloc(sizeof(struct ocfs2_la_recovery_item), GFP_KERNEL); | 987 | item = kmalloc(sizeof(struct ocfs2_la_recovery_item), GFP_NOFS); |
986 | if (!item) { | 988 | if (!item) { |
987 | /* Though we wish to avoid it, we are in fact safe in | 989 | /* Though we wish to avoid it, we are in fact safe in |
988 | * skipping local alloc cleanup as fsck.ocfs2 is more | 990 | * skipping local alloc cleanup as fsck.ocfs2 is more |
diff --git a/fs/ocfs2/uptodate.c b/fs/ocfs2/uptodate.c index 04a684dfdd96..b8a00a793326 100644 --- a/fs/ocfs2/uptodate.c +++ b/fs/ocfs2/uptodate.c | |||
@@ -337,7 +337,7 @@ static void __ocfs2_set_buffer_uptodate(struct ocfs2_inode_info *oi, | |||
337 | (unsigned long long)oi->ip_blkno, | 337 | (unsigned long long)oi->ip_blkno, |
338 | (unsigned long long)block, expand_tree); | 338 | (unsigned long long)block, expand_tree); |
339 | 339 | ||
340 | new = kmem_cache_alloc(ocfs2_uptodate_cachep, GFP_KERNEL); | 340 | new = kmem_cache_alloc(ocfs2_uptodate_cachep, GFP_NOFS); |
341 | if (!new) { | 341 | if (!new) { |
342 | mlog_errno(-ENOMEM); | 342 | mlog_errno(-ENOMEM); |
343 | return; | 343 | return; |
@@ -349,7 +349,7 @@ static void __ocfs2_set_buffer_uptodate(struct ocfs2_inode_info *oi, | |||
349 | * has no way of tracking that. */ | 349 | * has no way of tracking that. */ |
350 | for(i = 0; i < OCFS2_INODE_MAX_CACHE_ARRAY; i++) { | 350 | for(i = 0; i < OCFS2_INODE_MAX_CACHE_ARRAY; i++) { |
351 | tree[i] = kmem_cache_alloc(ocfs2_uptodate_cachep, | 351 | tree[i] = kmem_cache_alloc(ocfs2_uptodate_cachep, |
352 | GFP_KERNEL); | 352 | GFP_NOFS); |
353 | if (!tree[i]) { | 353 | if (!tree[i]) { |
354 | mlog_errno(-ENOMEM); | 354 | mlog_errno(-ENOMEM); |
355 | goto out_free; | 355 | goto out_free; |
diff --git a/fs/ocfs2/vote.c b/fs/ocfs2/vote.c index 53049a204197..ee42765a8553 100644 --- a/fs/ocfs2/vote.c +++ b/fs/ocfs2/vote.c | |||
@@ -586,7 +586,7 @@ static struct ocfs2_net_wait_ctxt *ocfs2_new_net_wait_ctxt(unsigned int response | |||
586 | { | 586 | { |
587 | struct ocfs2_net_wait_ctxt *w; | 587 | struct ocfs2_net_wait_ctxt *w; |
588 | 588 | ||
589 | w = kcalloc(1, sizeof(*w), GFP_KERNEL); | 589 | w = kcalloc(1, sizeof(*w), GFP_NOFS); |
590 | if (!w) { | 590 | if (!w) { |
591 | mlog_errno(-ENOMEM); | 591 | mlog_errno(-ENOMEM); |
592 | goto bail; | 592 | goto bail; |
@@ -749,7 +749,7 @@ static struct ocfs2_vote_msg * ocfs2_new_vote_request(struct ocfs2_super *osb, | |||
749 | 749 | ||
750 | BUG_ON(!ocfs2_is_valid_vote_request(type)); | 750 | BUG_ON(!ocfs2_is_valid_vote_request(type)); |
751 | 751 | ||
752 | request = kcalloc(1, sizeof(*request), GFP_KERNEL); | 752 | request = kcalloc(1, sizeof(*request), GFP_NOFS); |
753 | if (!request) { | 753 | if (!request) { |
754 | mlog_errno(-ENOMEM); | 754 | mlog_errno(-ENOMEM); |
755 | } else { | 755 | } else { |
@@ -1129,7 +1129,7 @@ static int ocfs2_handle_vote_message(struct o2net_msg *msg, | |||
1129 | struct ocfs2_super *osb = data; | 1129 | struct ocfs2_super *osb = data; |
1130 | struct ocfs2_vote_work *work; | 1130 | struct ocfs2_vote_work *work; |
1131 | 1131 | ||
1132 | work = kmalloc(sizeof(struct ocfs2_vote_work), GFP_KERNEL); | 1132 | work = kmalloc(sizeof(struct ocfs2_vote_work), GFP_NOFS); |
1133 | if (!work) { | 1133 | if (!work) { |
1134 | status = -ENOMEM; | 1134 | status = -ENOMEM; |
1135 | mlog_errno(status); | 1135 | mlog_errno(status); |
diff --git a/fs/open.c b/fs/open.c --- a/fs/open.c +++ b/fs/open.c | |||
@@ -1124,7 +1124,6 @@ asmlinkage long sys_openat(int dfd, const char __user *filename, int flags, | |||
1124 | prevent_tail_call(ret); | 1124 | prevent_tail_call(ret); |
1125 | return ret; | 1125 | return ret; |
1126 | } | 1126 | } |
1127 | EXPORT_SYMBOL_GPL(sys_openat); | ||
1128 | 1127 | ||
1129 | #ifndef __alpha__ | 1128 | #ifndef __alpha__ |
1130 | 1129 | ||
diff --git a/fs/partitions/check.c b/fs/partitions/check.c index 45ae7dd3c650..7ef1f094de91 100644 --- a/fs/partitions/check.c +++ b/fs/partitions/check.c | |||
@@ -533,6 +533,7 @@ void del_gendisk(struct gendisk *disk) | |||
533 | 533 | ||
534 | devfs_remove_disk(disk); | 534 | devfs_remove_disk(disk); |
535 | 535 | ||
536 | kobject_uevent(&disk->kobj, KOBJ_REMOVE); | ||
536 | if (disk->holder_dir) | 537 | if (disk->holder_dir) |
537 | kobject_unregister(disk->holder_dir); | 538 | kobject_unregister(disk->holder_dir); |
538 | if (disk->slave_dir) | 539 | if (disk->slave_dir) |
@@ -545,7 +546,7 @@ void del_gendisk(struct gendisk *disk) | |||
545 | kfree(disk_name); | 546 | kfree(disk_name); |
546 | } | 547 | } |
547 | put_device(disk->driverfs_dev); | 548 | put_device(disk->driverfs_dev); |
549 | disk->driverfs_dev = NULL; | ||
548 | } | 550 | } |
549 | kobject_uevent(&disk->kobj, KOBJ_REMOVE); | ||
550 | kobject_del(&disk->kobj); | 551 | kobject_del(&disk->kobj); |
551 | } | 552 | } |
diff --git a/fs/smbfs/dir.c b/fs/smbfs/dir.c index 34c7a11d91f0..70d9c5a37f5a 100644 --- a/fs/smbfs/dir.c +++ b/fs/smbfs/dir.c | |||
@@ -434,6 +434,11 @@ smb_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) | |||
434 | if (dentry->d_name.len > SMB_MAXNAMELEN) | 434 | if (dentry->d_name.len > SMB_MAXNAMELEN) |
435 | goto out; | 435 | goto out; |
436 | 436 | ||
437 | /* Do not allow lookup of names containing backslashes */ | ||
438 | error = -EINVAL; | ||
439 | if (memchr(dentry->d_name.name, '\\', dentry->d_name.len)) | ||
440 | goto out; | ||
441 | |||
437 | lock_kernel(); | 442 | lock_kernel(); |
438 | error = smb_proc_getattr(dentry, &finfo); | 443 | error = smb_proc_getattr(dentry, &finfo); |
439 | #ifdef SMBFS_PARANOIA | 444 | #ifdef SMBFS_PARANOIA |
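
The smb_lookup() hunk rejects names containing a backslash by running memchr() over the counted dentry name, which need not be NUL-terminated. A standalone illustration of the same check:

    #include <stdio.h>
    #include <string.h>

    /* Reject a counted (not necessarily NUL-terminated) name that contains
     * a backslash, mirroring the check added to smb_lookup(). */
    static int name_is_valid(const char *name, size_t len)
    {
        return memchr(name, '\\', len) == NULL;
    }

    int main(void)
    {
        const char good[] = { 'f', 'o', 'o' };      /* no terminator on purpose */
        const char bad[]  = { 'a', '\\', 'b' };

        printf("foo valid: %d\n", name_is_valid(good, sizeof(good)));
        printf("a\\b valid: %d\n", name_is_valid(bad, sizeof(bad)));
        return 0;
    }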
diff --git a/fs/smbfs/request.c b/fs/smbfs/request.c index c71c375863cc..c71dd2760d32 100644 --- a/fs/smbfs/request.c +++ b/fs/smbfs/request.c | |||
@@ -339,9 +339,11 @@ int smb_add_request(struct smb_request *req) | |||
339 | /* | 339 | /* |
340 | * On timeout or on interrupt we want to try and remove the | 340 | * On timeout or on interrupt we want to try and remove the |
341 | * request from the recvq/xmitq. | 341 | * request from the recvq/xmitq. |
342 | * First check if the request is still part of a queue. (May | ||
343 | * have been removed by some error condition) | ||
342 | */ | 344 | */ |
343 | smb_lock_server(server); | 345 | smb_lock_server(server); |
344 | if (!(req->rq_flags & SMB_REQ_RECEIVED)) { | 346 | if (!list_empty(&req->rq_queue)) { |
345 | list_del_init(&req->rq_queue); | 347 | list_del_init(&req->rq_queue); |
346 | smb_rput(req); | 348 | smb_rput(req); |
347 | } | 349 | } |