author    KaiGai Kohei <kaigai@ak.jp.nec.com>	2006-05-18 11:43:53 -0400
committer KaiGai Kohei <kaigai@ak.jp.nec.com>	2006-05-18 11:43:53 -0400
commit    20a92fc74c5c91c7bc5693d51acc2b99aceb0465 (patch)
tree      41bf535f38ff1a29c560bcf622e9b4ef03c2c106 /fs
parent    21b9879bf2817aca343cdda11ade6a87f5373e74 (diff)
parent    f6a673b3f4f93c1c50e1b18f29254b0531b722a8 (diff)
Merge git://git.infradead.org/mtd-2.6
Diffstat (limited to 'fs')
 fs/9p/fcall.c         |  21
 fs/9p/mux.c           | 222
 fs/9p/mux.h           |   4
 fs/9p/vfs_file.c      |  13
 fs/9p/vfs_inode.c     |  19
 fs/autofs4/autofs_i.h |   5
 fs/autofs4/root.c     |  10
 fs/autofs4/waitq.c    |  77
 fs/compat.c           |   4
 fs/ext3/inode.c       |  13
 fs/jffs2/compr.c      |   4
 fs/jffs2/compr_zlib.c |   4
 fs/jffs2/file.c       |  20
 fs/jffs2/nodelist.c   |   4
 fs/jffs2/scan.c       |   6
 fs/jffs2/summary.c    |   2
 fs/jffs2/super.c      |  14
 fs/locks.c            |  21
 fs/namespace.c        |   7
 fs/open.c             |   1
 fs/partitions/check.c |   3
 fs/smbfs/dir.c        |   5
 fs/smbfs/request.c    |   4
 fs/splice.c           |  74
 fs/xfs/xfs_alloc.c    |   5
 fs/xfs/xfs_rename.c   |  12
 fs/xfs/xfs_vfsops.c   |  27
 fs/xfs/xfs_vnodeops.c |   2
28 files changed, 361 insertions(+), 242 deletions(-)
diff --git a/fs/9p/fcall.c b/fs/9p/fcall.c
index 71742ba150c4..6f2617820a4e 100644
--- a/fs/9p/fcall.c
+++ b/fs/9p/fcall.c
@@ -98,23 +98,20 @@ v9fs_t_attach(struct v9fs_session_info *v9ses, char *uname, char *aname,
 static void v9fs_t_clunk_cb(void *a, struct v9fs_fcall *tc,
 	struct v9fs_fcall *rc, int err)
 {
-	int fid;
+	int fid, id;
 	struct v9fs_session_info *v9ses;
 
-	if (err)
-		return;
-
+	id = 0;
 	fid = tc->params.tclunk.fid;
-	kfree(tc);
-
-	if (!rc)
-		return;
-
-	v9ses = a;
-	if (rc->id == RCLUNK)
-		v9fs_put_idpool(fid, &v9ses->fidpool);
+	if (rc)
+		id = rc->id;
 
+	kfree(tc);
 	kfree(rc);
+	if (id == RCLUNK) {
+		v9ses = a;
+		v9fs_put_idpool(fid, &v9ses->fidpool);
+	}
 }
 
 /**
diff --git a/fs/9p/mux.c b/fs/9p/mux.c
index 3e5b124a7212..f4407eb276c7 100644
--- a/fs/9p/mux.c
+++ b/fs/9p/mux.c
@@ -50,15 +50,23 @@ enum {
 	Wpending = 8,		/* can write */
 };
 
+enum {
+	None,
+	Flushing,
+	Flushed,
+};
+
 struct v9fs_mux_poll_task;
 
 struct v9fs_req {
+	spinlock_t lock;
 	int tag;
 	struct v9fs_fcall *tcall;
 	struct v9fs_fcall *rcall;
 	int err;
 	v9fs_mux_req_callback cb;
 	void *cba;
+	int flush;
 	struct list_head req_list;
 };
 
@@ -96,8 +104,8 @@ struct v9fs_mux_poll_task {
 
 struct v9fs_mux_rpc {
 	struct v9fs_mux_data *m;
-	struct v9fs_req *req;
 	int err;
+	struct v9fs_fcall *tcall;
 	struct v9fs_fcall *rcall;
 	wait_queue_head_t wqueue;
 };
@@ -524,10 +532,9 @@ again:
 
 static void process_request(struct v9fs_mux_data *m, struct v9fs_req *req)
 {
-	int ecode, tag;
+	int ecode;
 	struct v9fs_str *ename;
 
-	tag = req->tag;
 	if (!req->err && req->rcall->id == RERROR) {
 		ecode = req->rcall->params.rerror.errno;
 		ename = &req->rcall->params.rerror.error;
@@ -553,23 +560,6 @@ static void process_request(struct v9fs_mux_data *m, struct v9fs_req *req)
 		if (!req->err)
 			req->err = -EIO;
 	}
-
-	if (req->err == ERREQFLUSH)
-		return;
-
-	if (req->cb) {
-		dprintk(DEBUG_MUX, "calling callback tcall %p rcall %p\n",
-			req->tcall, req->rcall);
-
-		(*req->cb) (req->cba, req->tcall, req->rcall, req->err);
-		req->cb = NULL;
-	} else
-		kfree(req->rcall);
-
-	v9fs_mux_put_tag(m, tag);
-
-	wake_up(&m->equeue);
-	kfree(req);
 }
 
 /**
@@ -669,17 +659,26 @@ static void v9fs_read_work(void *a)
 		list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) {
 			if (rreq->tag == rcall->tag) {
 				req = rreq;
-				req->rcall = rcall;
-				list_del(&req->req_list);
-				spin_unlock(&m->lock);
-				process_request(m, req);
+				if (req->flush != Flushing)
+					list_del(&req->req_list);
 				break;
 			}
-
 		}
+		spin_unlock(&m->lock);
 
-		if (!req) {
-			spin_unlock(&m->lock);
+		if (req) {
+			req->rcall = rcall;
+			process_request(m, req);
+
+			if (req->flush != Flushing) {
+				if (req->cb)
+					(*req->cb) (req, req->cba);
+				else
+					kfree(req->rcall);
+
+				wake_up(&m->equeue);
+			}
+		} else {
 			if (err >= 0 && rcall->id != RFLUSH)
 				dprintk(DEBUG_ERROR,
 					"unexpected response mux %p id %d tag %d\n",
@@ -746,7 +745,6 @@ static struct v9fs_req *v9fs_send_request(struct v9fs_mux_data *m,
 		return ERR_PTR(-ENOMEM);
 
 	v9fs_set_tag(tc, n);
-
 	if ((v9fs_debug_level&DEBUG_FCALL) == DEBUG_FCALL) {
 		char buf[150];
 
@@ -754,12 +752,14 @@ static struct v9fs_req *v9fs_send_request(struct v9fs_mux_data *m,
 		printk(KERN_NOTICE "<<< %p %s\n", m, buf);
 	}
 
+	spin_lock_init(&req->lock);
 	req->tag = n;
 	req->tcall = tc;
 	req->rcall = NULL;
 	req->err = 0;
 	req->cb = cb;
 	req->cba = cba;
+	req->flush = None;
 
 	spin_lock(&m->lock);
 	list_add_tail(&req->req_list, &m->unsent_req_list);
@@ -776,72 +776,108 @@ static struct v9fs_req *v9fs_send_request(struct v9fs_mux_data *m,
 	return req;
 }
 
-static void v9fs_mux_flush_cb(void *a, struct v9fs_fcall *tc,
-		struct v9fs_fcall *rc, int err)
+static void v9fs_mux_free_request(struct v9fs_mux_data *m, struct v9fs_req *req)
+{
+	v9fs_mux_put_tag(m, req->tag);
+	kfree(req);
+}
+
+static void v9fs_mux_flush_cb(struct v9fs_req *freq, void *a)
 {
 	v9fs_mux_req_callback cb;
 	int tag;
 	struct v9fs_mux_data *m;
-	struct v9fs_req *req, *rptr;
+	struct v9fs_req *req, *rreq, *rptr;
 
 	m = a;
-	dprintk(DEBUG_MUX, "mux %p tc %p rc %p err %d oldtag %d\n", m, tc,
-		rc, err, tc->params.tflush.oldtag);
+	dprintk(DEBUG_MUX, "mux %p tc %p rc %p err %d oldtag %d\n", m,
+		freq->tcall, freq->rcall, freq->err,
+		freq->tcall->params.tflush.oldtag);
 
 	spin_lock(&m->lock);
 	cb = NULL;
-	tag = tc->params.tflush.oldtag;
-	list_for_each_entry_safe(req, rptr, &m->req_list, req_list) {
-		if (req->tag == tag) {
+	tag = freq->tcall->params.tflush.oldtag;
+	req = NULL;
+	list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) {
+		if (rreq->tag == tag) {
+			req = rreq;
 			list_del(&req->req_list);
-			if (req->cb) {
-				cb = req->cb;
-				req->cb = NULL;
-				spin_unlock(&m->lock);
-				(*cb) (req->cba, req->tcall, req->rcall,
-					req->err);
-			}
-			kfree(req);
-			wake_up(&m->equeue);
 			break;
 		}
 	}
+	spin_unlock(&m->lock);
 
-	if (!cb)
-		spin_unlock(&m->lock);
+	if (req) {
+		spin_lock(&req->lock);
+		req->flush = Flushed;
+		spin_unlock(&req->lock);
+
+		if (req->cb)
+			(*req->cb) (req, req->cba);
+		else
+			kfree(req->rcall);
+
+		wake_up(&m->equeue);
+	}
 
-	v9fs_mux_put_tag(m, tag);
-	kfree(tc);
-	kfree(rc);
+	kfree(freq->tcall);
+	kfree(freq->rcall);
+	v9fs_mux_free_request(m, freq);
 }
 
-static void
+static int
 v9fs_mux_flush_request(struct v9fs_mux_data *m, struct v9fs_req *req)
 {
 	struct v9fs_fcall *fc;
+	struct v9fs_req *rreq, *rptr;
 
 	dprintk(DEBUG_MUX, "mux %p req %p tag %d\n", m, req, req->tag);
 
+	/* if a response was received for a request, do nothing */
+	spin_lock(&req->lock);
+	if (req->rcall || req->err) {
+		spin_unlock(&req->lock);
+		dprintk(DEBUG_MUX, "mux %p req %p response already received\n", m, req);
+		return 0;
+	}
+
+	req->flush = Flushing;
+	spin_unlock(&req->lock);
+
+	spin_lock(&m->lock);
+	/* if the request is not sent yet, just remove it from the list */
+	list_for_each_entry_safe(rreq, rptr, &m->unsent_req_list, req_list) {
+		if (rreq->tag == req->tag) {
+			dprintk(DEBUG_MUX, "mux %p req %p request is not sent yet\n", m, req);
+			list_del(&rreq->req_list);
+			req->flush = Flushed;
+			spin_unlock(&m->lock);
+			if (req->cb)
+				(*req->cb) (req, req->cba);
+			return 0;
+		}
+	}
+	spin_unlock(&m->lock);
+
+	clear_thread_flag(TIF_SIGPENDING);
 	fc = v9fs_create_tflush(req->tag);
 	v9fs_send_request(m, fc, v9fs_mux_flush_cb, m);
+	return 1;
 }
 
 static void
-v9fs_mux_rpc_cb(void *a, struct v9fs_fcall *tc, struct v9fs_fcall *rc, int err)
+v9fs_mux_rpc_cb(struct v9fs_req *req, void *a)
 {
 	struct v9fs_mux_rpc *r;
 
-	if (err == ERREQFLUSH) {
-		kfree(rc);
-		dprintk(DEBUG_MUX, "err req flush\n");
-		return;
-	}
-
+	dprintk(DEBUG_MUX, "req %p r %p\n", req, a);
 	r = a;
-	dprintk(DEBUG_MUX, "mux %p req %p tc %p rc %p err %d\n", r->m, r->req,
-		tc, rc, err);
-	r->rcall = rc;
-	r->err = err;
+	r->rcall = req->rcall;
+	r->err = req->err;
+
+	if (req->flush!=None && !req->err)
+		r->err = -ERESTARTSYS;
+
 	wake_up(&r->wqueue);
 }
 
@@ -856,12 +892,13 @@ int
 v9fs_mux_rpc(struct v9fs_mux_data *m, struct v9fs_fcall *tc,
 	     struct v9fs_fcall **rc)
 {
-	int err;
+	int err, sigpending;
 	unsigned long flags;
 	struct v9fs_req *req;
 	struct v9fs_mux_rpc r;
 
 	r.err = 0;
+	r.tcall = tc;
 	r.rcall = NULL;
 	r.m = m;
 	init_waitqueue_head(&r.wqueue);
@@ -869,48 +906,50 @@ v9fs_mux_rpc(struct v9fs_mux_data *m, struct v9fs_fcall *tc,
 	if (rc)
 		*rc = NULL;
 
+	sigpending = 0;
+	if (signal_pending(current)) {
+		sigpending = 1;
+		clear_thread_flag(TIF_SIGPENDING);
+	}
+
 	req = v9fs_send_request(m, tc, v9fs_mux_rpc_cb, &r);
 	if (IS_ERR(req)) {
 		err = PTR_ERR(req);
 		dprintk(DEBUG_MUX, "error %d\n", err);
-		return PTR_ERR(req);
+		return err;
 	}
 
-	r.req = req;
-	dprintk(DEBUG_MUX, "mux %p tc %p tag %d rpc %p req %p\n", m, tc,
-		req->tag, &r, req);
 	err = wait_event_interruptible(r.wqueue, r.rcall != NULL || r.err < 0);
 	if (r.err < 0)
 		err = r.err;
 
 	if (err == -ERESTARTSYS && m->trans->status == Connected && m->err == 0) {
-		spin_lock(&m->lock);
-		req->tcall = NULL;
-		req->err = ERREQFLUSH;
-		spin_unlock(&m->lock);
+		if (v9fs_mux_flush_request(m, req)) {
+			/* wait until we get response of the flush message */
+			do {
+				clear_thread_flag(TIF_SIGPENDING);
+				err = wait_event_interruptible(r.wqueue,
+					r.rcall || r.err);
+			} while (!r.rcall && !r.err && err==-ERESTARTSYS &&
+				m->trans->status==Connected && !m->err);
+		}
+		sigpending = 1;
+	}
 
-		clear_thread_flag(TIF_SIGPENDING);
-		v9fs_mux_flush_request(m, req);
+	if (sigpending) {
 		spin_lock_irqsave(&current->sighand->siglock, flags);
 		recalc_sigpending();
 		spin_unlock_irqrestore(&current->sighand->siglock, flags);
 	}
 
-	if (!err) {
-		if (r.rcall)
-			dprintk(DEBUG_MUX, "got response id %d tag %d\n",
-				r.rcall->id, r.rcall->tag);
-
-		if (rc)
-			*rc = r.rcall;
-		else
-			kfree(r.rcall);
-	} else {
-		kfree(r.rcall);
-		dprintk(DEBUG_MUX, "got error %d\n", err);
-		if (err > 0)
-			err = -EIO;
-	}
+	if (rc)
+		*rc = r.rcall;
+	else
+		kfree(r.rcall);
+
+	v9fs_mux_free_request(m, req);
+	if (err > 0)
+		err = -EIO;
 
 	return err;
 }
@@ -951,12 +990,15 @@ void v9fs_mux_cancel(struct v9fs_mux_data *m, int err)
 	struct v9fs_req *req, *rtmp;
 	LIST_HEAD(cancel_list);
 
-	dprintk(DEBUG_MUX, "mux %p err %d\n", m, err);
+	dprintk(DEBUG_ERROR, "mux %p err %d\n", m, err);
 	m->err = err;
 	spin_lock(&m->lock);
 	list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) {
 		list_move(&req->req_list, &cancel_list);
 	}
+	list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) {
+		list_move(&req->req_list, &cancel_list);
+	}
 	spin_unlock(&m->lock);
 
 	list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) {
@@ -965,11 +1007,9 @@ void v9fs_mux_cancel(struct v9fs_mux_data *m, int err)
 		req->err = err;
 
 		if (req->cb)
-			(*req->cb) (req->cba, req->tcall, req->rcall, req->err);
+			(*req->cb) (req, req->cba);
 		else
 			kfree(req->rcall);
-
-		kfree(req);
 	}
 
 	wake_up(&m->equeue);
diff --git a/fs/9p/mux.h b/fs/9p/mux.h
index e90bfd32ea42..fb10c50186a1 100644
--- a/fs/9p/mux.h
+++ b/fs/9p/mux.h
@@ -24,6 +24,7 @@
  */
 
 struct v9fs_mux_data;
+struct v9fs_req;
 
 /**
  * v9fs_mux_req_callback - callback function that is called when the
@@ -36,8 +37,7 @@ struct v9fs_mux_data;
  * @rc - response call
  * @err - error code (non-zero if error occured)
  */
-typedef void (*v9fs_mux_req_callback)(void *a, struct v9fs_fcall *tc,
-	struct v9fs_fcall *rc, int err);
+typedef void (*v9fs_mux_req_callback)(struct v9fs_req *req, void *a);
 
 int v9fs_mux_global_init(void);
 void v9fs_mux_global_exit(void);
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index 083dcfcd158e..1a8e46084f0e 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -72,11 +72,17 @@ int v9fs_file_open(struct inode *inode, struct file *file)
 			return -ENOSPC;
 		}
 
-		err = v9fs_t_walk(v9ses, vfid->fid, fid, NULL, NULL);
+		err = v9fs_t_walk(v9ses, vfid->fid, fid, NULL, &fcall);
 		if (err < 0) {
 			dprintk(DEBUG_ERROR, "rewalk didn't work\n");
-			goto put_fid;
+			if (fcall && fcall->id == RWALK)
+				goto clunk_fid;
+			else {
+				v9fs_put_idpool(fid, &v9ses->fidpool);
+				goto free_fcall;
+			}
 		}
+		kfree(fcall);
 
 		/* TODO: do special things for O_EXCL, O_NOFOLLOW, O_SYNC */
 		/* translate open mode appropriately */
@@ -109,8 +115,7 @@ int v9fs_file_open(struct inode *inode, struct file *file)
 clunk_fid:
 	v9fs_t_clunk(v9ses, fid);
 
-put_fid:
-	v9fs_put_idpool(fid, &v9ses->fidpool);
+free_fcall:
 	kfree(fcall);
 
 	return err;
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 133db366d306..2cb87ba4b1c1 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -270,7 +270,10 @@ v9fs_create(struct v9fs_session_info *v9ses, u32 pfid, char *name, u32 perm,
 	err = v9fs_t_walk(v9ses, pfid, fid, NULL, &fcall);
 	if (err < 0) {
 		PRINT_FCALL_ERROR("clone error", fcall);
-		goto put_fid;
+		if (fcall && fcall->id == RWALK)
+			goto clunk_fid;
+		else
+			goto put_fid;
 	}
 	kfree(fcall);
 
@@ -322,6 +325,9 @@ v9fs_clone_walk(struct v9fs_session_info *v9ses, u32 fid, struct dentry *dentry)
 		&fcall);
 
 	if (err < 0) {
+		if (fcall && fcall->id == RWALK)
+			goto clunk_fid;
+
 		PRINT_FCALL_ERROR("walk error", fcall);
 		v9fs_put_idpool(nfid, &v9ses->fidpool);
 		goto error;
@@ -640,19 +646,26 @@ static struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
 	}
 
 	result = v9fs_t_walk(v9ses, dirfidnum, newfid,
-		(char *)dentry->d_name.name, NULL);
+		(char *)dentry->d_name.name, &fcall);
+
 	if (result < 0) {
-		v9fs_put_idpool(newfid, &v9ses->fidpool);
+		if (fcall && fcall->id == RWALK)
+			v9fs_t_clunk(v9ses, newfid);
+		else
+			v9fs_put_idpool(newfid, &v9ses->fidpool);
+
 		if (result == -ENOENT) {
 			d_add(dentry, NULL);
 			dprintk(DEBUG_VFS,
 				"Return negative dentry %p count %d\n",
 				dentry, atomic_read(&dentry->d_count));
+			kfree(fcall);
 			return NULL;
 		}
 		dprintk(DEBUG_ERROR, "walk error:%d\n", result);
 		goto FreeFcall;
 	}
+	kfree(fcall);
 
 	result = v9fs_t_stat(v9ses, newfid, &fcall);
 	if (result < 0) {
diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h
index 57c4903614e5..d6603d02304c 100644
--- a/fs/autofs4/autofs_i.h
+++ b/fs/autofs4/autofs_i.h
@@ -74,8 +74,8 @@ struct autofs_wait_queue {
 	struct autofs_wait_queue *next;
 	autofs_wqt_t wait_queue_token;
 	/* We use the following to see what we are waiting for */
-	int hash;
-	int len;
+	unsigned int hash;
+	unsigned int len;
 	char *name;
 	u32 dev;
 	u64 ino;
@@ -85,7 +85,6 @@
 	pid_t tgid;
 	/* This is for status reporting upon return */
 	int status;
-	atomic_t notify;
 	atomic_t wait_ctr;
 };
 
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
index 84e030c8ddd0..5100f984783f 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs4/root.c
@@ -327,6 +327,7 @@ static int try_to_fill_dentry(struct dentry *dentry, int flags)
 static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
 {
 	struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
+	struct autofs_info *ino = autofs4_dentry_ino(dentry);
 	int oz_mode = autofs4_oz_mode(sbi);
 	unsigned int lookup_type;
 	int status;
@@ -340,13 +341,8 @@ static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
 	if (oz_mode || !lookup_type)
 		goto done;
 
-	/*
-	 * If a request is pending wait for it.
-	 * If it's a mount then it won't be expired till at least
-	 * a liitle later and if it's an expire then we might need
-	 * to mount it again.
-	 */
-	if (autofs4_ispending(dentry)) {
+	/* If an expire request is pending wait for it. */
+	if (ino && (ino->flags & AUTOFS_INF_EXPIRING)) {
 		DPRINTK("waiting for active request %p name=%.*s",
 			dentry, dentry->d_name.len, dentry->d_name.name);
 
diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
index 142ab6aa2aa1..ce103e7b0bc3 100644
--- a/fs/autofs4/waitq.c
+++ b/fs/autofs4/waitq.c
@@ -189,14 +189,30 @@ static int autofs4_getpath(struct autofs_sb_info *sbi,
 	return len;
 }
 
+static struct autofs_wait_queue *
+autofs4_find_wait(struct autofs_sb_info *sbi,
+		  char *name, unsigned int hash, unsigned int len)
+{
+	struct autofs_wait_queue *wq;
+
+	for (wq = sbi->queues; wq; wq = wq->next) {
+		if (wq->hash == hash &&
+		    wq->len == len &&
+		    wq->name && !memcmp(wq->name, name, len))
+			break;
+	}
+	return wq;
+}
+
 int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
 		enum autofs_notify notify)
 {
+	struct autofs_info *ino;
 	struct autofs_wait_queue *wq;
 	char *name;
 	unsigned int len = 0;
 	unsigned int hash = 0;
-	int status;
+	int status, type;
 
 	/* In catatonic mode, we don't wait for nobody */
 	if (sbi->catatonic)
@@ -223,21 +239,41 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
 		return -EINTR;
 	}
 
-	for (wq = sbi->queues ; wq ; wq = wq->next) {
-		if (wq->hash == dentry->d_name.hash &&
-		    wq->len == len &&
-		    wq->name && !memcmp(wq->name, name, len))
-			break;
-	}
+	wq = autofs4_find_wait(sbi, name, hash, len);
+	ino = autofs4_dentry_ino(dentry);
+	if (!wq && ino && notify == NFY_NONE) {
+		/*
+		 * Either we've betean the pending expire to post it's
+		 * wait or it finished while we waited on the mutex.
+		 * So we need to wait till either, the wait appears
+		 * or the expire finishes.
+		 */
+
+		while (ino->flags & AUTOFS_INF_EXPIRING) {
+			mutex_unlock(&sbi->wq_mutex);
+			schedule_timeout_interruptible(HZ/10);
+			if (mutex_lock_interruptible(&sbi->wq_mutex)) {
+				kfree(name);
+				return -EINTR;
+			}
+			wq = autofs4_find_wait(sbi, name, hash, len);
+			if (wq)
+				break;
+		}
 
-	if (!wq) {
-		/* Can't wait for an expire if there's no mount */
-		if (notify == NFY_NONE && !d_mountpoint(dentry)) {
+		/*
+		 * Not ideal but the status has already gone. Of the two
+		 * cases where we wait on NFY_NONE neither depend on the
+		 * return status of the wait.
+		 */
+		if (!wq) {
 			kfree(name);
 			mutex_unlock(&sbi->wq_mutex);
-			return -ENOENT;
+			return 0;
 		}
+	}
 
+	if (!wq) {
 		/* Create a new wait queue */
 		wq = kmalloc(sizeof(struct autofs_wait_queue),GFP_KERNEL);
 		if (!wq) {
@@ -263,20 +299,7 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
 		wq->tgid = current->tgid;
 		wq->status = -EINTR; /* Status return if interrupted */
 		atomic_set(&wq->wait_ctr, 2);
-		atomic_set(&wq->notify, 1);
-		mutex_unlock(&sbi->wq_mutex);
-	} else {
-		atomic_inc(&wq->wait_ctr);
 		mutex_unlock(&sbi->wq_mutex);
-		kfree(name);
-		DPRINTK("existing wait id = 0x%08lx, name = %.*s, nfy=%d",
-			(unsigned long) wq->wait_queue_token, wq->len, wq->name, notify);
-	}
-
-	if (notify != NFY_NONE && atomic_read(&wq->notify)) {
-		int type;
-
-		atomic_dec(&wq->notify);
 
 		if (sbi->version < 5) {
 			if (notify == NFY_MOUNT)
@@ -299,6 +322,12 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
 
 		/* autofs4_notify_daemon() may block */
 		autofs4_notify_daemon(sbi, wq, type);
+	} else {
+		atomic_inc(&wq->wait_ctr);
+		mutex_unlock(&sbi->wq_mutex);
+		kfree(name);
+		DPRINTK("existing wait id = 0x%08lx, name = %.*s, nfy=%d",
+			(unsigned long) wq->wait_queue_token, wq->len, wq->name, notify);
 	}
 
 	/* wq->name is NULL if and only if the lock is already released */
diff --git a/fs/compat.c b/fs/compat.c
index 3f3e8f4d43d6..01f39f87f372 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -1323,7 +1323,7 @@ compat_sys_vmsplice(int fd, const struct compat_iovec __user *iov32,
 {
 	unsigned i;
 	struct iovec *iov;
-	if (nr_segs >= UIO_MAXIOV)
+	if (nr_segs > UIO_MAXIOV)
 		return -EINVAL;
 	iov = compat_alloc_user_space(nr_segs * sizeof(struct iovec));
 	for (i = 0; i < nr_segs; i++) {
@@ -1913,7 +1913,7 @@ asmlinkage long compat_sys_ppoll(struct pollfd __user *ufds,
 	}
 
 	if (sigmask) {
-		if (sigsetsize |= sizeof(compat_sigset_t))
+		if (sigsetsize != sizeof(compat_sigset_t))
 			return -EINVAL;
 		if (copy_from_user(&ss32, sigmask, sizeof(ss32)))
 			return -EFAULT;
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index 48ae0339af17..2edd7eec88fd 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -711,7 +711,7 @@ static int ext3_splice_branch(handle_t *handle, struct inode *inode,
 	 *	direct blocks blocks
 	 */
 	if (num == 0 && blks > 1) {
-		current_block = le32_to_cpu(where->key + 1);
+		current_block = le32_to_cpu(where->key) + 1;
 		for (i = 1; i < blks; i++)
 			*(where->p + i ) = cpu_to_le32(current_block++);
 	}
@@ -724,7 +724,7 @@ static int ext3_splice_branch(handle_t *handle, struct inode *inode,
 	if (block_i) {
 		block_i->last_alloc_logical_block = block + blks - 1;
 		block_i->last_alloc_physical_block =
-			le32_to_cpu(where[num].key + blks - 1);
+			le32_to_cpu(where[num].key) + blks - 1;
 	}
 
 	/* We are done with atomic stuff, now do the rest of housekeeping */
@@ -814,11 +814,13 @@ int ext3_get_blocks_handle(handle_t *handle, struct inode *inode,
 
 	/* Simplest case - block found, no allocation needed */
 	if (!partial) {
-		first_block = chain[depth - 1].key;
+		first_block = le32_to_cpu(chain[depth - 1].key);
 		clear_buffer_new(bh_result);
 		count++;
 		/*map more blocks*/
 		while (count < maxblocks && count <= blocks_to_boundary) {
+			unsigned long blk;
+
 			if (!verify_chain(chain, partial)) {
 				/*
 				 * Indirect block might be removed by
@@ -831,8 +833,9 @@ int ext3_get_blocks_handle(handle_t *handle, struct inode *inode,
 				count = 0;
 				break;
 			}
-			if (le32_to_cpu(*(chain[depth-1].p+count) ==
-					(first_block + count)))
+			blk = le32_to_cpu(*(chain[depth-1].p + count));
+
+			if (blk == first_block + count)
 				count++;
 			else
 				break;
diff --git a/fs/jffs2/compr.c b/fs/jffs2/compr.c
index e7944e665b9f..5f45e01d71ed 100644
--- a/fs/jffs2/compr.c
+++ b/fs/jffs2/compr.c
@@ -412,7 +412,7 @@ void jffs2_free_comprbuf(unsigned char *comprbuf, unsigned char *orig)
 		kfree(comprbuf);
 }
 
-int jffs2_compressors_init(void)
+int __init jffs2_compressors_init(void)
 {
 /* Registering compressors */
 #ifdef CONFIG_JFFS2_ZLIB
@@ -440,7 +440,7 @@ int jffs2_compressors_init(void)
 	return 0;
 }
 
-int jffs2_compressors_exit(void)
+int __exit jffs2_compressors_exit(void)
 {
 /* Unregistering compressors */
 #ifdef CONFIG_JFFS2_RUBIN
diff --git a/fs/jffs2/compr_zlib.c b/fs/jffs2/compr_zlib.c
index 5c63e0cdcf4c..d43cbac4fb9b 100644
--- a/fs/jffs2/compr_zlib.c
+++ b/fs/jffs2/compr_zlib.c
@@ -60,7 +60,7 @@ static int __init alloc_workspaces(void)
 	return 0;
 }
 
-static void free_workspaces(void)
+static void __exit free_workspaces(void)
 {
 	vfree(def_strm.workspace);
 	vfree(inf_strm.workspace);
@@ -216,7 +216,7 @@ int __init jffs2_zlib_init(void)
 	return ret;
 }
 
-void jffs2_zlib_exit(void)
+void __exit jffs2_zlib_exit(void)
 {
 	jffs2_unregister_compressor(&jffs2_zlib_comp);
 	free_workspaces();
diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c
index e92187f34d5f..e18c9437d58f 100644
--- a/fs/jffs2/file.c
+++ b/fs/jffs2/file.c
@@ -220,12 +220,20 @@ static int jffs2_commit_write (struct file *filp, struct page *pg,
 	D1(printk(KERN_DEBUG "jffs2_commit_write(): ino #%lu, page at 0x%lx, range %d-%d, flags %lx\n",
 		  inode->i_ino, pg->index << PAGE_CACHE_SHIFT, start, end, pg->flags));
 
-	if (!start && end == PAGE_CACHE_SIZE) {
-		/* We need to avoid deadlock with page_cache_read() in
-		   jffs2_garbage_collect_pass(). So we have to mark the
-		   page up to date, to prevent page_cache_read() from
-		   trying to re-lock it. */
-		SetPageUptodate(pg);
+	if (end == PAGE_CACHE_SIZE) {
+		if (!start) {
+			/* We need to avoid deadlock with page_cache_read() in
+			   jffs2_garbage_collect_pass(). So we have to mark the
+			   page up to date, to prevent page_cache_read() from
+			   trying to re-lock it. */
+			SetPageUptodate(pg);
+		} else {
+			/* When writing out the end of a page, write out the
+			   _whole_ page. This helps to reduce the number of
+			   nodes in files which have many short writes, like
+			   syslog files. */
+			start = aligned_start = 0;
+		}
 	}
 
 	ri = jffs2_alloc_raw_inode();
diff --git a/fs/jffs2/nodelist.c b/fs/jffs2/nodelist.c
index 9c575733659b..4973cd648ba8 100644
--- a/fs/jffs2/nodelist.c
+++ b/fs/jffs2/nodelist.c
@@ -438,7 +438,7 @@ static int check_node_data(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info
 	if (c->mtd->point) {
 		err = c->mtd->point(c->mtd, ofs, len, &retlen, &buffer);
 		if (!err && retlen < tn->csize) {
-			JFFS2_WARNING("MTD point returned len too short: %u instead of %u.\n", retlen, tn->csize);
+			JFFS2_WARNING("MTD point returned len too short: %zu instead of %u.\n", retlen, tn->csize);
 			c->mtd->unpoint(c->mtd, buffer, ofs, len);
 		} else if (err)
 			JFFS2_WARNING("MTD point failed: error code %d.\n", err);
@@ -461,7 +461,7 @@ static int check_node_data(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info
 	}
 
 	if (retlen != len) {
-		JFFS2_ERROR("short read at %#08x: %d instead of %d.\n", ofs, retlen, len);
+		JFFS2_ERROR("short read at %#08x: %zd instead of %d.\n", ofs, retlen, len);
 		err = -EIO;
 		goto free_out;
 	}
diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c
index 0a79fc921e9f..5847e76ce16c 100644
--- a/fs/jffs2/scan.c
+++ b/fs/jffs2/scan.c
@@ -222,9 +222,6 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
 		}
 	}
 
-	if (jffs2_sum_active() && s)
-		kfree(s);
-
 	/* Nextblock dirty is always seen as wasted, because we cannot recycle it now */
 	if (c->nextblock && (c->nextblock->dirty_size)) {
 		c->nextblock->wasted_size += c->nextblock->dirty_size;
@@ -266,6 +263,9 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
 	else
 		c->mtd->unpoint(c->mtd, flashbuf, 0, c->mtd->size);
 #endif
+	if (s)
+		kfree(s);
+
 	return ret;
 }
 
diff --git a/fs/jffs2/summary.c b/fs/jffs2/summary.c
index 9763d73c0da1..439b9f6d5837 100644
--- a/fs/jffs2/summary.c
+++ b/fs/jffs2/summary.c
@@ -853,7 +853,7 @@ static int jffs2_sum_write_data(struct jffs2_sb_info *c, struct jffs2_eraseblock
 
 
 	if (ret || (retlen != infosize)) {
-		JFFS2_WARNING("Write of %d bytes at 0x%08x failed. returned %d, retlen %zu\n",
+		JFFS2_WARNING("Write of %u bytes at 0x%08x failed. returned %d, retlen %zd\n",
 			infosize, jeb->offset + c->sector_size - jeb->free_size, ret, retlen);
 
 		c->summary->sum_size = JFFS2_SUMMARY_NOSUM_SIZE;
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
index c8b539ee7d80..9d0521451f59 100644
--- a/fs/jffs2/super.c
+++ b/fs/jffs2/super.c
@@ -324,6 +324,18 @@ static int __init init_jffs2_fs(void)
 {
 	int ret;
 
+	/* Paranoia checks for on-medium structures. If we ask GCC
+	   to pack them with __attribute__((packed)) then it _also_
+	   assumes that they're not aligned -- so it emits crappy
+	   code on some architectures. Ideally we want an attribute
+	   which means just 'no padding', without the alignment
+	   thing. But GCC doesn't have that -- we have to just
+	   hope the structs are the right sizes, instead. */
+	BUG_ON(sizeof(struct jffs2_unknown_node) != 12);
+	BUG_ON(sizeof(struct jffs2_raw_dirent) != 40);
+	BUG_ON(sizeof(struct jffs2_raw_inode) != 68);
+	BUG_ON(sizeof(struct jffs2_raw_summary) != 32);
+
 	printk(KERN_INFO "JFFS2 version 2.2."
 #ifdef CONFIG_JFFS2_FS_WRITEBUFFER
 	       " (NAND)"
@@ -331,7 +343,7 @@ static int __init init_jffs2_fs(void)
 #ifdef CONFIG_JFFS2_SUMMARY
 	       " (SUMMARY) "
 #endif
-	       " (C) 2001-2003 Red Hat, Inc.\n");
+	       " (C) 2001-2006 Red Hat, Inc.\n");
 
 	jffs2_inode_cachep = kmem_cache_create("jffs2_i",
 					     sizeof(struct jffs2_inode_info),
diff --git a/fs/locks.c b/fs/locks.c
index efad798824dc..6f99c0a6f836 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -446,15 +446,14 @@ static struct lock_manager_operations lease_manager_ops = {
  */
 static int lease_init(struct file *filp, int type, struct file_lock *fl)
 {
+	if (assign_type(fl, type) != 0)
+		return -EINVAL;
+
 	fl->fl_owner = current->files;
 	fl->fl_pid = current->tgid;
 
 	fl->fl_file = filp;
 	fl->fl_flags = FL_LEASE;
-	if (assign_type(fl, type) != 0) {
-		locks_free_lock(fl);
-		return -EINVAL;
-	}
 	fl->fl_start = 0;
 	fl->fl_end = OFFSET_MAX;
 	fl->fl_ops = NULL;
@@ -466,16 +465,19 @@ static int lease_init(struct file *filp, int type, struct file_lock *fl)
 static int lease_alloc(struct file *filp, int type, struct file_lock **flp)
 {
 	struct file_lock *fl = locks_alloc_lock();
-	int error;
+	int error = -ENOMEM;
 
 	if (fl == NULL)
-		return -ENOMEM;
+		goto out;
 
 	error = lease_init(filp, type, fl);
-	if (error)
-		return error;
+	if (error) {
+		locks_free_lock(fl);
+		fl = NULL;
+	}
+out:
 	*flp = fl;
-	return 0;
+	return error;
 }
 
 /* Check if two locks overlap each other.
@@ -1372,6 +1374,7 @@ static int __setlease(struct file *filp, long arg, struct file_lock **flp)
 		goto out;
 
 	if (my_before != NULL) {
+		*flp = *my_before;
 		error = lease->fl_lmops->fl_change(my_before, arg);
 		goto out;
 	}
diff --git a/fs/namespace.c b/fs/namespace.c
index 2c5f1f80bdc2..bf478addb852 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -899,13 +899,11 @@ static int do_change_type(struct nameidata *nd, int flag)
 /*
  * do loopback mount.
  */
-static int do_loopback(struct nameidata *nd, char *old_name, unsigned long flags, int mnt_flags)
+static int do_loopback(struct nameidata *nd, char *old_name, int recurse)
 {
 	struct nameidata old_nd;
 	struct vfsmount *mnt = NULL;
-	int recurse = flags & MS_REC;
 	int err = mount_is_safe(nd);
-
 	if (err)
 		return err;
 	if (!old_name || !*old_name)
@@ -939,7 +937,6 @@ static int do_loopback(struct nameidata *nd, char *old_name, unsigned long flags
 		spin_unlock(&vfsmount_lock);
 		release_mounts(&umount_list);
 	}
-	mnt->mnt_flags = mnt_flags;
 
 out:
 	up_write(&namespace_sem);
@@ -1353,7 +1350,7 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
 		retval = do_remount(&nd, flags & ~MS_REMOUNT, mnt_flags,
 				    data_page);
 	else if (flags & MS_BIND)
-		retval = do_loopback(&nd, dev_name, flags, mnt_flags);
+		retval = do_loopback(&nd, dev_name, flags & MS_REC);
 	else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
 		retval = do_change_type(&nd, flags);
 	else if (flags & MS_MOVE)
diff --git a/fs/open.c b/fs/open.c
--- a/fs/open.c
+++ b/fs/open.c
@@ -1124,7 +1124,6 @@ asmlinkage long sys_openat(int dfd, const char __user *filename, int flags,
 	prevent_tail_call(ret);
 	return ret;
 }
-EXPORT_SYMBOL_GPL(sys_openat);
 
 #ifndef __alpha__
 
diff --git a/fs/partitions/check.c b/fs/partitions/check.c
index 45ae7dd3c650..7ef1f094de91 100644
--- a/fs/partitions/check.c
+++ b/fs/partitions/check.c
@@ -533,6 +533,7 @@ void del_gendisk(struct gendisk *disk)
 
 	devfs_remove_disk(disk);
 
+	kobject_uevent(&disk->kobj, KOBJ_REMOVE);
 	if (disk->holder_dir)
 		kobject_unregister(disk->holder_dir);
 	if (disk->slave_dir)
@@ -545,7 +546,7 @@ void del_gendisk(struct gendisk *disk)
 			kfree(disk_name);
 		}
 		put_device(disk->driverfs_dev);
+		disk->driverfs_dev = NULL;
 	}
-	kobject_uevent(&disk->kobj, KOBJ_REMOVE);
 	kobject_del(&disk->kobj);
 }
diff --git a/fs/smbfs/dir.c b/fs/smbfs/dir.c
index 34c7a11d91f0..70d9c5a37f5a 100644
--- a/fs/smbfs/dir.c
+++ b/fs/smbfs/dir.c
@@ -434,6 +434,11 @@ smb_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
 	if (dentry->d_name.len > SMB_MAXNAMELEN)
 		goto out;
 
+	/* Do not allow lookup of names with backslashes in */
+	error = -EINVAL;
+	if (memchr(dentry->d_name.name, '\\', dentry->d_name.len))
+		goto out;
+
 	lock_kernel();
 	error = smb_proc_getattr(dentry, &finfo);
 #ifdef SMBFS_PARANOIA
diff --git a/fs/smbfs/request.c b/fs/smbfs/request.c
index c71c375863cc..c71dd2760d32 100644
--- a/fs/smbfs/request.c
+++ b/fs/smbfs/request.c
@@ -339,9 +339,11 @@ int smb_add_request(struct smb_request *req)
 	/*
 	 * On timeout or on interrupt we want to try and remove the
 	 * request from the recvq/xmitq.
+	 * First check if the request is still part of a queue. (May
+	 * have been removed by some error condition)
 	 */
 	smb_lock_server(server);
-	if (!(req->rq_flags & SMB_REQ_RECEIVED)) {
+	if (!list_empty(&req->rq_queue)) {
 		list_del_init(&req->rq_queue);
 		smb_rput(req);
 	}
diff --git a/fs/splice.c b/fs/splice.c
index 7fb04970c72d..a285fd746dc0 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -51,7 +51,7 @@ struct splice_pipe_desc {
  * addition of remove_mapping(). If success is returned, the caller may
  * attempt to reuse this page for another destination.
  */
-static int page_cache_pipe_buf_steal(struct pipe_inode_info *info,
+static int page_cache_pipe_buf_steal(struct pipe_inode_info *pipe,
 				     struct pipe_buffer *buf)
 {
 	struct page *page = buf->page;
@@ -78,16 +78,18 @@ static int page_cache_pipe_buf_steal(struct pipe_inode_info *info,
 		return 1;
 	}
 
+	buf->flags |= PIPE_BUF_FLAG_LRU;
 	return 0;
 }
 
-static void page_cache_pipe_buf_release(struct pipe_inode_info *info,
+static void page_cache_pipe_buf_release(struct pipe_inode_info *pipe,
 					struct pipe_buffer *buf)
 {
 	page_cache_release(buf->page);
+	buf->flags &= ~PIPE_BUF_FLAG_LRU;
 }
 
-static int page_cache_pipe_buf_pin(struct pipe_inode_info *info,
+static int page_cache_pipe_buf_pin(struct pipe_inode_info *pipe,
 				   struct pipe_buffer *buf)
 {
 	struct page *page = buf->page;
@@ -141,6 +143,7 @@ static int user_page_pipe_buf_steal(struct pipe_inode_info *pipe,
 	if (!(buf->flags & PIPE_BUF_FLAG_GIFT))
 		return 1;
 
+	buf->flags |= PIPE_BUF_FLAG_LRU;
 	return generic_pipe_buf_steal(pipe, buf);
 }
 
@@ -321,6 +324,8 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
 					mapping_gfp_mask(mapping));
 			if (unlikely(error)) {
 				page_cache_release(page);
+				if (error == -EEXIST)
+					continue;
 				break;
 			}
 			/*
@@ -497,14 +502,14 @@ EXPORT_SYMBOL(generic_file_splice_read);
 * Send 'sd->len' bytes to socket from 'sd->file' at position 'sd->pos'
 * using sendpage(). Return the number of bytes sent.
 */
-static int pipe_to_sendpage(struct pipe_inode_info *info,
+static int pipe_to_sendpage(struct pipe_inode_info *pipe,
 			    struct pipe_buffer *buf, struct splice_desc *sd)
 {
 	struct file *file = sd->file;
 	loff_t pos = sd->pos;
 	int ret, more;
 
-	ret = buf->ops->pin(info, buf);
+	ret = buf->ops->pin(pipe, buf);
 	if (!ret) {
 		more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len;
 
@@ -535,7 +540,7 @@ static int pipe_to_sendpage(struct pipe_inode_info *info,
 * SPLICE_F_MOVE isn't set, or we cannot move the page, we simply create
 * a new page in the output file page cache and fill/dirty that.
 */
-static int pipe_to_file(struct pipe_inode_info *info, struct pipe_buffer *buf,
+static int pipe_to_file(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
 			struct splice_desc *sd)
 {
 	struct file *file = sd->file;
@@ -549,7 +554,7 @@ static int pipe_to_file(struct pipe_inode_info *info, struct pipe_buffer *buf,
 	/*
 	 * make sure the data in this buffer is uptodate
 	 */
-	ret = buf->ops->pin(info, buf);
+	ret = buf->ops->pin(pipe, buf);
 	if (unlikely(ret))
 		return ret;
 
@@ -566,37 +571,23 @@ static int pipe_to_file(struct pipe_inode_info *info, struct pipe_buffer *buf, | |||
566 | */ | 571 | */ |
567 | if ((sd->flags & SPLICE_F_MOVE) && this_len == PAGE_CACHE_SIZE) { | 572 | if ((sd->flags & SPLICE_F_MOVE) && this_len == PAGE_CACHE_SIZE) { |
568 | /* | 573 | /* |
569 | * If steal succeeds, buf->page is now pruned from the vm | 574 | * If steal succeeds, buf->page is now pruned from the |
570 | * side (page cache) and we can reuse it. The page will also | 575 | * pagecache and we can reuse it. The page will also be |
571 | * be locked on successful return. | 576 | * locked on successful return. |
572 | */ | 577 | */ |
573 | if (buf->ops->steal(info, buf)) | 578 | if (buf->ops->steal(pipe, buf)) |
574 | goto find_page; | 579 | goto find_page; |
575 | 580 | ||
576 | page = buf->page; | 581 | page = buf->page; |
577 | page_cache_get(page); | ||
578 | |||
579 | /* | ||
580 | * page must be on the LRU for adding to the pagecache. | ||
581 | * Check this without grabbing the zone lock, if it isn't | ||
582 | * the do grab the zone lock, recheck, and add if necessary. | ||
583 | */ | ||
584 | if (!PageLRU(page)) { | ||
585 | struct zone *zone = page_zone(page); | ||
586 | |||
587 | spin_lock_irq(&zone->lru_lock); | ||
588 | if (!PageLRU(page)) { | ||
589 | SetPageLRU(page); | ||
590 | add_page_to_inactive_list(zone, page); | ||
591 | } | ||
592 | spin_unlock_irq(&zone->lru_lock); | ||
593 | } | ||
594 | |||
595 | if (add_to_page_cache(page, mapping, index, gfp_mask)) { | 582 | if (add_to_page_cache(page, mapping, index, gfp_mask)) { |
596 | page_cache_release(page); | ||
597 | unlock_page(page); | 583 | unlock_page(page); |
598 | goto find_page; | 584 | goto find_page; |
599 | } | 585 | } |
586 | |||
587 | page_cache_get(page); | ||
588 | |||
589 | if (!(buf->flags & PIPE_BUF_FLAG_LRU)) | ||
590 | lru_cache_add(page); | ||
600 | } else { | 591 | } else { |
601 | find_page: | 592 | find_page: |
602 | page = find_lock_page(mapping, index); | 593 | page = find_lock_page(mapping, index); |
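Together with the two hunks at the top of this file, the rewritten move path keeps one invariant around PIPE_BUF_FLAG_LRU, which marks a page that is already on an LRU list: user_page_pipe_buf_steal() sets it for gifted pages, page_cache_pipe_buf_release() clears it, and pipe_to_file() only calls lru_cache_add() when it is clear. A sketch of that decision in isolation, assuming kernel-internal headers (linux/pagemap.h, linux/swap.h, linux/pipe_fs_i.h); splice_insert_stolen_page() is a made-up helper name, not part of the patch:

/* Hypothetical helper: insert a successfully stolen pipe page into a
 * file's page cache and fix up LRU membership per PIPE_BUF_FLAG_LRU. */
static int splice_insert_stolen_page(struct pipe_buffer *buf,
				     struct address_space *mapping,
				     pgoff_t index, gfp_t gfp)
{
	struct page *page = buf->page;	/* locked after a successful steal */

	if (add_to_page_cache(page, mapping, index, gfp)) {
		unlock_page(page);
		return -EBUSY;		/* caller falls back to copying */
	}

	page_cache_get(page);

	/* Gifted user pages had the flag set in user_page_pipe_buf_steal();
	 * anything else must be added here, since bare add_to_page_cache()
	 * does not put the page on the LRU. */
	if (!(buf->flags & PIPE_BUF_FLAG_LRU))
		lru_cache_add(page);

	return 0;
}

This replaces the open-coded zone->lru_lock / add_page_to_inactive_list() dance the old code used, relying on lru_cache_add()'s pagevec batching instead.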
@@ -647,23 +638,36 @@ find_page: | |||
647 | } | 638 | } |
648 | 639 | ||
649 | ret = mapping->a_ops->prepare_write(file, page, offset, offset+this_len); | 640 | ret = mapping->a_ops->prepare_write(file, page, offset, offset+this_len); |
650 | if (ret == AOP_TRUNCATED_PAGE) { | 641 | if (unlikely(ret)) { |
642 | loff_t isize = i_size_read(mapping->host); | ||
643 | |||
644 | if (ret != AOP_TRUNCATED_PAGE) | ||
645 | unlock_page(page); | ||
651 | page_cache_release(page); | 646 | page_cache_release(page); |
652 | goto find_page; | 647 | if (ret == AOP_TRUNCATED_PAGE) |
653 | } else if (ret) | 648 | goto find_page; |
649 | |||
650 | /* | ||
651 | * prepare_write() may have instantiated a few blocks | ||
652 | * outside i_size. Trim these off again. | ||
653 | */ | ||
654 | if (sd->pos + this_len > isize) | ||
655 | vmtruncate(mapping->host, isize); | ||
656 | |||
654 | goto out; | 657 | goto out; |
658 | } | ||
655 | 659 | ||
656 | if (buf->page != page) { | 660 | if (buf->page != page) { |
657 | /* | 661 | /* |
658 | * Careful, ->map() uses KM_USER0! | 662 | * Careful, ->map() uses KM_USER0! |
659 | */ | 663 | */ |
660 | char *src = buf->ops->map(info, buf, 1); | 664 | char *src = buf->ops->map(pipe, buf, 1); |
661 | char *dst = kmap_atomic(page, KM_USER1); | 665 | char *dst = kmap_atomic(page, KM_USER1); |
662 | 666 | ||
663 | memcpy(dst + offset, src + buf->offset, this_len); | 667 | memcpy(dst + offset, src + buf->offset, this_len); |
664 | flush_dcache_page(page); | 668 | flush_dcache_page(page); |
665 | kunmap_atomic(dst, KM_USER1); | 669 | kunmap_atomic(dst, KM_USER1); |
666 | buf->ops->unmap(info, buf, src); | 670 | buf->ops->unmap(pipe, buf, src); |
667 | } | 671 | } |
668 | 672 | ||
669 | ret = mapping->a_ops->commit_write(file, page, offset, offset+this_len); | 673 | ret = mapping->a_ops->commit_write(file, page, offset, offset+this_len); |
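The splice.c changes in this merge all sit on the path exercised by gifting pages into a pipe and then moving them into a file. A userspace sketch of that flow, assuming a 2.6.17-era kernel with vmsplice(2)/splice(2) and glibc wrappers for them (a 2006 userland may need syscall(2) instead); error handling is trimmed:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	long pagesize = sysconf(_SC_PAGESIZE);
	int pfd[2], fd;
	struct iovec iov;
	void *buf;

	if (posix_memalign(&buf, pagesize, pagesize))
		return 1;
	memset(buf, 'x', pagesize);

	if (pipe(pfd) < 0)
		return 1;
	fd = open("out.dat", O_WRONLY | O_CREAT | O_TRUNC, 0644);
	if (fd < 0)
		return 1;

	iov.iov_base = buf;
	iov.iov_len = pagesize;

	/* Gift a full, page-aligned buffer to the pipe... */
	if (vmsplice(pfd[1], &iov, 1, SPLICE_F_GIFT) != pagesize)
		return 1;
	/* ...then ask splice to move it into out.dat's page cache. */
	if (splice(pfd[0], NULL, fd, NULL, pagesize, SPLICE_F_MOVE) != pagesize)
		return 1;

	close(fd);
	close(pfd[0]);
	close(pfd[1]);
	return 0;
}

Only full, page-aligned, gifted buffers can actually be stolen and moved; anything else is copied, which is also what pipe_to_file() falls back to when the steal or the page-cache insertion fails.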
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c index 64ee07db0d5e..8558226281c4 100644 --- a/fs/xfs/xfs_alloc.c +++ b/fs/xfs/xfs_alloc.c | |||
@@ -1942,8 +1942,10 @@ xfs_alloc_fix_freelist( | |||
1942 | /* | 1942 | /* |
1943 | * Allocate as many blocks as possible at once. | 1943 | * Allocate as many blocks as possible at once. |
1944 | */ | 1944 | */ |
1945 | if ((error = xfs_alloc_ag_vextent(&targs))) | 1945 | if ((error = xfs_alloc_ag_vextent(&targs))) { |
1946 | xfs_trans_brelse(tp, agflbp); | ||
1946 | return error; | 1947 | return error; |
1948 | } | ||
1947 | /* | 1949 | /* |
1948 | * Stop if we run out. Won't happen if callers are obeying | 1950 | * Stop if we run out. Won't happen if callers are obeying |
1949 | * the restrictions correctly. Can happen for free calls | 1951 | * the restrictions correctly. Can happen for free calls |
@@ -1960,6 +1962,7 @@ xfs_alloc_fix_freelist( | |||
1960 | return error; | 1962 | return error; |
1961 | } | 1963 | } |
1962 | } | 1964 | } |
1965 | xfs_trans_brelse(tp, agflbp); | ||
1963 | args->agbp = agbp; | 1966 | args->agbp = agbp; |
1964 | return 0; | 1967 | return 0; |
1965 | } | 1968 | } |
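The xfs_alloc_fix_freelist() change fixes a buffer leak: the AGFL buffer read earlier in the function was not released when xfs_alloc_ag_vextent() failed inside the refill loop, so the patch adds xfs_trans_brelse() on that error exit and before the final successful return. A condensed sketch of the corrected control flow, assuming kernel-internal XFS headers; fix_freelist_refill() is a made-up helper and the loop body is reduced to a placeholder:

/* Hypothetical helper, shape only: release agflbp on every way out. */
static int fix_freelist_refill(xfs_trans_t *tp, xfs_buf_t *agbp,
			       xfs_buf_t *agflbp, xfs_alloc_arg_t *args,
			       xfs_alloc_arg_t *targs, int need)
{
	int error;

	while (targs->agbno != NULLAGBLOCK && need > 0) {
		error = xfs_alloc_ag_vextent(targs);
		if (error) {
			xfs_trans_brelse(tp, agflbp);	/* leaked before the fix */
			return error;
		}
		need--;		/* stand-in for putting blocks on the freelist */
	}

	xfs_trans_brelse(tp, agflbp);	/* balanced release on the success path */
	args->agbp = agbp;
	return 0;
}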
diff --git a/fs/xfs/xfs_rename.c b/fs/xfs/xfs_rename.c index 81a05cfd77d2..1f148762eb28 100644 --- a/fs/xfs/xfs_rename.c +++ b/fs/xfs/xfs_rename.c | |||
@@ -316,6 +316,18 @@ xfs_rename( | |||
316 | } | 316 | } |
317 | } | 317 | } |
318 | 318 | ||
319 | /* | ||
320 | * If we are using project inheritance, we only allow renames | ||
321 | * into our tree when the project IDs are the same; else the | ||
322 | * tree quota mechanism would be circumvented. | ||
323 | */ | ||
324 | if (unlikely((target_dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) && | ||
325 | (target_dp->i_d.di_projid != src_ip->i_d.di_projid))) { | ||
326 | error = XFS_ERROR(EXDEV); | ||
327 | xfs_rename_unlock4(inodes, XFS_ILOCK_SHARED); | ||
328 | goto rele_return; | ||
329 | } | ||
330 | |||
319 | new_parent = (src_dp != target_dp); | 331 | new_parent = (src_dp != target_dp); |
320 | src_is_directory = ((src_ip->i_d.di_mode & S_IFMT) == S_IFDIR); | 332 | src_is_directory = ((src_ip->i_d.di_mode & S_IFMT) == S_IFDIR); |
321 | 333 | ||
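This block copies into xfs_rename() the project-inheritance check that xfs_link() already performs (see the last hunk below): if the target directory has XFS_DIFLAG_PROJINHERIT and the source inode belongs to a different project, the rename is refused so files cannot escape tree quotas. Returning EXDEV rather than EPERM lets existing userspace fallback paths work unchanged; a small illustration with hypothetical paths (copy_file() is a stand-in, not a real call):

#include <errno.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	if (rename("/mnt/scratch/data.bin", "/mnt/projA/data.bin") == 0)
		return 0;

	if (errno == EXDEV) {
		/* Same handling as a cross-device move: copy, then unlink. */
		fprintf(stderr, "rename: %s, falling back to copy\n",
			strerror(errno));
		/* copy_file("/mnt/scratch/data.bin", "/mnt/projA/data.bin"); */
		/* unlink("/mnt/scratch/data.bin"); */
		return 0;
	}

	perror("rename");
	return 1;
}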
diff --git a/fs/xfs/xfs_vfsops.c b/fs/xfs/xfs_vfsops.c index f0e09ca14139..36ea1b2094f2 100644 --- a/fs/xfs/xfs_vfsops.c +++ b/fs/xfs/xfs_vfsops.c | |||
@@ -669,31 +669,22 @@ xfs_mntupdate( | |||
669 | xfs_mount_t *mp = XFS_BHVTOM(bdp); | 669 | xfs_mount_t *mp = XFS_BHVTOM(bdp); |
670 | int error; | 670 | int error; |
671 | 671 | ||
672 | if (args->flags & XFSMNT_BARRIER) | 672 | if (!(*flags & MS_RDONLY)) { /* rw/ro -> rw */ |
673 | mp->m_flags |= XFS_MOUNT_BARRIER; | 673 | if (vfsp->vfs_flag & VFS_RDONLY) |
674 | else | 674 | vfsp->vfs_flag &= ~VFS_RDONLY; |
675 | mp->m_flags &= ~XFS_MOUNT_BARRIER; | 675 | if (args->flags & XFSMNT_BARRIER) { |
676 | 676 | mp->m_flags |= XFS_MOUNT_BARRIER; | |
677 | if ((vfsp->vfs_flag & VFS_RDONLY) && | ||
678 | !(*flags & MS_RDONLY)) { | ||
679 | vfsp->vfs_flag &= ~VFS_RDONLY; | ||
680 | |||
681 | if (args->flags & XFSMNT_BARRIER) | ||
682 | xfs_mountfs_check_barriers(mp); | 677 | xfs_mountfs_check_barriers(mp); |
683 | } | 678 | } else { |
684 | 679 | mp->m_flags &= ~XFS_MOUNT_BARRIER; | |
685 | if (!(vfsp->vfs_flag & VFS_RDONLY) && | 680 | } |
686 | (*flags & MS_RDONLY)) { | 681 | } else if (!(vfsp->vfs_flag & VFS_RDONLY)) { /* rw -> ro */ |
687 | VFS_SYNC(vfsp, SYNC_FSDATA|SYNC_BDFLUSH|SYNC_ATTR, NULL, error); | 682 | VFS_SYNC(vfsp, SYNC_FSDATA|SYNC_BDFLUSH|SYNC_ATTR, NULL, error); |
688 | |||
689 | xfs_quiesce_fs(mp); | 683 | xfs_quiesce_fs(mp); |
690 | |||
691 | /* Ok now write out an unmount record */ | ||
692 | xfs_log_unmount_write(mp); | 684 | xfs_log_unmount_write(mp); |
693 | xfs_unmountfs_writesb(mp); | 685 | xfs_unmountfs_writesb(mp); |
694 | vfsp->vfs_flag |= VFS_RDONLY; | 686 | vfsp->vfs_flag |= VFS_RDONLY; |
695 | } | 687 | } |
696 | |||
697 | return 0; | 688 | return 0; |
698 | } | 689 | } |
699 | 690 | ||
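The xfs_mntupdate() rewrite splits remount handling by the requested state: any remount that ends up read-write clears VFS_RDONLY if needed and applies or clears the barrier flag (checking barriers when they are enabled), while only a genuine rw-to-ro transition syncs, quiesces, and writes the unmount record; a ro-to-ro remount now does nothing, where the old code still toggled the barrier flag. A self-contained decision sketch of those three transitions (the names are stand-ins for the vfsp/mp flag manipulation, not XFS code):

#include <stdbool.h>
#include <stdio.h>

enum remount_action {
	RW_APPLY_BARRIER_FLAGS,		/* clear RDONLY if set, set/clear barrier */
	RO_QUIESCE_AND_LOG_UNMOUNT,	/* sync, quiesce, unmount record, RDONLY */
	NOTHING,			/* already read-only, staying read-only */
};

static enum remount_action mntupdate(bool currently_ro, bool want_ro)
{
	if (!want_ro)			/* rw -> rw or ro -> rw */
		return RW_APPLY_BARRIER_FLAGS;
	if (!currently_ro)		/* rw -> ro */
		return RO_QUIESCE_AND_LOG_UNMOUNT;
	return NOTHING;			/* ro -> ro */
}

int main(void)
{
	printf("%d %d %d %d\n",
	       mntupdate(false, false),	/* rw -> rw */
	       mntupdate(true,  false),	/* ro -> rw */
	       mntupdate(false, true),	/* rw -> ro */
	       mntupdate(true,  true));	/* ro -> ro */
	return 0;
}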
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c index fa71b305ba5c..7027ae68ee38 100644 --- a/fs/xfs/xfs_vnodeops.c +++ b/fs/xfs/xfs_vnodeops.c | |||
@@ -2663,7 +2663,7 @@ xfs_link( | |||
2663 | */ | 2663 | */ |
2664 | if (unlikely((tdp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) && | 2664 | if (unlikely((tdp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) && |
2665 | (tdp->i_d.di_projid != sip->i_d.di_projid))) { | 2665 | (tdp->i_d.di_projid != sip->i_d.di_projid))) { |
2666 | error = XFS_ERROR(EPERM); | 2666 | error = XFS_ERROR(EXDEV); |
2667 | goto error_return; | 2667 | goto error_return; |
2668 | } | 2668 | } |
2669 | 2669 | ||
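The final hunk makes xfs_link() agree with the new xfs_rename() check above: crossing a project-quota boundary now reports EXDEV instead of EPERM. The distinction matters to callers, since EXDEV conventionally means "not possible across this boundary, but a copy would work" while EPERM means the operation is denied outright. A small sketch of how a caller might use that; link_or_flag_copy() and the paths are hypothetical:

#include <errno.h>
#include <unistd.h>

static int link_or_flag_copy(const char *oldpath, const char *newpath,
			     int *need_copy)
{
	*need_copy = 0;
	if (link(oldpath, newpath) == 0)
		return 0;
	if (errno == EXDEV) {		/* e.g. project-quota boundary on XFS */
		*need_copy = 1;		/* caller copies the data instead */
		return 0;
	}
	return -1;			/* EPERM etc.: report a real error */
}

int main(void)
{
	int need_copy;

	if (link_or_flag_copy("/mnt/projA/a.bin", "/mnt/projB/a.bin",
			      &need_copy) < 0)
		return 1;
	return need_copy;	/* non-zero: fall back to copying the file */
}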