| author | Glenn Elliott <gelliott@cs.unc.edu> | 2012-03-04 19:47:13 -0500 |
|---|---|---|
| committer | Glenn Elliott <gelliott@cs.unc.edu> | 2012-03-04 19:47:13 -0500 |
| commit | c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch) | |
| tree | ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 | /fs/fuse/dev.c |
| parent | ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff) | |
| parent | 6a00f206debf8a5c8899055726ad127dbeeed098 (diff) | |
Merge branch 'mpi-master' into wip-k-fmlp
Conflicts:
	litmus/sched_cedf.c
Diffstat (limited to 'fs/fuse/dev.c')

| -rw-r--r-- | fs/fuse/dev.c | 202 |

1 file changed, 174 insertions(+), 28 deletions(-)
```diff
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index cde755cca564..640fc229df10 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -251,6 +251,20 @@ static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
 	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
 }
 
+void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
+		       u64 nodeid, u64 nlookup)
+{
+	forget->forget_one.nodeid = nodeid;
+	forget->forget_one.nlookup = nlookup;
+
+	spin_lock(&fc->lock);
+	fc->forget_list_tail->next = forget;
+	fc->forget_list_tail = forget;
+	wake_up(&fc->waitq);
+	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
+	spin_unlock(&fc->lock);
+}
+
 static void flush_bg_queue(struct fuse_conn *fc)
 {
 	while (fc->active_background < fc->max_background &&
```
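The new fuse_queue_forget() puts FORGET messages on a dedicated singly linked list instead of allocating a full fuse_req, so a forget can always be queued without sleeping or failing. The list uses a dummy head node plus a tail pointer for O(1) append. Below is a minimal user-space sketch of that idiom; the struct and function names are illustrative, not the kernel's, and the sketch omits the locking and reader wakeup the kernel version does under fc->lock.

```c
#include <stdio.h>

/* User-space model of the forget-list idiom: a dummy head node plus
 * a tail pointer give O(1) append with no NULL checks on the hot path. */
struct forget_link {
	struct forget_link *next;
	unsigned long long nodeid;
	unsigned long long nlookup;
};

struct conn {
	struct forget_link forget_list_head;	/* dummy node, never dequeued */
	struct forget_link *forget_list_tail;
};

static void conn_init(struct conn *c)
{
	c->forget_list_head.next = NULL;
	c->forget_list_tail = &c->forget_list_head;
}

/* Mirrors fuse_queue_forget() minus the spinlock and wakeup. */
static void queue_forget(struct conn *c, struct forget_link *f,
			 unsigned long long nodeid, unsigned long long nlookup)
{
	f->nodeid = nodeid;
	f->nlookup = nlookup;
	f->next = NULL;
	c->forget_list_tail->next = f;	/* O(1) append at the tail */
	c->forget_list_tail = f;
}

int main(void)
{
	struct conn c;
	struct forget_link a, b;

	conn_init(&c);
	queue_forget(&c, &a, 1, 1);
	queue_forget(&c, &b, 2, 3);

	for (struct forget_link *p = c.forget_list_head.next; p; p = p->next)
		printf("forget nodeid=%llu nlookup=%llu\n", p->nodeid, p->nlookup);
	return 0;
}
```

With this queue in place, FUSE_FORGET no longer needs a request object at all, which is why fuse_request_send_noreply() becomes dead code and is deleted in the next hunk.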
```diff
@@ -438,12 +452,6 @@ static void fuse_request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
 	}
 }
 
-void fuse_request_send_noreply(struct fuse_conn *fc, struct fuse_req *req)
-{
-	req->isreply = 0;
-	fuse_request_send_nowait(fc, req);
-}
-
 void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
 {
 	req->isreply = 1;
```
```diff
@@ -729,14 +737,12 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
 	if (WARN_ON(PageMlocked(oldpage)))
 		goto out_fallback_unlock;
 
-	remove_from_page_cache(oldpage);
-	page_cache_release(oldpage);
-
-	err = add_to_page_cache_locked(newpage, mapping, index, GFP_KERNEL);
+	err = replace_page_cache_page(oldpage, newpage, GFP_KERNEL);
 	if (err) {
-		printk(KERN_WARNING "fuse_try_move_page: failed to add page");
-		goto out_fallback_unlock;
+		unlock_page(newpage);
+		return err;
 	}
+
 	page_cache_get(newpage);
 
 	if (!(buf->flags & PIPE_BUF_FLAG_LRU))
```
```diff
@@ -809,11 +815,9 @@ static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
 	int err;
 	struct page *page = *pagep;
 
-	if (page && zeroing && count < PAGE_SIZE) {
-		void *mapaddr = kmap_atomic(page, KM_USER1);
-		memset(mapaddr, 0, PAGE_SIZE);
-		kunmap_atomic(mapaddr, KM_USER1);
-	}
+	if (page && zeroing && count < PAGE_SIZE)
+		clear_highpage(page);
+
 	while (count) {
 		if (cs->write && cs->pipebufs && page) {
 			return fuse_ref_page(cs, page, offset, count);
```
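clear_highpage() is the stock helper for zeroing a page that may live in highmem, replacing the open-coded kmap/memset/kunmap above. A rough sketch of what the helper expands to on a kernel of this era (simplified; the real definition in include/linux/highmem.h is the authority):

```c
/* Approximate expansion of clear_highpage() on a 2.6.3x kernel
 * (sketch, not the canonical source): */
static inline void clear_highpage(struct page *page)
{
	void *kaddr = kmap_atomic(page, KM_USER0);
	clear_page(kaddr);	/* arch-optimized page-sized zeroing */
	kunmap_atomic(kaddr, KM_USER0);
}
```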
```diff
@@ -830,10 +834,10 @@ static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
 		}
 	}
 	if (page) {
-		void *mapaddr = kmap_atomic(page, KM_USER1);
+		void *mapaddr = kmap_atomic(page, KM_USER0);
 		void *buf = mapaddr + offset;
 		offset += fuse_copy_do(cs, &buf, &count);
-		kunmap_atomic(mapaddr, KM_USER1);
+		kunmap_atomic(mapaddr, KM_USER0);
 	} else
 		offset += fuse_copy_do(cs, NULL, &count);
 }
```
```diff
@@ -898,9 +902,15 @@ static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
 	return err;
 }
 
+static int forget_pending(struct fuse_conn *fc)
+{
+	return fc->forget_list_head.next != NULL;
+}
+
 static int request_pending(struct fuse_conn *fc)
 {
-	return !list_empty(&fc->pending) || !list_empty(&fc->interrupts);
+	return !list_empty(&fc->pending) || !list_empty(&fc->interrupts) ||
+		forget_pending(fc);
 }
 
 /* Wait until a request is available on the pending list */
```
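request_pending() is the condition the device reader blocks on, so extending it with forget_pending() makes a reader sleeping in the read path wake up even when only forgets are queued. A hedged sketch of the wait shape; the real request_wait() open-codes an exclusive wait on fc->waitq (so it can drop fc->lock around schedule()) rather than using the generic helper:

```c
/* Equivalent wait condition, written with the generic helper for
 * clarity only (sketch; not how fuse's request_wait() is coded): */
wait_event_interruptible(fc->waitq,
			 !list_empty(&fc->pending) ||
			 !list_empty(&fc->interrupts) ||
			 forget_pending(fc));
```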
```diff
@@ -962,6 +972,120 @@ __releases(fc->lock)
 	return err ? err : reqsize;
 }
 
+static struct fuse_forget_link *dequeue_forget(struct fuse_conn *fc,
+					       unsigned max,
+					       unsigned *countp)
+{
+	struct fuse_forget_link *head = fc->forget_list_head.next;
+	struct fuse_forget_link **newhead = &head;
+	unsigned count;
+
+	for (count = 0; *newhead != NULL && count < max; count++)
+		newhead = &(*newhead)->next;
+
+	fc->forget_list_head.next = *newhead;
+	*newhead = NULL;
+	if (fc->forget_list_head.next == NULL)
+		fc->forget_list_tail = &fc->forget_list_head;
+
+	if (countp != NULL)
+		*countp = count;
+
+	return head;
+}
+
+static int fuse_read_single_forget(struct fuse_conn *fc,
+				   struct fuse_copy_state *cs,
+				   size_t nbytes)
+__releases(fc->lock)
+{
+	int err;
+	struct fuse_forget_link *forget = dequeue_forget(fc, 1, NULL);
+	struct fuse_forget_in arg = {
+		.nlookup = forget->forget_one.nlookup,
+	};
+	struct fuse_in_header ih = {
+		.opcode = FUSE_FORGET,
+		.nodeid = forget->forget_one.nodeid,
+		.unique = fuse_get_unique(fc),
+		.len = sizeof(ih) + sizeof(arg),
+	};
+
+	spin_unlock(&fc->lock);
+	kfree(forget);
+	if (nbytes < ih.len)
+		return -EINVAL;
+
+	err = fuse_copy_one(cs, &ih, sizeof(ih));
+	if (!err)
+		err = fuse_copy_one(cs, &arg, sizeof(arg));
+	fuse_copy_finish(cs);
+
+	if (err)
+		return err;
+
+	return ih.len;
+}
+
+static int fuse_read_batch_forget(struct fuse_conn *fc,
+				  struct fuse_copy_state *cs, size_t nbytes)
+__releases(fc->lock)
+{
+	int err;
+	unsigned max_forgets;
+	unsigned count;
+	struct fuse_forget_link *head;
+	struct fuse_batch_forget_in arg = { .count = 0 };
+	struct fuse_in_header ih = {
+		.opcode = FUSE_BATCH_FORGET,
+		.unique = fuse_get_unique(fc),
+		.len = sizeof(ih) + sizeof(arg),
+	};
+
+	if (nbytes < ih.len) {
+		spin_unlock(&fc->lock);
+		return -EINVAL;
+	}
+
+	max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one);
+	head = dequeue_forget(fc, max_forgets, &count);
+	spin_unlock(&fc->lock);
+
+	arg.count = count;
+	ih.len += count * sizeof(struct fuse_forget_one);
+	err = fuse_copy_one(cs, &ih, sizeof(ih));
+	if (!err)
+		err = fuse_copy_one(cs, &arg, sizeof(arg));
+
+	while (head) {
+		struct fuse_forget_link *forget = head;
+
+		if (!err) {
+			err = fuse_copy_one(cs, &forget->forget_one,
+					    sizeof(forget->forget_one));
+		}
+		head = forget->next;
+		kfree(forget);
+	}
+
+	fuse_copy_finish(cs);
+
+	if (err)
+		return err;
+
+	return ih.len;
+}
+
+static int fuse_read_forget(struct fuse_conn *fc, struct fuse_copy_state *cs,
+			    size_t nbytes)
+__releases(fc->lock)
+{
+	if (fc->minor < 16 || fc->forget_list_head.next->next == NULL)
+		return fuse_read_single_forget(fc, cs, nbytes);
+	else
+		return fuse_read_batch_forget(fc, cs, nbytes);
+}
+
 /*
  * Read a single request into the userspace filesystem's buffer. This
  * function waits until a request is available, then removes it from
```
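dequeue_forget() detaches up to max links in one pass using a pointer-to-pointer cursor: after the loop, newhead addresses the next field of the last link taken, so one store splices the remainder back onto the list head and a second store terminates the detached chain. fuse_read_forget() then picks the wire format: a classic FUSE_FORGET for a single entry, or when the userspace daemon speaks ABI minor < 16, and otherwise FUSE_BATCH_FORGET, one header plus an array of fuse_forget_one records. A user-space model of the dequeue walk, reusing the struct conn and struct forget_link types from the earlier sketch (names illustrative):

```c
/* Companion to the earlier user-space sketch: models
 * dequeue_forget()'s pointer-to-pointer walk. */
static struct forget_link *dequeue(struct conn *c, unsigned max,
				   unsigned *countp)
{
	struct forget_link *head = c->forget_list_head.next;
	struct forget_link **newhead = &head;
	unsigned count;

	/* Advance past up to 'max' links; newhead ends up pointing at
	 * the 'next' field of the last link taken. */
	for (count = 0; *newhead != NULL && count < max; count++)
		newhead = &(*newhead)->next;

	c->forget_list_head.next = *newhead;	/* remainder stays queued */
	*newhead = NULL;			/* cut the detached chain */
	if (c->forget_list_head.next == NULL)	/* drained: reset the tail */
		c->forget_list_tail = &c->forget_list_head;

	if (countp)
		*countp = count;
	return head;
}
```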
```diff
@@ -1000,6 +1124,14 @@ static ssize_t fuse_dev_do_read(struct fuse_conn *fc, struct file *file,
 		return fuse_read_interrupt(fc, cs, nbytes, req);
 	}
 
+	if (forget_pending(fc)) {
+		if (list_empty(&fc->pending) || fc->forget_batch-- > 0)
+			return fuse_read_forget(fc, cs, nbytes);
+
+		if (fc->forget_batch <= -8)
+			fc->forget_batch = 16;
+	}
+
 	req = list_entry(fc->pending.next, struct fuse_req, list);
 	req->state = FUSE_REQ_READING;
 	list_move(&req->list, &fc->io);
```
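The fc->forget_batch counter interleaves the two queues so that a flood of forgets cannot starve regular requests: while both are pending, roughly 16 forgets are served, then the counter goes negative and normal requests run until it reaches -8, at which point it resets to 16. A small stand-alone model of the counter's behavior (the global and the function name are illustrative; the kernel keeps the counter in struct fuse_conn):

```c
#include <stdio.h>

static int forget_batch = 16;	/* models fc->forget_batch */

/* Returns 1 to serve a forget, 0 to serve a normal request,
 * mirroring the branch added to fuse_dev_do_read(). */
static int serve_forget_next(int have_pending_requests)
{
	if (!have_pending_requests || forget_batch-- > 0)
		return 1;
	if (forget_batch <= -8)
		forget_batch = 16;
	return 0;
}

int main(void)
{
	/* With both queues busy, this prints 16 "forget" lines, then
	 * 8 "request" lines, then repeats. */
	for (int i = 0; i < 48; i++)
		printf("%s\n", serve_forget_next(1) ? "forget" : "request");
	return 0;
}
```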
```diff
@@ -1092,7 +1224,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
 	if (!fc)
 		return -EPERM;
 
-	bufs = kmalloc(pipe->buffers * sizeof (struct pipe_buffer), GFP_KERNEL);
+	bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
 	if (!bufs)
 		return -ENOMEM;
 
```
```diff
@@ -1336,12 +1468,7 @@ out_finish:
 
 static void fuse_retrieve_end(struct fuse_conn *fc, struct fuse_req *req)
 {
-	int i;
-
-	for (i = 0; i < req->num_pages; i++) {
-		struct page *page = req->pages[i];
-		page_cache_release(page);
-	}
+	release_pages(req->pages, req->num_pages, 0);
 }
 
 static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
```
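release_pages() drops one reference per page in a single call, batching what the removed loop did one page_cache_release() at a time; the batching amortizes LRU-lock traffic when pages actually need freeing. Sketch of the equivalence, assuming the 2.6.38-era signature release_pages(struct page **pages, int nr, int cold):

```c
/* Before: one put per iteration (i declared in the enclosing scope). */
for (i = 0; i < req->num_pages; i++)
	page_cache_release(req->pages[i]);

/* After: same semantics in one batched call; the trailing 0 is the
 * "cold" hint (these pages are not cache-cold). */
release_pages(req->pages, req->num_pages, 0);
```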
```diff
@@ -1633,7 +1760,7 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
 	if (!fc)
 		return -EPERM;
 
-	bufs = kmalloc(pipe->buffers * sizeof (struct pipe_buffer), GFP_KERNEL);
+	bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
 	if (!bufs)
 		return -ENOMEM;
 
```
```diff
@@ -1777,6 +1904,23 @@ __acquires(fc->lock)
 	flush_bg_queue(fc);
 	end_requests(fc, &fc->pending);
 	end_requests(fc, &fc->processing);
+	while (forget_pending(fc))
+		kfree(dequeue_forget(fc, 1, NULL));
+}
+
+static void end_polls(struct fuse_conn *fc)
+{
+	struct rb_node *p;
+
+	p = rb_first(&fc->polled_files);
+
+	while (p) {
+		struct fuse_file *ff;
+		ff = rb_entry(p, struct fuse_file, polled_node);
+		wake_up_interruptible_all(&ff->poll_wait);
+
+		p = rb_next(p);
+	}
 }
 
 /*
```
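end_polls() walks fc->polled_files, the red-black tree in which fuse_file entries registered for poll notifications are kept, and wakes every waiter so a process sleeping in poll()/select() on a file from a dead connection returns immediately instead of hanging. The rb_first()/rb_next()/rb_entry() triple is the standard kernel idiom for in-order rbtree traversal; the same walk reads naturally as a for loop (equivalent sketch, identical behavior to the while form above):

```c
/* Equivalent traversal of the polled-files tree as a for loop. */
struct rb_node *p;

for (p = rb_first(&fc->polled_files); p; p = rb_next(p)) {
	struct fuse_file *ff = rb_entry(p, struct fuse_file, polled_node);

	wake_up_interruptible_all(&ff->poll_wait);
}
```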
```diff
@@ -1806,6 +1950,7 @@ void fuse_abort_conn(struct fuse_conn *fc)
 		fc->blocked = 0;
 		end_io_requests(fc);
 		end_queued_requests(fc);
+		end_polls(fc);
 		wake_up_all(&fc->waitq);
 		wake_up_all(&fc->blocked_waitq);
 		kill_fasync(&fc->fasync, SIGIO, POLL_IN);
```
```diff
@@ -1822,6 +1967,7 @@ int fuse_dev_release(struct inode *inode, struct file *file)
 		fc->connected = 0;
 		fc->blocked = 0;
 		end_queued_requests(fc);
+		end_polls(fc);
 		wake_up_all(&fc->blocked_waitq);
 		spin_unlock(&fc->lock);
 		fuse_conn_put(fc);
```