author    Linus Torvalds <torvalds@linux-foundation.org>  2011-01-10 10:43:54 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2011-01-10 10:43:54 -0500
commit    7d44b0440147d83a65270205b22e7d365de28948 (patch)
tree      0adc818f569f45912a19482773dabbf71b191b9d /fs/fuse
parent    0dc1488527a3c01383a50e5df7187219567586a3 (diff)
parent    1baa26b2be92fe9917e2f7ef46d423b5dfa4da71 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mszeredi/fuse
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mszeredi/fuse:
  fuse: fix ioctl ABI
  fuse: allow batching of FORGET requests
  fuse: separate queue for FORGET requests
  fuse: ioctl cleanup

Fix up trivial conflict in fs/fuse/inode.c due to RCU lookup having done the
RCU-freeing of the inode in fuse_destroy_inode().
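The FORGET batching introduced here puts a FUSE_BATCH_FORGET message on the wire as a fuse_in_header followed by a fuse_batch_forget_in and `count` packed fuse_forget_one entries (see fuse_read_batch_forget() in the dev.c hunks below); it is only generated for servers that negotiated protocol minor >= 16, older servers keep receiving single FUSE_FORGET messages. As a rough, illustrative sketch of what a userspace filesystem would read from /dev/fuse (assuming the uapi structures from <linux/fuse.h>; the buffer handling and the handle_batch_forget() helper are hypothetical, not libfuse code):

#include <stdio.h>
#include <linux/fuse.h>

/*
 * Illustrative only: decode one FUSE_BATCH_FORGET message as produced by
 * fuse_read_batch_forget().  Wire layout:
 *
 *   struct fuse_in_header | struct fuse_batch_forget_in | count * struct fuse_forget_one
 *
 * No reply is written back for FORGET/BATCH_FORGET requests.
 */
static void handle_batch_forget(const char *buf, size_t len)
{
	const struct fuse_in_header *ih = (const void *)buf;
	const struct fuse_batch_forget_in *arg = (const void *)(ih + 1);
	const struct fuse_forget_one *one = (const void *)(arg + 1);
	unsigned i;

	if (len < sizeof(*ih) + sizeof(*arg))
		return;		/* truncated header */
	if ((len - sizeof(*ih) - sizeof(*arg)) / sizeof(*one) < arg->count)
		return;		/* fewer entries than advertised */

	for (i = 0; i < arg->count; i++)
		printf("forget: nodeid=%llu nlookup=%llu\n",
		       (unsigned long long)one[i].nodeid,
		       (unsigned long long)one[i].nlookup);
}

Each entry tells the server to drop that many lookup references on the given node ID, exactly as the old single FORGET did, just amortized over one read.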
Diffstat (limited to 'fs/fuse')
-rw-r--r--  fs/fuse/dev.c    | 156
-rw-r--r--  fs/fuse/dir.c    |  53
-rw-r--r--  fs/fuse/file.c   |  66
-rw-r--r--  fs/fuse/fuse_i.h |  27
-rw-r--r--  fs/fuse/inode.c  |  30
5 files changed, 256 insertions, 76 deletions
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 6e07696308d..cf8d28d1fba 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -251,6 +251,20 @@ static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
 	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
 }
 
+void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
+		       u64 nodeid, u64 nlookup)
+{
+	forget->forget_one.nodeid = nodeid;
+	forget->forget_one.nlookup = nlookup;
+
+	spin_lock(&fc->lock);
+	fc->forget_list_tail->next = forget;
+	fc->forget_list_tail = forget;
+	wake_up(&fc->waitq);
+	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
+	spin_unlock(&fc->lock);
+}
+
 static void flush_bg_queue(struct fuse_conn *fc)
 {
 	while (fc->active_background < fc->max_background &&
@@ -438,12 +452,6 @@ static void fuse_request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
 	}
 }
 
-void fuse_request_send_noreply(struct fuse_conn *fc, struct fuse_req *req)
-{
-	req->isreply = 0;
-	fuse_request_send_nowait(fc, req);
-}
-
 void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
 {
 	req->isreply = 1;
@@ -896,9 +904,15 @@ static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
 	return err;
 }
 
+static int forget_pending(struct fuse_conn *fc)
+{
+	return fc->forget_list_head.next != NULL;
+}
+
 static int request_pending(struct fuse_conn *fc)
 {
-	return !list_empty(&fc->pending) || !list_empty(&fc->interrupts);
+	return !list_empty(&fc->pending) || !list_empty(&fc->interrupts) ||
+		forget_pending(fc);
 }
 
 /* Wait until a request is available on the pending list */
@@ -960,6 +974,120 @@ __releases(fc->lock)
 	return err ? err : reqsize;
 }
 
+static struct fuse_forget_link *dequeue_forget(struct fuse_conn *fc,
+					       unsigned max,
+					       unsigned *countp)
+{
+	struct fuse_forget_link *head = fc->forget_list_head.next;
+	struct fuse_forget_link **newhead = &head;
+	unsigned count;
+
+	for (count = 0; *newhead != NULL && count < max; count++)
+		newhead = &(*newhead)->next;
+
+	fc->forget_list_head.next = *newhead;
+	*newhead = NULL;
+	if (fc->forget_list_head.next == NULL)
+		fc->forget_list_tail = &fc->forget_list_head;
+
+	if (countp != NULL)
+		*countp = count;
+
+	return head;
+}
+
+static int fuse_read_single_forget(struct fuse_conn *fc,
+				   struct fuse_copy_state *cs,
+				   size_t nbytes)
+__releases(fc->lock)
+{
+	int err;
+	struct fuse_forget_link *forget = dequeue_forget(fc, 1, NULL);
+	struct fuse_forget_in arg = {
+		.nlookup = forget->forget_one.nlookup,
+	};
+	struct fuse_in_header ih = {
+		.opcode = FUSE_FORGET,
+		.nodeid = forget->forget_one.nodeid,
+		.unique = fuse_get_unique(fc),
+		.len = sizeof(ih) + sizeof(arg),
+	};
+
+	spin_unlock(&fc->lock);
+	kfree(forget);
+	if (nbytes < ih.len)
+		return -EINVAL;
+
+	err = fuse_copy_one(cs, &ih, sizeof(ih));
+	if (!err)
+		err = fuse_copy_one(cs, &arg, sizeof(arg));
+	fuse_copy_finish(cs);
+
+	if (err)
+		return err;
+
+	return ih.len;
+}
+
+static int fuse_read_batch_forget(struct fuse_conn *fc,
+				  struct fuse_copy_state *cs, size_t nbytes)
+__releases(fc->lock)
+{
+	int err;
+	unsigned max_forgets;
+	unsigned count;
+	struct fuse_forget_link *head;
+	struct fuse_batch_forget_in arg = { .count = 0 };
+	struct fuse_in_header ih = {
+		.opcode = FUSE_BATCH_FORGET,
+		.unique = fuse_get_unique(fc),
+		.len = sizeof(ih) + sizeof(arg),
+	};
+
+	if (nbytes < ih.len) {
+		spin_unlock(&fc->lock);
+		return -EINVAL;
+	}
+
+	max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one);
+	head = dequeue_forget(fc, max_forgets, &count);
+	spin_unlock(&fc->lock);
+
+	arg.count = count;
+	ih.len += count * sizeof(struct fuse_forget_one);
+	err = fuse_copy_one(cs, &ih, sizeof(ih));
+	if (!err)
+		err = fuse_copy_one(cs, &arg, sizeof(arg));
+
+	while (head) {
+		struct fuse_forget_link *forget = head;
+
+		if (!err) {
+			err = fuse_copy_one(cs, &forget->forget_one,
+					    sizeof(forget->forget_one));
+		}
+		head = forget->next;
+		kfree(forget);
+	}
+
+	fuse_copy_finish(cs);
+
+	if (err)
+		return err;
+
+	return ih.len;
+}
+
+static int fuse_read_forget(struct fuse_conn *fc, struct fuse_copy_state *cs,
+			    size_t nbytes)
+__releases(fc->lock)
+{
+	if (fc->minor < 16 || fc->forget_list_head.next->next == NULL)
+		return fuse_read_single_forget(fc, cs, nbytes);
+	else
+		return fuse_read_batch_forget(fc, cs, nbytes);
+}
+
 /*
  * Read a single request into the userspace filesystem's buffer. This
  * function waits until a request is available, then removes it from
@@ -998,6 +1126,14 @@ static ssize_t fuse_dev_do_read(struct fuse_conn *fc, struct file *file,
 		return fuse_read_interrupt(fc, cs, nbytes, req);
 	}
 
+	if (forget_pending(fc)) {
+		if (list_empty(&fc->pending) || fc->forget_batch-- > 0)
+			return fuse_read_forget(fc, cs, nbytes);
+
+		if (fc->forget_batch <= -8)
+			fc->forget_batch = 16;
+	}
+
 	req = list_entry(fc->pending.next, struct fuse_req, list);
 	req->state = FUSE_REQ_READING;
 	list_move(&req->list, &fc->io);
@@ -1090,7 +1226,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
 	if (!fc)
 		return -EPERM;
 
-	bufs = kmalloc(pipe->buffers * sizeof (struct pipe_buffer), GFP_KERNEL);
+	bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
 	if (!bufs)
 		return -ENOMEM;
 
@@ -1626,7 +1762,7 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
 	if (!fc)
 		return -EPERM;
 
-	bufs = kmalloc(pipe->buffers * sizeof (struct pipe_buffer), GFP_KERNEL);
+	bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
 	if (!bufs)
 		return -ENOMEM;
 
@@ -1770,6 +1906,8 @@ __acquires(fc->lock)
 	flush_bg_queue(fc);
 	end_requests(fc, &fc->pending);
 	end_requests(fc, &fc->processing);
+	while (forget_pending(fc))
+		kfree(dequeue_forget(fc, 1, NULL));
 }
 
 /*
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index f738599fd8c..042af7346ec 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -10,9 +10,9 @@
 
 #include <linux/pagemap.h>
 #include <linux/file.h>
-#include <linux/gfp.h>
 #include <linux/sched.h>
 #include <linux/namei.h>
+#include <linux/slab.h>
 
 #if BITS_PER_LONG >= 64
 static inline void fuse_dentry_settime(struct dentry *entry, u64 time)
@@ -169,7 +169,7 @@ static int fuse_dentry_revalidate(struct dentry *entry, struct nameidata *nd)
 	struct fuse_entry_out outarg;
 	struct fuse_conn *fc;
 	struct fuse_req *req;
-	struct fuse_req *forget_req;
+	struct fuse_forget_link *forget;
 	struct dentry *parent;
 	u64 attr_version;
 
@@ -182,8 +182,8 @@ static int fuse_dentry_revalidate(struct dentry *entry, struct nameidata *nd)
 	if (IS_ERR(req))
 		return 0;
 
-	forget_req = fuse_get_req(fc);
-	if (IS_ERR(forget_req)) {
+	forget = fuse_alloc_forget();
+	if (!forget) {
 		fuse_put_request(fc, req);
 		return 0;
 	}
@@ -203,15 +203,14 @@ static int fuse_dentry_revalidate(struct dentry *entry, struct nameidata *nd)
 	if (!err) {
 		struct fuse_inode *fi = get_fuse_inode(inode);
 		if (outarg.nodeid != get_node_id(inode)) {
-			fuse_send_forget(fc, forget_req,
-					 outarg.nodeid, 1);
+			fuse_queue_forget(fc, forget, outarg.nodeid, 1);
 			return 0;
 		}
 		spin_lock(&fc->lock);
 		fi->nlookup++;
 		spin_unlock(&fc->lock);
 	}
-	fuse_put_request(fc, forget_req);
+	kfree(forget);
 	if (err || (outarg.attr.mode ^ inode->i_mode) & S_IFMT)
 		return 0;
 
@@ -263,7 +262,7 @@ int fuse_lookup_name(struct super_block *sb, u64 nodeid, struct qstr *name,
 {
 	struct fuse_conn *fc = get_fuse_conn_super(sb);
 	struct fuse_req *req;
-	struct fuse_req *forget_req;
+	struct fuse_forget_link *forget;
 	u64 attr_version;
 	int err;
 
@@ -277,9 +276,9 @@ int fuse_lookup_name(struct super_block *sb, u64 nodeid, struct qstr *name,
 	if (IS_ERR(req))
 		goto out;
 
-	forget_req = fuse_get_req(fc);
-	err = PTR_ERR(forget_req);
-	if (IS_ERR(forget_req)) {
+	forget = fuse_alloc_forget();
+	err = -ENOMEM;
+	if (!forget) {
 		fuse_put_request(fc, req);
 		goto out;
 	}
@@ -305,13 +304,13 @@ int fuse_lookup_name(struct super_block *sb, u64 nodeid, struct qstr *name,
 			   attr_version);
 	err = -ENOMEM;
 	if (!*inode) {
-		fuse_send_forget(fc, forget_req, outarg->nodeid, 1);
+		fuse_queue_forget(fc, forget, outarg->nodeid, 1);
 		goto out;
 	}
 	err = 0;
 
  out_put_forget:
-	fuse_put_request(fc, forget_req);
+	kfree(forget);
  out:
 	return err;
 }
@@ -378,7 +377,7 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode,
 	struct inode *inode;
 	struct fuse_conn *fc = get_fuse_conn(dir);
 	struct fuse_req *req;
-	struct fuse_req *forget_req;
+	struct fuse_forget_link *forget;
 	struct fuse_create_in inarg;
 	struct fuse_open_out outopen;
 	struct fuse_entry_out outentry;
@@ -392,9 +391,9 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode,
 	if (flags & O_DIRECT)
 		return -EINVAL;
 
-	forget_req = fuse_get_req(fc);
-	if (IS_ERR(forget_req))
-		return PTR_ERR(forget_req);
+	forget = fuse_alloc_forget();
+	if (!forget)
+		return -ENOMEM;
 
 	req = fuse_get_req(fc);
 	err = PTR_ERR(req);
@@ -452,10 +451,10 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode,
 	if (!inode) {
 		flags &= ~(O_CREAT | O_EXCL | O_TRUNC);
 		fuse_sync_release(ff, flags);
-		fuse_send_forget(fc, forget_req, outentry.nodeid, 1);
+		fuse_queue_forget(fc, forget, outentry.nodeid, 1);
 		return -ENOMEM;
 	}
-	fuse_put_request(fc, forget_req);
+	kfree(forget);
 	d_instantiate(entry, inode);
 	fuse_change_entry_timeout(entry, &outentry);
 	fuse_invalidate_attr(dir);
@@ -473,7 +472,7 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode,
  out_put_request:
 	fuse_put_request(fc, req);
  out_put_forget_req:
-	fuse_put_request(fc, forget_req);
+	kfree(forget);
 	return err;
 }
 
@@ -487,12 +486,12 @@ static int create_new_entry(struct fuse_conn *fc, struct fuse_req *req,
 	struct fuse_entry_out outarg;
 	struct inode *inode;
 	int err;
-	struct fuse_req *forget_req;
+	struct fuse_forget_link *forget;
 
-	forget_req = fuse_get_req(fc);
-	if (IS_ERR(forget_req)) {
+	forget = fuse_alloc_forget();
+	if (!forget) {
 		fuse_put_request(fc, req);
-		return PTR_ERR(forget_req);
+		return -ENOMEM;
 	}
 
 	memset(&outarg, 0, sizeof(outarg));
@@ -519,10 +518,10 @@ static int create_new_entry(struct fuse_conn *fc, struct fuse_req *req,
 	inode = fuse_iget(dir->i_sb, outarg.nodeid, outarg.generation,
 			  &outarg.attr, entry_attr_timeout(&outarg), 0);
 	if (!inode) {
-		fuse_send_forget(fc, forget_req, outarg.nodeid, 1);
+		fuse_queue_forget(fc, forget, outarg.nodeid, 1);
 		return -ENOMEM;
 	}
-	fuse_put_request(fc, forget_req);
+	kfree(forget);
 
 	if (S_ISDIR(inode->i_mode)) {
 		struct dentry *alias;
@@ -545,7 +544,7 @@ static int create_new_entry(struct fuse_conn *fc, struct fuse_req *req,
 		return 0;
 
  out_put_forget_req:
-	fuse_put_request(fc, forget_req);
+	kfree(forget);
 	return err;
 }
 
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 8b984a2cebb..95da1bc1c82 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1634,9 +1634,9 @@ static int fuse_ioctl_copy_user(struct page **pages, struct iovec *iov,
  * and 64bit. Fortunately we can determine which structure the server
  * used from the size of the reply.
  */
-static int fuse_copy_ioctl_iovec(struct iovec *dst, void *src,
-				 size_t transferred, unsigned count,
-				 bool is_compat)
+static int fuse_copy_ioctl_iovec_old(struct iovec *dst, void *src,
+				     size_t transferred, unsigned count,
+				     bool is_compat)
 {
 #ifdef CONFIG_COMPAT
 	if (count * sizeof(struct compat_iovec) == transferred) {
@@ -1680,6 +1680,42 @@ static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count)
 	return 0;
 }
 
+static int fuse_copy_ioctl_iovec(struct fuse_conn *fc, struct iovec *dst,
+				 void *src, size_t transferred, unsigned count,
+				 bool is_compat)
+{
+	unsigned i;
+	struct fuse_ioctl_iovec *fiov = src;
+
+	if (fc->minor < 16) {
+		return fuse_copy_ioctl_iovec_old(dst, src, transferred,
+						 count, is_compat);
+	}
+
+	if (count * sizeof(struct fuse_ioctl_iovec) != transferred)
+		return -EIO;
+
+	for (i = 0; i < count; i++) {
+		/* Did the server supply an inappropriate value? */
+		if (fiov[i].base != (unsigned long) fiov[i].base ||
+		    fiov[i].len != (unsigned long) fiov[i].len)
+			return -EIO;
+
+		dst[i].iov_base = (void __user *) (unsigned long) fiov[i].base;
+		dst[i].iov_len = (size_t) fiov[i].len;
+
+#ifdef CONFIG_COMPAT
+		if (is_compat &&
+		    (ptr_to_compat(dst[i].iov_base) != fiov[i].base ||
+		     (compat_size_t) dst[i].iov_len != fiov[i].len))
+			return -EIO;
+#endif
+	}
+
+	return 0;
+}
+
+
 /*
  * For ioctls, there is no generic way to determine how much memory
  * needs to be read and/or written. Furthermore, ioctls are allowed
@@ -1740,18 +1776,25 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
 	struct fuse_ioctl_out outarg;
 	struct fuse_req *req = NULL;
 	struct page **pages = NULL;
-	struct page *iov_page = NULL;
+	struct iovec *iov_page = NULL;
 	struct iovec *in_iov = NULL, *out_iov = NULL;
 	unsigned int in_iovs = 0, out_iovs = 0, num_pages = 0, max_pages;
 	size_t in_size, out_size, transferred;
 	int err;
 
+#if BITS_PER_LONG == 32
+	inarg.flags |= FUSE_IOCTL_32BIT;
+#else
+	if (flags & FUSE_IOCTL_COMPAT)
+		inarg.flags |= FUSE_IOCTL_32BIT;
+#endif
+
 	/* assume all the iovs returned by client always fits in a page */
-	BUILD_BUG_ON(sizeof(struct iovec) * FUSE_IOCTL_MAX_IOV > PAGE_SIZE);
+	BUILD_BUG_ON(sizeof(struct fuse_ioctl_iovec) * FUSE_IOCTL_MAX_IOV > PAGE_SIZE);
 
 	err = -ENOMEM;
 	pages = kzalloc(sizeof(pages[0]) * FUSE_MAX_PAGES_PER_REQ, GFP_KERNEL);
-	iov_page = alloc_page(GFP_KERNEL);
+	iov_page = (struct iovec *) __get_free_page(GFP_KERNEL);
 	if (!pages || !iov_page)
 		goto out;
 
@@ -1760,7 +1803,7 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
 	 * RETRY from server is not allowed.
 	 */
 	if (!(flags & FUSE_IOCTL_UNRESTRICTED)) {
-		struct iovec *iov = page_address(iov_page);
+		struct iovec *iov = iov_page;
 
 		iov->iov_base = (void __user *)arg;
 		iov->iov_len = _IOC_SIZE(cmd);
@@ -1841,7 +1884,7 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
 
 	/* did it ask for retry? */
 	if (outarg.flags & FUSE_IOCTL_RETRY) {
-		char *vaddr;
+		void *vaddr;
 
 		/* no retry if in restricted mode */
 		err = -EIO;
@@ -1862,14 +1905,14 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
 			goto out;
 
 		vaddr = kmap_atomic(pages[0], KM_USER0);
-		err = fuse_copy_ioctl_iovec(page_address(iov_page), vaddr,
+		err = fuse_copy_ioctl_iovec(fc, iov_page, vaddr,
 					    transferred, in_iovs + out_iovs,
 					    (flags & FUSE_IOCTL_COMPAT) != 0);
 		kunmap_atomic(vaddr, KM_USER0);
 		if (err)
 			goto out;
 
-		in_iov = page_address(iov_page);
+		in_iov = iov_page;
 		out_iov = in_iov + in_iovs;
 
 		err = fuse_verify_ioctl_iov(in_iov, in_iovs);
@@ -1891,8 +1934,7 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
  out:
 	if (req)
 		fuse_put_request(fc, req);
-	if (iov_page)
-		__free_page(iov_page);
+	free_page((unsigned long) iov_page);
 	while (num_pages)
 		__free_page(pages[--num_pages]);
 	kfree(pages);
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 57d4a3a0f10..ae5744a2f9e 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -53,6 +53,12 @@ extern struct mutex fuse_mutex;
 extern unsigned max_user_bgreq;
 extern unsigned max_user_congthresh;
 
+/* One forget request */
+struct fuse_forget_link {
+	struct fuse_forget_one forget_one;
+	struct fuse_forget_link *next;
+};
+
 /** FUSE inode */
 struct fuse_inode {
 	/** Inode data */
@@ -66,7 +72,7 @@ struct fuse_inode {
 	u64 nlookup;
 
 	/** The request used for sending the FORGET message */
-	struct fuse_req *forget_req;
+	struct fuse_forget_link *forget;
 
 	/** Time in jiffies until the file attributes are valid */
 	u64 i_time;
@@ -255,7 +261,6 @@ struct fuse_req {
 
 	/** Data for asynchronous requests */
 	union {
-		struct fuse_forget_in forget_in;
 		struct {
 			struct fuse_release_in in;
 			struct path path;
@@ -369,6 +374,13 @@ struct fuse_conn {
 	/** Pending interrupts */
 	struct list_head interrupts;
 
+	/** Queue of pending forgets */
+	struct fuse_forget_link forget_list_head;
+	struct fuse_forget_link *forget_list_tail;
+
+	/** Batching of FORGET requests (positive indicates FORGET batch) */
+	int forget_batch;
+
 	/** Flag indicating if connection is blocked. This will be
 	    the case before the INIT reply is received, and if there
 	    are too many outstading backgrounds requests */
@@ -543,8 +555,10 @@ int fuse_lookup_name(struct super_block *sb, u64 nodeid, struct qstr *name,
 /**
  * Send FORGET command
  */
-void fuse_send_forget(struct fuse_conn *fc, struct fuse_req *req,
-		      u64 nodeid, u64 nlookup);
+void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
+		       u64 nodeid, u64 nlookup);
+
+struct fuse_forget_link *fuse_alloc_forget(void);
 
 /**
  * Initialize READ or READDIR request
@@ -656,11 +670,6 @@ void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req);
 void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req);
 
 /**
- * Send a request with no reply
- */
-void fuse_request_send_noreply(struct fuse_conn *fc, struct fuse_req *req);
-
-/**
  * Send a request in the background
  */
 void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req);
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index a8b31da19b9..f62b32cffea 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -71,6 +71,11 @@ struct fuse_mount_data {
 	unsigned blksize;
 };
 
+struct fuse_forget_link *fuse_alloc_forget()
+{
+	return kzalloc(sizeof(struct fuse_forget_link), GFP_KERNEL);
+}
+
 static struct inode *fuse_alloc_inode(struct super_block *sb)
 {
 	struct inode *inode;
@@ -90,8 +95,8 @@ static struct inode *fuse_alloc_inode(struct super_block *sb)
 	INIT_LIST_HEAD(&fi->queued_writes);
 	INIT_LIST_HEAD(&fi->writepages);
 	init_waitqueue_head(&fi->page_waitq);
-	fi->forget_req = fuse_request_alloc();
-	if (!fi->forget_req) {
+	fi->forget = fuse_alloc_forget();
+	if (!fi->forget) {
 		kmem_cache_free(fuse_inode_cachep, inode);
 		return NULL;
 	}
@@ -111,24 +116,10 @@ static void fuse_destroy_inode(struct inode *inode)
 	struct fuse_inode *fi = get_fuse_inode(inode);
 	BUG_ON(!list_empty(&fi->write_files));
 	BUG_ON(!list_empty(&fi->queued_writes));
-	if (fi->forget_req)
-		fuse_request_free(fi->forget_req);
+	kfree(fi->forget);
 	call_rcu(&inode->i_rcu, fuse_i_callback);
 }
 
-void fuse_send_forget(struct fuse_conn *fc, struct fuse_req *req,
-		      u64 nodeid, u64 nlookup)
-{
-	struct fuse_forget_in *inarg = &req->misc.forget_in;
-	inarg->nlookup = nlookup;
-	req->in.h.opcode = FUSE_FORGET;
-	req->in.h.nodeid = nodeid;
-	req->in.numargs = 1;
-	req->in.args[0].size = sizeof(struct fuse_forget_in);
-	req->in.args[0].value = inarg;
-	fuse_request_send_noreply(fc, req);
-}
-
 static void fuse_evict_inode(struct inode *inode)
 {
 	truncate_inode_pages(&inode->i_data, 0);
@@ -136,8 +127,8 @@ static void fuse_evict_inode(struct inode *inode)
 	if (inode->i_sb->s_flags & MS_ACTIVE) {
 		struct fuse_conn *fc = get_fuse_conn(inode);
 		struct fuse_inode *fi = get_fuse_inode(inode);
-		fuse_send_forget(fc, fi->forget_req, fi->nodeid, fi->nlookup);
-		fi->forget_req = NULL;
+		fuse_queue_forget(fc, fi->forget, fi->nodeid, fi->nlookup);
+		fi->forget = NULL;
 	}
 }
 
@@ -541,6 +532,7 @@ void fuse_conn_init(struct fuse_conn *fc)
 	INIT_LIST_HEAD(&fc->interrupts);
 	INIT_LIST_HEAD(&fc->bg_queue);
 	INIT_LIST_HEAD(&fc->entry);
+	fc->forget_list_tail = &fc->forget_list_head;
 	atomic_set(&fc->num_waiting, 0);
 	fc->max_background = FUSE_DEFAULT_MAX_BACKGROUND;
 	fc->congestion_threshold = FUSE_DEFAULT_CONGESTION_THRESHOLD;