author    Maxim Patlasov <mpatlasov@parallels.com>    2012-10-26 11:50:36 -0400
committer Miklos Szeredi <mszeredi@suse.cz>           2013-01-24 10:21:28 -0500
commit    5565a9d884327ac45d49041f1b846dac273e110c (patch)
tree      936c0ec109cc6c92e1264c33d8cde7ace637f97a /fs/fuse
parent    7c190c8b9c0dd373cdd4d96e63306ec6e1a7115d (diff)
fuse: optimize __fuse_direct_io()
__fuse_direct_io() allocates fuse requests by calling fuse_get_req(fc, n).
The patch calculates 'n' based on the iov[] array. This is useful because
allocating FUSE_MAX_PAGES_PER_REQ page pointers and descriptors for each
fuse request would be a waste of memory in the case of iov-s of smaller size.

Signed-off-by: Maxim Patlasov <mpatlasov@parallels.com>
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
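As a rough standalone sketch (not part of the patch, assuming 4 KiB pages), the per-segment page count that the patch relies on can be reproduced in plain userspace C; npages_for_segment() is a made-up name that mirrors the kernel expression:

#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT	12			/* assumed 4 KiB pages */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

/* Pages needed to cover frag_size bytes starting at user_addr: round the
 * span (offset within the first page + length) up to whole pages. */
static int npages_for_segment(unsigned long user_addr, size_t frag_size)
{
	unsigned long offset = user_addr & ~PAGE_MASK;

	return (frag_size + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
}

int main(void)
{
	/* 1 KiB starting 3.5 KiB into a page straddles two pages. */
	printf("%d\n", npages_for_segment(0x1000e00, 1024));	/* 2 */
	/* A page-aligned 8 KiB buffer needs exactly two pages. */
	printf("%d\n", npages_for_segment(0x2000000, 8192));	/* 2 */
	return 0;
}

Summing this per-segment count over the iov[] array, capped at FUSE_MAX_PAGES_PER_REQ, is what the new fuse_iter_npages() helper below does to size each request.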
Diffstat (limited to 'fs/fuse')
-rw-r--r--  fs/fuse/file.c  |  25
1 file changed, 21 insertions(+), 4 deletions(-)
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index b2aa6c21e209..68e10d43bd3f 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1088,14 +1088,14 @@ static int fuse_get_user_pages(struct fuse_req *req, struct iov_iter *ii,
 		return 0;
 	}
 
-	while (nbytes < *nbytesp && req->num_pages < FUSE_MAX_PAGES_PER_REQ) {
+	while (nbytes < *nbytesp && req->num_pages < req->max_pages) {
 		unsigned npages;
 		unsigned long user_addr = fuse_get_user_addr(ii);
 		unsigned offset = user_addr & ~PAGE_MASK;
 		size_t frag_size = fuse_get_frag_size(ii, *nbytesp - nbytes);
 		int ret;
 
-		unsigned n = FUSE_MAX_PAGES_PER_REQ - req->num_pages;
+		unsigned n = req->max_pages - req->num_pages;
 		frag_size = min_t(size_t, frag_size, n << PAGE_SHIFT);
 
 		npages = (frag_size + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -1131,6 +1131,23 @@ static int fuse_get_user_pages(struct fuse_req *req, struct iov_iter *ii,
 	return 0;
 }
 
+static inline int fuse_iter_npages(const struct iov_iter *ii_p)
+{
+	struct iov_iter ii = *ii_p;
+	int npages = 0;
+
+	while (iov_iter_count(&ii) && npages < FUSE_MAX_PAGES_PER_REQ) {
+		unsigned long user_addr = fuse_get_user_addr(&ii);
+		unsigned offset = user_addr & ~PAGE_MASK;
+		size_t frag_size = iov_iter_single_seg_count(&ii);
+
+		npages += (frag_size + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
+		iov_iter_advance(&ii, frag_size);
+	}
+
+	return min(npages, FUSE_MAX_PAGES_PER_REQ);
+}
+
 static ssize_t __fuse_direct_io(struct file *file, const struct iovec *iov,
 				unsigned long nr_segs, size_t count,
 				loff_t *ppos, int write)
@@ -1145,7 +1162,7 @@ static ssize_t __fuse_direct_io(struct file *file, const struct iovec *iov,
 
 	iov_iter_init(&ii, iov, nr_segs, count, 0);
 
-	req = fuse_get_req(fc, FUSE_MAX_PAGES_PER_REQ);
+	req = fuse_get_req(fc, fuse_iter_npages(&ii));
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
@@ -1180,7 +1197,7 @@ static ssize_t __fuse_direct_io(struct file *file, const struct iovec *iov,
 			break;
 		if (count) {
 			fuse_put_request(fc, req);
-			req = fuse_get_req(fc, FUSE_MAX_PAGES_PER_REQ);
+			req = fuse_get_req(fc, fuse_iter_npages(&ii));
 			if (IS_ERR(req))
 				break;
 		}
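For context, a hypothetical userspace workload that benefits from this change: a small vectored read on a file for which the FUSE filesystem replies with FOPEN_DIRECT_IO, so the read is served by __fuse_direct_io(). The mount path below is made up; with only two 512-byte segments, each generated request now carries a page count derived from the iovec instead of FUSE_MAX_PAGES_PER_REQ page pointers and descriptors.

#include <fcntl.h>
#include <stdio.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	char a[512], b[512];
	struct iovec iov[2] = {
		{ .iov_base = a, .iov_len = sizeof(a) },
		{ .iov_base = b, .iov_len = sizeof(b) },
	};
	/* /mnt/fuse is a made-up mount point for a FUSE filesystem that
	 * enables direct I/O (FOPEN_DIRECT_IO) on open. */
	int fd = open("/mnt/fuse/testfile", O_RDONLY);
	ssize_t n;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Two 512-byte segments: only a page or two of user memory per
	 * request, far below FUSE_MAX_PAGES_PER_REQ. */
	n = readv(fd, iov, 2);
	if (n < 0)
		perror("readv");
	else
		printf("read %zd bytes\n", n);
	close(fd);
	return 0;
}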