author     Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>   2011-02-28 06:33:58 -0500
committer  Eric Van Hensbergen <ericvh@gmail.com>               2011-03-15 10:57:37 -0400
commit     7263cebed9fadad719063fdc8bba7085cf2c080d (patch)
tree       716144cbdfa362c0beb3c3fafccc8793582dcafc /fs/9p
parent     3cf387d780944305839f5b27c51f225444ba4d27 (diff)
fs/9p: Add buffered write support for v9fs.
We can now support writeable mmaps.
Based on the original patch from Badari Pulavarty <pbadari@us.ibm.com>.
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Venkateswararao Jujjuri <jvrao@linux.vnet.ibm.com>
Signed-off-by: Eric Van Hensbergen <ericvh@gmail.com>
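
To make the "writeable mmaps" claim concrete: with this patch, a shared writable mapping of a file on a cache-enabled 9p mount dirties pages through the new ->page_mkwrite handler and flushes them through ->writepage. The sketch below is an illustration only and is not part of the patch; the mount point and file name are hypothetical, and it assumes the filesystem is mounted with a caching mode (e.g. cache=loose) so the page cache is in use.

/*
 * Illustration only (not part of this commit): userspace view of the
 * behaviour the patch enables. Assumes a hypothetical file on a 9p mount
 * that uses a caching mode, e.g. /mnt/9p mounted with cache=loose.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/9p/example.txt", O_RDWR);	/* hypothetical path */
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ftruncate(fd, 4096) < 0) {	/* make sure one page exists to map */
		perror("ftruncate");
		return 1;
	}
	/* Shared writable mapping: 9p previously allowed only read-only mmap. */
	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memcpy(p, "hello via mmap\n", 15);	/* first store triggers page_mkwrite */
	msync(p, 4096, MS_SYNC);		/* dirty page written back via writepage */
	munmap(p, 4096);
	close(fd);
	return 0;
}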
Diffstat (limited to 'fs/9p')
-rw-r--r--   fs/9p/vfs_addr.c   183
-rw-r--r--   fs/9p/vfs_file.c    54
2 files changed, 218 insertions, 19 deletions
diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
index 637bd703e28c..566684ce55e2 100644
--- a/fs/9p/vfs_addr.c
+++ b/fs/9p/vfs_addr.c
@@ -39,16 +39,16 @@
 #include "v9fs.h"
 #include "v9fs_vfs.h"
 #include "cache.h"
+#include "fid.h"
 
 /**
- * v9fs_vfs_readpage - read an entire page in from 9P
+ * v9fs_fid_readpage - read an entire page in from 9P
  *
- * @filp: file being read
+ * @fid: fid being read
  * @page: structure to page
  *
  */
-
-static int v9fs_vfs_readpage(struct file *filp, struct page *page)
+static int v9fs_fid_readpage(struct p9_fid *fid, struct page *page)
 {
 	int retval;
 	loff_t offset;
@@ -67,7 +67,7 @@ static int v9fs_vfs_readpage(struct file *filp, struct page *page)
 	buffer = kmap(page);
 	offset = page_offset(page);
 
-	retval = v9fs_file_readn(filp, buffer, NULL, PAGE_CACHE_SIZE, offset);
+	retval = v9fs_fid_readn(fid, buffer, NULL, PAGE_CACHE_SIZE, offset);
 	if (retval < 0) {
 		v9fs_uncache_page(inode, page);
 		goto done;
@@ -87,6 +87,19 @@ done:
 }
 
 /**
+ * v9fs_vfs_readpage - read an entire page in from 9P
+ *
+ * @filp: file being read
+ * @page: structure to page
+ *
+ */
+
+static int v9fs_vfs_readpage(struct file *filp, struct page *page)
+{
+	return v9fs_fid_readpage(filp->private_data, page);
+}
+
+/**
  * v9fs_vfs_readpages - read a set of pages from 9P
  *
  * @filp: file being read
@@ -124,7 +137,6 @@ static int v9fs_release_page(struct page *page, gfp_t gfp)
 {
 	if (PagePrivate(page))
 		return 0;
-
 	return v9fs_fscache_release_page(page, gfp);
 }
 
@@ -137,22 +149,87 @@ static int v9fs_release_page(struct page *page, gfp_t gfp)
 
 static void v9fs_invalidate_page(struct page *page, unsigned long offset)
 {
+	/*
+	 * If called with zero offset, we should release
+	 * the private state assocated with the page
+	 */
 	if (offset == 0)
 		v9fs_fscache_invalidate_page(page);
 }
 
+static int v9fs_vfs_writepage_locked(struct page *page)
+{
+	char *buffer;
+	int retval, len;
+	loff_t offset, size;
+	mm_segment_t old_fs;
+	struct inode *inode = page->mapping->host;
+
+	size = i_size_read(inode);
+	if (page->index == size >> PAGE_CACHE_SHIFT)
+		len = size & ~PAGE_CACHE_MASK;
+	else
+		len = PAGE_CACHE_SIZE;
+
+	set_page_writeback(page);
+
+	buffer = kmap(page);
+	offset = page_offset(page);
+
+	old_fs = get_fs();
+	set_fs(get_ds());
+	/* We should have i_private always set */
+	BUG_ON(!inode->i_private);
+
+	retval = v9fs_file_write_internal(inode,
+					  (struct p9_fid *)inode->i_private,
+					  (__force const char __user *)buffer,
+					  len, &offset, 0);
+	if (retval > 0)
+		retval = 0;
+
+	set_fs(old_fs);
+	kunmap(page);
+	end_page_writeback(page);
+	return retval;
+}
+
+static int v9fs_vfs_writepage(struct page *page, struct writeback_control *wbc)
+{
+	int retval;
+
+	retval = v9fs_vfs_writepage_locked(page);
+	if (retval < 0) {
+		if (retval == -EAGAIN) {
+			redirty_page_for_writepage(wbc, page);
+			retval = 0;
+		} else {
+			SetPageError(page);
+			mapping_set_error(page->mapping, retval);
+		}
+	} else
+		retval = 0;
+
+	unlock_page(page);
+	return retval;
+}
+
 /**
  * v9fs_launder_page - Writeback a dirty page
- * Since the writes go directly to the server, we simply return a 0
- * here to indicate success.
- *
  * Returns 0 on success.
  */
 
 static int v9fs_launder_page(struct page *page)
 {
+	int retval;
 	struct inode *inode = page->mapping->host;
+
 	v9fs_fscache_wait_on_page_write(inode, page);
+	if (clear_page_dirty_for_io(page)) {
+		retval = v9fs_vfs_writepage_locked(page);
+		if (retval)
+			return retval;
+	}
 	return 0;
 }
 
@@ -178,6 +255,11 @@ static int v9fs_launder_page(struct page *page)
 ssize_t v9fs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
 			loff_t pos, unsigned long nr_segs)
 {
+	/*
+	 * FIXME
+	 * Now that we do caching with cache mode enabled, We need
+	 * to support direct IO
+	 */
 	P9_DPRINTK(P9_DEBUG_VFS, "v9fs_direct_IO: v9fs_direct_IO (%s) "
 			"off/no(%lld/%lu) EINVAL\n",
 			iocb->ki_filp->f_path.dentry->d_name.name,
@@ -185,11 +267,82 @@ ssize_t v9fs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
 
 	return -EINVAL;
 }
+
+static int v9fs_write_begin(struct file *filp, struct address_space *mapping,
+			    loff_t pos, unsigned len, unsigned flags,
+			    struct page **pagep, void **fsdata)
+{
+	int retval = 0;
+	struct page *page;
+	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+	struct inode *inode = mapping->host;
+
+start:
+	page = grab_cache_page_write_begin(mapping, index, flags);
+	if (!page) {
+		retval = -ENOMEM;
+		goto out;
+	}
+	BUG_ON(!inode->i_private);
+	if (PageUptodate(page))
+		goto out;
+
+	if (len == PAGE_CACHE_SIZE)
+		goto out;
+
+	retval = v9fs_fid_readpage(inode->i_private, page);
+	page_cache_release(page);
+	if (!retval)
+		goto start;
+out:
+	*pagep = page;
+	return retval;
+}
+
+static int v9fs_write_end(struct file *filp, struct address_space *mapping,
+			  loff_t pos, unsigned len, unsigned copied,
+			  struct page *page, void *fsdata)
+{
+	loff_t last_pos = pos + copied;
+	struct inode *inode = page->mapping->host;
+
+	if (unlikely(copied < len)) {
+		/*
+		 * zero out the rest of the area
+		 */
+		unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+
+		zero_user(page, from + copied, len - copied);
+		flush_dcache_page(page);
+	}
+
+	if (!PageUptodate(page))
+		SetPageUptodate(page);
+	/*
+	 * No need to use i_size_read() here, the i_size
+	 * cannot change under us because we hold the i_mutex.
+	 */
+	if (last_pos > inode->i_size) {
+		inode_add_bytes(inode, last_pos - inode->i_size);
+		i_size_write(inode, last_pos);
+	}
+	set_page_dirty(page);
+	unlock_page(page);
+	page_cache_release(page);
+
+	return copied;
+}
+
+
 const struct address_space_operations v9fs_addr_operations = {
 	.readpage = v9fs_vfs_readpage,
 	.readpages = v9fs_vfs_readpages,
-	.releasepage = v9fs_release_page,
-	.invalidatepage = v9fs_invalidate_page,
-	.launder_page = v9fs_launder_page,
-	.direct_IO = v9fs_direct_IO,
+	.set_page_dirty = __set_page_dirty_nobuffers,
+	.writepage = v9fs_vfs_writepage,
+	.write_begin = v9fs_write_begin,
+	.write_end = v9fs_write_end,
+	.releasepage = v9fs_release_page,
+	.invalidatepage = v9fs_invalidate_page,
+	.launder_page = v9fs_launder_page,
+	.direct_IO = v9fs_direct_IO,
 };
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index e966f15f92ec..f7b571ddf99e 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -44,6 +44,8 @@
 #include "fid.h"
 #include "cache.h"
 
+static const struct vm_operations_struct v9fs_file_vm_ops;
+
 /**
  * v9fs_file_open - open a file (or directory)
  * @inode: inode to be opened
@@ -503,6 +505,7 @@ out:
 	return retval;
 }
 
+
 static int v9fs_file_fsync(struct file *filp, int datasync)
 {
 	struct p9_fid *fid;
@@ -532,28 +535,71 @@ int v9fs_file_fsync_dotl(struct file *filp, int datasync)
 	return retval;
 }
 
+static int
+v9fs_file_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	int retval;
+
+	retval = generic_file_mmap(file, vma);
+	if (!retval)
+		vma->vm_ops = &v9fs_file_vm_ops;
+
+	return retval;
+}
+
+static int
+v9fs_vm_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	struct page *page = vmf->page;
+	struct file *filp = vma->vm_file;
+	struct inode *inode = filp->f_path.dentry->d_inode;
+
+
+	P9_DPRINTK(P9_DEBUG_VFS, "page %p fid %lx\n",
+		   page, (unsigned long)filp->private_data);
+
+	/* make sure the cache has finished storing the page */
+	v9fs_fscache_wait_on_page_write(inode, page);
+	BUG_ON(!inode->i_private);
+	lock_page(page);
+	if (page->mapping != inode->i_mapping)
+		goto out_unlock;
+
+	return VM_FAULT_LOCKED;
+out_unlock:
+	unlock_page(page);
+	return VM_FAULT_NOPAGE;
+}
+
+static const struct vm_operations_struct v9fs_file_vm_ops = {
+	.fault = filemap_fault,
+	.page_mkwrite = v9fs_vm_page_mkwrite,
+};
+
 const struct file_operations v9fs_cached_file_operations = {
 	.llseek = generic_file_llseek,
 	.read = do_sync_read,
+	.write = do_sync_write,
 	.aio_read = generic_file_aio_read,
-	.write = v9fs_file_write,
+	.aio_write = generic_file_aio_write,
 	.open = v9fs_file_open,
 	.release = v9fs_dir_release,
 	.lock = v9fs_file_lock,
-	.mmap = generic_file_readonly_mmap,
+	.mmap = v9fs_file_mmap,
 	.fsync = v9fs_file_fsync,
 };
 
 const struct file_operations v9fs_cached_file_operations_dotl = {
 	.llseek = generic_file_llseek,
 	.read = do_sync_read,
+	.write = do_sync_write,
 	.aio_read = generic_file_aio_read,
-	.write = v9fs_file_write,
+	.aio_write = generic_file_aio_write,
 	.open = v9fs_file_open,
 	.release = v9fs_dir_release,
 	.lock = v9fs_file_lock_dotl,
 	.flock = v9fs_file_flock_dotl,
-	.mmap = generic_file_readonly_mmap,
+	.mmap = v9fs_file_mmap,
 	.fsync = v9fs_file_fsync_dotl,
 };
 
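
For the ordinary read()/write() side, the write_begin/write_end pair added in vfs_addr.c is driven by the generic buffered write path (generic_file_aio_write, reached through the do_sync_write and aio_write entries added to the cached file_operations above), so a plain buffered write followed by fsync() now goes through the page cache and is pushed to the server during writeback. Another illustration-only sketch, with the same hypothetical mount-point assumption as before:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* hypothetical file on a cache-enabled 9p mount */
	int fd = open("/mnt/9p/log.txt", O_WRONLY | O_CREAT | O_APPEND, 0644);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	const char msg[] = "buffered write through the page cache\n";
	/* the copy into page-cache pages goes via ->write_begin/->write_end */
	if (write(fd, msg, sizeof(msg) - 1) < 0)
		perror("write");
	/* fsync() forces the dirty pages out (via ->writepage) before the 9p fsync */
	if (fsync(fd) < 0)
		perror("fsync");
	close(fd);
	return 0;
}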