Diffstat (limited to 'fs/9p/vfs_addr.c')
 -rw-r--r--	fs/9p/vfs_addr.c | 183
 1 file changed, 168 insertions(+), 15 deletions(-)
diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
index 637bd703e28c..566684ce55e2 100644
--- a/fs/9p/vfs_addr.c
+++ b/fs/9p/vfs_addr.c
@@ -39,16 +39,16 @@
 #include "v9fs.h"
 #include "v9fs_vfs.h"
 #include "cache.h"
+#include "fid.h"
 
 /**
- * v9fs_vfs_readpage - read an entire page in from 9P
+ * v9fs_fid_readpage - read an entire page in from 9P
  *
- * @filp: file being read
+ * @fid: fid being read
  * @page: structure to page
  *
  */
-
-static int v9fs_vfs_readpage(struct file *filp, struct page *page)
+static int v9fs_fid_readpage(struct p9_fid *fid, struct page *page)
 {
 	int retval;
 	loff_t offset;
@@ -67,7 +67,7 @@ static int v9fs_vfs_readpage(struct file *filp, struct page *page)
 	buffer = kmap(page);
 	offset = page_offset(page);
 
-	retval = v9fs_file_readn(filp, buffer, NULL, PAGE_CACHE_SIZE, offset);
+	retval = v9fs_fid_readn(fid, buffer, NULL, PAGE_CACHE_SIZE, offset);
 	if (retval < 0) {
 		v9fs_uncache_page(inode, page);
 		goto done;
@@ -87,6 +87,19 @@ done:
 }
 
 /**
+ * v9fs_vfs_readpage - read an entire page in from 9P
+ *
+ * @filp: file being read
+ * @page: structure to page
+ *
+ */
+
+static int v9fs_vfs_readpage(struct file *filp, struct page *page)
+{
+	return v9fs_fid_readpage(filp->private_data, page);
+}
+
+/**
  * v9fs_vfs_readpages - read a set of pages from 9P
  *
  * @filp: file being read
@@ -124,7 +137,6 @@ static int v9fs_release_page(struct page *page, gfp_t gfp)
 {
 	if (PagePrivate(page))
 		return 0;
-
 	return v9fs_fscache_release_page(page, gfp);
 }
 
@@ -137,22 +149,87 @@ static int v9fs_release_page(struct page *page, gfp_t gfp)
 
 static void v9fs_invalidate_page(struct page *page, unsigned long offset)
 {
+	/*
+	 * If called with zero offset, we should release
+	 * the private state assocated with the page
+	 */
 	if (offset == 0)
 		v9fs_fscache_invalidate_page(page);
 }
 
+static int v9fs_vfs_writepage_locked(struct page *page)
+{
+	char *buffer;
+	int retval, len;
+	loff_t offset, size;
+	mm_segment_t old_fs;
+	struct inode *inode = page->mapping->host;
+
+	size = i_size_read(inode);
+	if (page->index == size >> PAGE_CACHE_SHIFT)
+		len = size & ~PAGE_CACHE_MASK;
+	else
+		len = PAGE_CACHE_SIZE;
+
+	set_page_writeback(page);
+
+	buffer = kmap(page);
+	offset = page_offset(page);
+
+	old_fs = get_fs();
+	set_fs(get_ds());
+	/* We should have i_private always set */
+	BUG_ON(!inode->i_private);
+
+	retval = v9fs_file_write_internal(inode,
+					  (struct p9_fid *)inode->i_private,
+					  (__force const char __user *)buffer,
+					  len, &offset, 0);
+	if (retval > 0)
+		retval = 0;
+
+	set_fs(old_fs);
+	kunmap(page);
+	end_page_writeback(page);
+	return retval;
+}
+
+static int v9fs_vfs_writepage(struct page *page, struct writeback_control *wbc)
+{
+	int retval;
+
+	retval = v9fs_vfs_writepage_locked(page);
+	if (retval < 0) {
+		if (retval == -EAGAIN) {
+			redirty_page_for_writepage(wbc, page);
+			retval = 0;
+		} else {
+			SetPageError(page);
+			mapping_set_error(page->mapping, retval);
+		}
+	} else
+		retval = 0;
+
+	unlock_page(page);
+	return retval;
+}
+
 /**
  * v9fs_launder_page - Writeback a dirty page
- * Since the writes go directly to the server, we simply return a 0
- * here to indicate success.
- *
  * Returns 0 on success.
  */
 
 static int v9fs_launder_page(struct page *page)
 {
+	int retval;
 	struct inode *inode = page->mapping->host;
+
 	v9fs_fscache_wait_on_page_write(inode, page);
+	if (clear_page_dirty_for_io(page)) {
+		retval = v9fs_vfs_writepage_locked(page);
+		if (retval)
+			return retval;
+	}
 	return 0;
 }
 
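
A note on the get_fs()/set_fs() pair in v9fs_vfs_writepage_locked() above: v9fs_file_write_internal() expects a __user buffer, but the data being written lives in a kernel mapping of the page, so the address limit is temporarily widened to the kernel data segment for the duration of the call and then restored. A minimal sketch of that pattern, assuming a hypothetical helper do_user_write() that takes a __user pointer:

#include <linux/uaccess.h>

/*
 * Sketch only; do_user_write() is hypothetical. The point is the
 * save/widen/restore sequence around a call that expects a __user buffer.
 */
static int write_kernel_buffer(const char *kbuf, size_t len, loff_t *pos)
{
	mm_segment_t old_fs = get_fs();	/* save the current address limit */
	int ret;

	set_fs(get_ds());		/* allow kernel addresses in the copy helpers */
	ret = do_user_write((__force const char __user *)kbuf, len, pos);
	set_fs(old_fs);			/* always restore the old limit */
	return ret;
}
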
@@ -178,6 +255,11 @@ static int v9fs_launder_page(struct page *page)
 ssize_t v9fs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
 			loff_t pos, unsigned long nr_segs)
 {
+	/*
+	 * FIXME
+	 * Now that we do caching with cache mode enabled, We need
+	 * to support direct IO
+	 */
 	P9_DPRINTK(P9_DEBUG_VFS, "v9fs_direct_IO: v9fs_direct_IO (%s) "
 			"off/no(%lld/%lu) EINVAL\n",
 			iocb->ki_filp->f_path.dentry->d_name.name,
@@ -185,11 +267,82 @@ ssize_t v9fs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
 
 	return -EINVAL;
 }
+
+static int v9fs_write_begin(struct file *filp, struct address_space *mapping,
+			    loff_t pos, unsigned len, unsigned flags,
+			    struct page **pagep, void **fsdata)
+{
+	int retval = 0;
+	struct page *page;
+	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+	struct inode *inode = mapping->host;
+
+start:
+	page = grab_cache_page_write_begin(mapping, index, flags);
+	if (!page) {
+		retval = -ENOMEM;
+		goto out;
+	}
+	BUG_ON(!inode->i_private);
+	if (PageUptodate(page))
+		goto out;
+
+	if (len == PAGE_CACHE_SIZE)
+		goto out;
+
+	retval = v9fs_fid_readpage(inode->i_private, page);
+	page_cache_release(page);
+	if (!retval)
+		goto start;
+out:
+	*pagep = page;
+	return retval;
+}
+
+static int v9fs_write_end(struct file *filp, struct address_space *mapping,
+			  loff_t pos, unsigned len, unsigned copied,
+			  struct page *page, void *fsdata)
+{
+	loff_t last_pos = pos + copied;
+	struct inode *inode = page->mapping->host;
+
+	if (unlikely(copied < len)) {
+		/*
+		 * zero out the rest of the area
+		 */
+		unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+
+		zero_user(page, from + copied, len - copied);
+		flush_dcache_page(page);
+	}
+
+	if (!PageUptodate(page))
+		SetPageUptodate(page);
+	/*
+	 * No need to use i_size_read() here, the i_size
+	 * cannot change under us because we hold the i_mutex.
+	 */
+	if (last_pos > inode->i_size) {
+		inode_add_bytes(inode, last_pos - inode->i_size);
+		i_size_write(inode, last_pos);
+	}
+	set_page_dirty(page);
+	unlock_page(page);
+	page_cache_release(page);
+
+	return copied;
+}
+
+
 const struct address_space_operations v9fs_addr_operations = {
-      .readpage = v9fs_vfs_readpage,
-      .readpages = v9fs_vfs_readpages,
-      .releasepage = v9fs_release_page,
-      .invalidatepage = v9fs_invalidate_page,
-      .launder_page = v9fs_launder_page,
-      .direct_IO = v9fs_direct_IO,
+	.readpage = v9fs_vfs_readpage,
+	.readpages = v9fs_vfs_readpages,
+	.set_page_dirty = __set_page_dirty_nobuffers,
+	.writepage = v9fs_vfs_writepage,
+	.write_begin = v9fs_write_begin,
+	.write_end = v9fs_write_end,
+	.releasepage = v9fs_release_page,
+	.invalidatepage = v9fs_invalidate_page,
+	.launder_page = v9fs_launder_page,
+	.direct_IO = v9fs_direct_IO,
 };
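
Taken together, the new entries in v9fs_addr_operations wire 9p into the regular buffered write path: data from write(2) is copied into page-cache pages through v9fs_write_begin()/v9fs_write_end(), the pages are marked dirty, and writeback (or fsync, or launder_page) later pushes them to the server via v9fs_vfs_writepage(). The userspace sketch below is illustrative only; it assumes a 9p mount at the hypothetical path /mnt/9p using a caching mode (for example -o cache=loose) and simply does a buffered write followed by fsync(2), which exercises the write_begin/write_end and writepage paths added here.

/*
 * Illustrative only: a buffered write plus fsync on a (hypothetical) 9p
 * mount at /mnt/9p. The data reaches the server during writeback.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char buf[8192];
	int fd;

	memset(buf, 'x', sizeof(buf));
	fd = open("/mnt/9p/writeback-test", O_CREAT | O_WRONLY | O_TRUNC, 0644);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Copied into the page cache via ->write_begin/->write_end; the
	 * pages are only dirtied here, nothing goes on the wire yet. */
	if (write(fd, buf, sizeof(buf)) != (ssize_t)sizeof(buf)) {
		perror("write");
		return 1;
	}
	/* Forces writeback, which reaches the server through ->writepage. */
	if (fsync(fd) < 0) {
		perror("fsync");
		return 1;
	}
	close(fd);
	return 0;
}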