author		Trond Myklebust <Trond.Myklebust@netapp.com>	2006-12-05 00:35:40 -0500
committer	Trond Myklebust <Trond.Myklebust@netapp.com>	2006-12-06 10:46:38 -0500
commit		200baa2112012dd8a13db9da3ee6885403f9c013 (patch)
tree		261f9c2b85927c2dfaaecfb7ab808d353bd84da4	/fs/nfs/write.c
parent		e21195a740533348e77efa8a2e2cf03bb4092b2b (diff)
NFS: Remove nfs_writepage_sync()
Maintaining two parallel ways of doing synchronous writes is rather pointless. This patch gets rid of the legacy nfs_writepage_sync(), and replaces it with the faster asynchronous writes.

Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
Diffstat (limited to 'fs/nfs/write.c')
-rw-r--r--	fs/nfs/write.c	100
1 file changed, 4 insertions(+), 96 deletions(-)
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index de9a16a8f7e4..f0720b544b12 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -210,78 +210,6 @@ static void nfs_mark_uptodate(struct page *page, unsigned int base, unsigned int
 	SetPageUptodate(page);
 }
 
-/*
- * Write a page synchronously.
- * Offset is the data offset within the page.
- */
-static int nfs_writepage_sync(struct nfs_open_context *ctx, struct page *page,
-		unsigned int offset, unsigned int count, int how)
-{
-	struct inode *inode = page->mapping->host;
-	unsigned int wsize = NFS_SERVER(inode)->wsize;
-	int result, written = 0;
-	struct nfs_write_data *wdata;
-
-	wdata = nfs_writedata_alloc(wsize);
-	if (!wdata)
-		return -ENOMEM;
-
-	wdata->flags = how;
-	wdata->cred = ctx->cred;
-	wdata->inode = inode;
-	wdata->args.fh = NFS_FH(inode);
-	wdata->args.context = ctx;
-	wdata->args.pages = &page;
-	wdata->args.stable = NFS_FILE_SYNC;
-	wdata->args.pgbase = offset;
-	wdata->args.count = wsize;
-	wdata->res.fattr = &wdata->fattr;
-	wdata->res.verf = &wdata->verf;
-
-	dprintk("NFS: nfs_writepage_sync(%s/%Ld %d@%Ld)\n",
-		inode->i_sb->s_id,
-		(long long)NFS_FILEID(inode),
-		count, (long long)(page_offset(page) + offset));
-
-	set_page_writeback(page);
-	nfs_begin_data_update(inode);
-	do {
-		if (count < wsize)
-			wdata->args.count = count;
-		wdata->args.offset = page_offset(page) + wdata->args.pgbase;
-
-		result = NFS_PROTO(inode)->write(wdata);
-
-		if (result < 0) {
-			/* Must mark the page invalid after I/O error */
-			ClearPageUptodate(page);
-			goto io_error;
-		}
-		if (result < wdata->args.count)
-			printk(KERN_WARNING "NFS: short write, count=%u, result=%d\n",
-					wdata->args.count, result);
-
-		wdata->args.offset += result;
-		wdata->args.pgbase += result;
-		written += result;
-		count -= result;
-		nfs_add_stats(inode, NFSIOS_SERVERWRITTENBYTES, result);
-	} while (count);
-	/* Update file length */
-	nfs_grow_file(page, offset, written);
-	/* Set the PG_uptodate flag? */
-	nfs_mark_uptodate(page, offset, written);
-
-	if (PageError(page))
-		ClearPageError(page);
-
-io_error:
-	nfs_end_data_update(inode);
-	end_page_writeback(page);
-	nfs_writedata_release(wdata);
-	return written ? written : result;
-}
-
 static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
 		unsigned int offset, unsigned int count)
 {
@@ -342,22 +270,12 @@ int nfs_writepage(struct page *page, struct writeback_control *wbc)
 		err = -EBADF;
 		goto out;
 	}
-	lock_kernel();
-	if (!IS_SYNC(inode)) {
-		err = nfs_writepage_setup(ctx, page, 0, offset);
-		if (!wbc->for_writepages)
-			nfs_flush_mapping(page->mapping, wbc, wb_priority(wbc));
-	} else {
-		err = nfs_writepage_sync(ctx, page, 0, offset, wb_priority(wbc));
-		if (err >= 0) {
-			if (err != offset)
-				redirty_page_for_writepage(wbc, page);
-			err = 0;
-		}
-	}
-	unlock_kernel();
+	err = nfs_writepage_setup(ctx, page, 0, offset);
 	put_nfs_open_context(ctx);
+
 out:
+	if (!wbc->for_writepages)
+		nfs_flush_mapping(page->mapping, wbc, wb_priority(wbc));
 	unlock_page(page);
 	return err;
 }
@@ -777,16 +695,6 @@ int nfs_updatepage(struct file *file, struct page *page,
 		file->f_dentry->d_name.name, count,
 		(long long)(page_offset(page) +offset));
 
-	if (IS_SYNC(inode)) {
-		status = nfs_writepage_sync(ctx, page, offset, count, 0);
-		if (status > 0) {
-			if (offset == 0 && status == PAGE_CACHE_SIZE)
-				SetPageUptodate(page);
-			return 0;
-		}
-		return status;
-	}
-
 	/* If we're not using byte range locks, and we know the page
 	 * is entirely in cache, it may be more efficient to avoid
 	 * fragmenting write requests.
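
For reference, this is roughly how nfs_writepage() reads once the patch is applied. It is a sketch assembled only from the second hunk above, not a verbatim copy of the resulting file: the unchanged beginning of the function, which looks up the open context 'ctx' and computes 'offset', is replaced by a comment, and the explanatory comments are added here rather than taken from the source.

int nfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct nfs_open_context *ctx;
	unsigned int offset;	/* bytes of the page to write, set by the elided prologue */
	int err;

	/*
	 * Elided: the unchanged start of the function, which looks up the
	 * open context 'ctx' and computes 'offset'; on failure it sets
	 * err = -EBADF and jumps to 'out' (visible as context in the hunk
	 * above).
	 */

	/* Every write is now queued through the asynchronous path. */
	err = nfs_writepage_setup(ctx, page, 0, offset);
	put_nfs_open_context(ctx);

out:
	/*
	 * A lone ->writepage() call (not driven by ->writepages()) flushes
	 * the freshly queued request itself, so it still reaches the server.
	 */
	if (!wbc->for_writepages)
		nfs_flush_mapping(page->mapping, wbc, wb_priority(wbc));
	unlock_page(page);
	return err;
}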