Diffstat (limited to 'fs/nfs/write.c')
-rw-r--r--	fs/nfs/write.c	89
1 file changed, 55 insertions(+), 34 deletions(-)
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index e4a2ad2059bd..5829d0ce7cfb 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -52,7 +52,7 @@ static mempool_t *nfs_commit_mempool;
 
 struct nfs_commit_data *nfs_commitdata_alloc(void)
 {
-	struct nfs_commit_data *p = mempool_alloc(nfs_commit_mempool, GFP_NOFS);
+	struct nfs_commit_data *p = mempool_alloc(nfs_commit_mempool, GFP_NOIO);
 
 	if (p) {
 		memset(p, 0, sizeof(*p));
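The switch from GFP_NOFS to GFP_NOIO here (and in nfs_writehdr_alloc below) matters once these allocations can sit on the swap-out path: GFP_NOFS only forbids recursing into filesystem code during reclaim, while GFP_NOIO also forbids starting any block or swap I/O, which could otherwise recurse back into the NFS write path when the page under reclaim is itself swap-over-NFS. For reference, the flag definitions in this era's include/linux/gfp.h are roughly:

	#define GFP_NOIO	(__GFP_WAIT)				/* may sleep, but no I/O and no FS recursion */
	#define GFP_NOFS	(__GFP_WAIT | __GFP_IO)			/* may start I/O, but no FS recursion */
	#define GFP_KERNEL	(__GFP_WAIT | __GFP_IO | __GFP_FS)	/* normal kernel allocation */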
@@ -70,7 +70,7 @@ EXPORT_SYMBOL_GPL(nfs_commit_free);
 
 struct nfs_write_header *nfs_writehdr_alloc(void)
 {
-	struct nfs_write_header *p = mempool_alloc(nfs_wdata_mempool, GFP_NOFS);
+	struct nfs_write_header *p = mempool_alloc(nfs_wdata_mempool, GFP_NOIO);
 
 	if (p) {
 		struct nfs_pgio_header *hdr = &p->header;
@@ -142,25 +142,38 @@ static void nfs_context_set_write_error(struct nfs_open_context *ctx, int error)
 	set_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
 }
 
-static struct nfs_page *nfs_page_find_request_locked(struct page *page)
+static struct nfs_page *
+nfs_page_find_request_locked(struct nfs_inode *nfsi, struct page *page)
 {
 	struct nfs_page *req = NULL;
 
-	if (PagePrivate(page)) {
+	if (PagePrivate(page))
 		req = (struct nfs_page *)page_private(page);
-		if (req != NULL)
-			kref_get(&req->wb_kref);
+	else if (unlikely(PageSwapCache(page))) {
+		struct nfs_page *freq, *t;
+
+		/* Linearly search the commit list for the correct req */
+		list_for_each_entry_safe(freq, t, &nfsi->commit_info.list, wb_list) {
+			if (freq->wb_page == page) {
+				req = freq;
+				break;
+			}
+		}
 	}
+
+	if (req)
+		kref_get(&req->wb_kref);
+
 	return req;
 }
 
 static struct nfs_page *nfs_page_find_request(struct page *page)
 {
-	struct inode *inode = page->mapping->host;
+	struct inode *inode = page_file_mapping(page)->host;
 	struct nfs_page *req = NULL;
 
 	spin_lock(&inode->i_lock);
-	req = nfs_page_find_request_locked(page);
+	req = nfs_page_find_request_locked(NFS_I(inode), page);
 	spin_unlock(&inode->i_lock);
 	return req;
 }
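page_file_mapping(), used here in place of page->mapping, is expected to come from the companion mm patches in this series: for a swap-cache page, page->mapping does not point at the NFS inode's address_space, so the mapping has to be resolved via the page's swap entry. A sketch of what that helper looks like (the actual definition lives in include/linux/mm.h, not in this diff):

	static inline struct address_space *page_file_mapping(struct page *page)
	{
		if (unlikely(PageSwapCache(page)))
			return __page_file_mapping(page);	/* resolve via the swap entry */
		return page->mapping;				/* ordinary page cache page */
	}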
@@ -168,16 +181,16 @@ static struct nfs_page *nfs_page_find_request(struct page *page)
 /* Adjust the file length if we're writing beyond the end */
 static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count)
 {
-	struct inode *inode = page->mapping->host;
+	struct inode *inode = page_file_mapping(page)->host;
 	loff_t end, i_size;
 	pgoff_t end_index;
 
 	spin_lock(&inode->i_lock);
 	i_size = i_size_read(inode);
 	end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;
-	if (i_size > 0 && page->index < end_index)
+	if (i_size > 0 && page_file_index(page) < end_index)
 		goto out;
-	end = ((loff_t)page->index << PAGE_CACHE_SHIFT) + ((loff_t)offset+count);
+	end = page_file_offset(page) + ((loff_t)offset+count);
 	if (i_size >= end)
 		goto out;
 	i_size_write(inode, end);
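page_file_index() and page_file_offset() follow the same pattern: for a swap-cache page, page->index is an offset into swap space rather than into the file, so the file index has to be recovered from the swap entry. A sketch of those companion helpers, assuming the definitions introduced alongside this patch in include/linux/mm.h and include/linux/pagemap.h:

	static inline pgoff_t page_file_index(struct page *page)
	{
		if (unlikely(PageSwapCache(page)))
			return __page_file_index(page);	/* index within the backing swap file */
		return page->index;
	}

	static inline loff_t page_file_offset(struct page *page)
	{
		return ((loff_t)page_file_index(page)) << PAGE_CACHE_SHIFT;
	}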
@@ -190,7 +203,7 @@ out:
 static void nfs_set_pageerror(struct page *page)
 {
 	SetPageError(page);
-	nfs_zap_mapping(page->mapping->host, page->mapping);
+	nfs_zap_mapping(page_file_mapping(page)->host, page_file_mapping(page));
 }
 
 /* We can set the PG_uptodate flag if we see that a write request
@@ -231,7 +244,7 @@ static int nfs_set_page_writeback(struct page *page)
 	int ret = test_set_page_writeback(page);
 
 	if (!ret) {
-		struct inode *inode = page->mapping->host;
+		struct inode *inode = page_file_mapping(page)->host;
 		struct nfs_server *nfss = NFS_SERVER(inode);
 
 		if (atomic_long_inc_return(&nfss->writeback) >
@@ -245,7 +258,7 @@ static int nfs_set_page_writeback(struct page *page)
 
 static void nfs_end_page_writeback(struct page *page)
 {
-	struct inode *inode = page->mapping->host;
+	struct inode *inode = page_file_mapping(page)->host;
 	struct nfs_server *nfss = NFS_SERVER(inode);
 
 	end_page_writeback(page);
@@ -255,13 +268,13 @@ static void nfs_end_page_writeback(struct page *page)
 
 static struct nfs_page *nfs_find_and_lock_request(struct page *page, bool nonblock)
 {
-	struct inode *inode = page->mapping->host;
+	struct inode *inode = page_file_mapping(page)->host;
 	struct nfs_page *req;
 	int ret;
 
 	spin_lock(&inode->i_lock);
 	for (;;) {
-		req = nfs_page_find_request_locked(page);
+		req = nfs_page_find_request_locked(NFS_I(inode), page);
 		if (req == NULL)
 			break;
 		if (nfs_lock_request(req))
@@ -316,13 +329,13 @@ out:
 
 static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, struct nfs_pageio_descriptor *pgio)
 {
-	struct inode *inode = page->mapping->host;
+	struct inode *inode = page_file_mapping(page)->host;
 	int ret;
 
 	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
 	nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);
 
-	nfs_pageio_cond_complete(pgio, page->index);
+	nfs_pageio_cond_complete(pgio, page_file_index(page));
 	ret = nfs_page_async_flush(pgio, page, wbc->sync_mode == WB_SYNC_NONE);
 	if (ret == -EAGAIN) {
 		redirty_page_for_writepage(wbc, page);
@@ -339,7 +352,7 @@ static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc
 	struct nfs_pageio_descriptor pgio;
 	int err;
 
-	NFS_PROTO(page->mapping->host)->write_pageio_init(&pgio,
+	NFS_PROTO(page_file_mapping(page)->host)->write_pageio_init(&pgio,
 			      page->mapping->host,
 			      wb_priority(wbc),
 			      &nfs_async_write_completion_ops);
@@ -416,9 +429,15 @@ static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
 	spin_lock(&inode->i_lock);
 	if (!nfsi->npages && NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
 		inode->i_version++;
-	set_bit(PG_MAPPED, &req->wb_flags);
-	SetPagePrivate(req->wb_page);
-	set_page_private(req->wb_page, (unsigned long)req);
+	/*
+	 * Swap-space should not get truncated. Hence no need to plug the race
+	 * with invalidate/truncate.
+	 */
+	if (likely(!PageSwapCache(req->wb_page))) {
+		set_bit(PG_MAPPED, &req->wb_flags);
+		SetPagePrivate(req->wb_page);
+		set_page_private(req->wb_page, (unsigned long)req);
+	}
 	nfsi->npages++;
 	kref_get(&req->wb_kref);
 	spin_unlock(&inode->i_lock);
@@ -435,9 +454,11 @@ static void nfs_inode_remove_request(struct nfs_page *req)
 	BUG_ON (!NFS_WBACK_BUSY(req));
 
 	spin_lock(&inode->i_lock);
-	set_page_private(req->wb_page, 0);
-	ClearPagePrivate(req->wb_page);
-	clear_bit(PG_MAPPED, &req->wb_flags);
+	if (likely(!PageSwapCache(req->wb_page))) {
+		set_page_private(req->wb_page, 0);
+		ClearPagePrivate(req->wb_page);
+		clear_bit(PG_MAPPED, &req->wb_flags);
+	}
 	nfsi->npages--;
 	spin_unlock(&inode->i_lock);
 	nfs_release_request(req);
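The two hunks above skip the page_private() bookkeeping for swap-cache pages because page->private on such a page already holds the swap entry used by the swap code, so the nfs_page cannot be cached there; that is also why nfs_page_find_request_locked() falls back to scanning the inode's commit list. A hypothetical helper (illustrative only, not part of the patch) that makes the two lookup paths explicit:

	/* Illustrative only: how a request is located for each page type. */
	static struct nfs_page *nfs_lookup_request_sketch(struct nfs_inode *nfsi,
							  struct page *page)
	{
		struct nfs_page *req;

		if (PagePrivate(page))		/* ordinary page: req cached in page->private */
			return (struct nfs_page *)page_private(page);
		if (PageSwapCache(page))	/* swap page: page->private holds the swap entry */
			list_for_each_entry(req, &nfsi->commit_info.list, wb_list)
				if (req->wb_page == page)
					return req;
		return NULL;
	}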
@@ -474,7 +495,7 @@ nfs_request_add_commit_list(struct nfs_page *req, struct list_head *dst,
 	spin_unlock(cinfo->lock);
 	if (!cinfo->dreq) {
 		inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
-		inc_bdi_stat(req->wb_page->mapping->backing_dev_info,
+		inc_bdi_stat(page_file_mapping(req->wb_page)->backing_dev_info,
 			     BDI_RECLAIMABLE);
 		__mark_inode_dirty(req->wb_context->dentry->d_inode,
 				   I_DIRTY_DATASYNC);
@@ -541,7 +562,7 @@ static void
 nfs_clear_page_commit(struct page *page)
 {
 	dec_zone_page_state(page, NR_UNSTABLE_NFS);
-	dec_bdi_stat(page->mapping->backing_dev_info, BDI_RECLAIMABLE);
+	dec_bdi_stat(page_file_mapping(page)->backing_dev_info, BDI_RECLAIMABLE);
 }
 
 static void
@@ -733,7 +754,7 @@ static struct nfs_page *nfs_try_to_update_request(struct inode *inode,
 	spin_lock(&inode->i_lock);
 
 	for (;;) {
-		req = nfs_page_find_request_locked(page);
+		req = nfs_page_find_request_locked(NFS_I(inode), page);
 		if (req == NULL)
 			goto out_unlock;
 
@@ -792,7 +813,7 @@ out_err:
 static struct nfs_page * nfs_setup_write_request(struct nfs_open_context* ctx,
 		struct page *page, unsigned int offset, unsigned int bytes)
 {
-	struct inode *inode = page->mapping->host;
+	struct inode *inode = page_file_mapping(page)->host;
 	struct nfs_page *req;
 
 	req = nfs_try_to_update_request(inode, page, offset, bytes);
@@ -845,7 +866,7 @@ int nfs_flush_incompatible(struct file *file, struct page *page)
 		nfs_release_request(req);
 		if (!do_flush)
 			return 0;
-		status = nfs_wb_page(page->mapping->host, page);
+		status = nfs_wb_page(page_file_mapping(page)->host, page);
 	} while (status == 0);
 	return status;
 }
@@ -875,7 +896,7 @@ int nfs_updatepage(struct file *file, struct page *page,
 		unsigned int offset, unsigned int count)
 {
 	struct nfs_open_context *ctx = nfs_file_open_context(file);
-	struct inode *inode = page->mapping->host;
+	struct inode *inode = page_file_mapping(page)->host;
 	int status = 0;
 
 	nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);
@@ -883,7 +904,7 @@ int nfs_updatepage(struct file *file, struct page *page,
 	dprintk("NFS: nfs_updatepage(%s/%s %d@%lld)\n",
 		file->f_path.dentry->d_parent->d_name.name,
 		file->f_path.dentry->d_name.name, count,
-		(long long)(page_offset(page) + offset));
+		(long long)(page_file_offset(page) + offset));
 
 	/* If we're not using byte range locks, and we know the page
 	 * is up to date, it may be more efficient to extend the write
@@ -1474,7 +1495,7 @@ void nfs_retry_commit(struct list_head *page_list,
 		nfs_mark_request_commit(req, lseg, cinfo);
 		if (!cinfo->dreq) {
 			dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
-			dec_bdi_stat(req->wb_page->mapping->backing_dev_info,
+			dec_bdi_stat(page_file_mapping(req->wb_page)->backing_dev_info,
 				     BDI_RECLAIMABLE);
 		}
 		nfs_unlock_and_release_request(req);
@@ -1731,7 +1752,7 @@ int nfs_wb_page_cancel(struct inode *inode, struct page *page)
  */
 int nfs_wb_page(struct inode *inode, struct page *page)
 {
-	loff_t range_start = page_offset(page);
+	loff_t range_start = page_file_offset(page);
 	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
 	struct writeback_control wbc = {
 		.sync_mode = WB_SYNC_ALL,