aboutsummaryrefslogtreecommitdiffstats
path: root/fs/afs/write.c
diff options
context:
space:
mode:
authorKirill A. Shutemov <kirill.shutemov@linux.intel.com>2016-04-01 08:29:47 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2016-04-04 13:41:08 -0400
commit09cbfeaf1a5a67bfb3201e0c83c810cecb2efa5a (patch)
tree6cdf210c9c0f981cd22544feeba701892ec19464 /fs/afs/write.c
parentc05c2ec96bb8b7310da1055c7b9d786a3ec6dc0c (diff)
mm, fs: get rid of PAGE_CACHE_* and page_cache_{get,release} macros
PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} macros were introduced a *long* time ago with the promise that one day it would be possible to implement the page cache with bigger chunks than PAGE_SIZE. This promise never materialized, and it is unlikely it ever will. We have many places where PAGE_CACHE_SIZE is assumed to be equal to PAGE_SIZE, and it's a constant source of confusion on whether PAGE_CACHE_* or PAGE_* constants should be used in a particular case, especially on the border between fs and mm. Globally switching to PAGE_CACHE_SIZE != PAGE_SIZE would cause too much breakage to be doable. Let's stop pretending that pages in the page cache are special. They are not. The changes are pretty straightforward: - <foo> << (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>; - <foo> >> (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>; - PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} -> PAGE_{SIZE,SHIFT,MASK,ALIGN}; - page_cache_get() -> get_page(); - page_cache_release() -> put_page(); This patch contains automated changes generated with coccinelle using the script below. For some reason, coccinelle doesn't patch header files; I've called spatch for them manually. The only adjustment after coccinelle is the revert of changes to the PAGE_CACHE_ALIGN definition: we are going to drop it later. There are a few places in the code that coccinelle didn't reach. I'll fix them manually in a separate patch. Comments and documentation will also be addressed in a separate patch. virtual patch @@ expression E; @@ - E << (PAGE_CACHE_SHIFT - PAGE_SHIFT) + E @@ expression E; @@ - E >> (PAGE_CACHE_SHIFT - PAGE_SHIFT) + E @@ @@ - PAGE_CACHE_SHIFT + PAGE_SHIFT @@ @@ - PAGE_CACHE_SIZE + PAGE_SIZE @@ @@ - PAGE_CACHE_MASK + PAGE_MASK @@ expression E; @@ - PAGE_CACHE_ALIGN(E) + PAGE_ALIGN(E) @@ expression E; @@ - page_cache_get(E) + get_page(E) @@ expression E; @@ - page_cache_release(E) + put_page(E) Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com> Acked-by: Michal Hocko <mhocko@suse.com> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'fs/afs/write.c')
-rw-r--r--fs/afs/write.c26
1 files changed, 13 insertions, 13 deletions
diff --git a/fs/afs/write.c b/fs/afs/write.c
index dfef94f70667..65de439bdc4f 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -93,10 +93,10 @@ static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
93 _enter(",,%llu", (unsigned long long)pos); 93 _enter(",,%llu", (unsigned long long)pos);
94 94
95 i_size = i_size_read(&vnode->vfs_inode); 95 i_size = i_size_read(&vnode->vfs_inode);
96 if (pos + PAGE_CACHE_SIZE > i_size) 96 if (pos + PAGE_SIZE > i_size)
97 len = i_size - pos; 97 len = i_size - pos;
98 else 98 else
99 len = PAGE_CACHE_SIZE; 99 len = PAGE_SIZE;
100 100
101 ret = afs_vnode_fetch_data(vnode, key, pos, len, page); 101 ret = afs_vnode_fetch_data(vnode, key, pos, len, page);
102 if (ret < 0) { 102 if (ret < 0) {
@@ -123,9 +123,9 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
123 struct afs_vnode *vnode = AFS_FS_I(file_inode(file)); 123 struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
124 struct page *page; 124 struct page *page;
125 struct key *key = file->private_data; 125 struct key *key = file->private_data;
126 unsigned from = pos & (PAGE_CACHE_SIZE - 1); 126 unsigned from = pos & (PAGE_SIZE - 1);
127 unsigned to = from + len; 127 unsigned to = from + len;
128 pgoff_t index = pos >> PAGE_CACHE_SHIFT; 128 pgoff_t index = pos >> PAGE_SHIFT;
129 int ret; 129 int ret;
130 130
131 _enter("{%x:%u},{%lx},%u,%u", 131 _enter("{%x:%u},{%lx},%u,%u",
@@ -151,8 +151,8 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
151 *pagep = page; 151 *pagep = page;
152 /* page won't leak in error case: it eventually gets cleaned off LRU */ 152 /* page won't leak in error case: it eventually gets cleaned off LRU */
153 153
154 if (!PageUptodate(page) && len != PAGE_CACHE_SIZE) { 154 if (!PageUptodate(page) && len != PAGE_SIZE) {
155 ret = afs_fill_page(vnode, key, index << PAGE_CACHE_SHIFT, page); 155 ret = afs_fill_page(vnode, key, index << PAGE_SHIFT, page);
156 if (ret < 0) { 156 if (ret < 0) {
157 kfree(candidate); 157 kfree(candidate);
158 _leave(" = %d [prep]", ret); 158 _leave(" = %d [prep]", ret);
@@ -266,7 +266,7 @@ int afs_write_end(struct file *file, struct address_space *mapping,
266 if (PageDirty(page)) 266 if (PageDirty(page))
267 _debug("dirtied"); 267 _debug("dirtied");
268 unlock_page(page); 268 unlock_page(page);
269 page_cache_release(page); 269 put_page(page);
270 270
271 return copied; 271 return copied;
272} 272}
@@ -480,7 +480,7 @@ static int afs_writepages_region(struct address_space *mapping,
480 480
481 if (page->index > end) { 481 if (page->index > end) {
482 *_next = index; 482 *_next = index;
483 page_cache_release(page); 483 put_page(page);
484 _leave(" = 0 [%lx]", *_next); 484 _leave(" = 0 [%lx]", *_next);
485 return 0; 485 return 0;
486 } 486 }
@@ -494,7 +494,7 @@ static int afs_writepages_region(struct address_space *mapping,
494 494
495 if (page->mapping != mapping) { 495 if (page->mapping != mapping) {
496 unlock_page(page); 496 unlock_page(page);
497 page_cache_release(page); 497 put_page(page);
498 continue; 498 continue;
499 } 499 }
500 500
@@ -515,7 +515,7 @@ static int afs_writepages_region(struct address_space *mapping,
515 515
516 ret = afs_write_back_from_locked_page(wb, page); 516 ret = afs_write_back_from_locked_page(wb, page);
517 unlock_page(page); 517 unlock_page(page);
518 page_cache_release(page); 518 put_page(page);
519 if (ret < 0) { 519 if (ret < 0) {
520 _leave(" = %d", ret); 520 _leave(" = %d", ret);
521 return ret; 521 return ret;
@@ -551,13 +551,13 @@ int afs_writepages(struct address_space *mapping,
551 &next); 551 &next);
552 mapping->writeback_index = next; 552 mapping->writeback_index = next;
553 } else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) { 553 } else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
554 end = (pgoff_t)(LLONG_MAX >> PAGE_CACHE_SHIFT); 554 end = (pgoff_t)(LLONG_MAX >> PAGE_SHIFT);
555 ret = afs_writepages_region(mapping, wbc, 0, end, &next); 555 ret = afs_writepages_region(mapping, wbc, 0, end, &next);
556 if (wbc->nr_to_write > 0) 556 if (wbc->nr_to_write > 0)
557 mapping->writeback_index = next; 557 mapping->writeback_index = next;
558 } else { 558 } else {
559 start = wbc->range_start >> PAGE_CACHE_SHIFT; 559 start = wbc->range_start >> PAGE_SHIFT;
560 end = wbc->range_end >> PAGE_CACHE_SHIFT; 560 end = wbc->range_end >> PAGE_SHIFT;
561 ret = afs_writepages_region(mapping, wbc, start, end, &next); 561 ret = afs_writepages_region(mapping, wbc, start, end, &next);
562 } 562 }
563 563