author     Kirill A. Shutemov <kirill.shutemov@linux.intel.com>  2016-04-01 08:29:47 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>       2016-04-04 13:41:08 -0400
commit     09cbfeaf1a5a67bfb3201e0c83c810cecb2efa5a (patch)
tree       6cdf210c9c0f981cd22544feeba701892ec19464 /fs/hfs/bnode.c
parent     c05c2ec96bb8b7310da1055c7b9d786a3ec6dc0c (diff)
mm, fs: get rid of PAGE_CACHE_* and page_cache_{get,release} macros
PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} macros were introduced a *long* time
ago with the promise that one day it would be possible to implement a
page cache with bigger chunks than PAGE_SIZE.

This promise never materialized, and likely never will.

We have many places where PAGE_CACHE_SIZE is assumed to be equal to
PAGE_SIZE, and it's a constant source of confusion whether PAGE_CACHE_*
or PAGE_* constants should be used in a particular case, especially on
the border between fs and mm.

Globally switching to PAGE_CACHE_SIZE != PAGE_SIZE would cause too much
breakage to be doable.

Let's stop pretending that pages in the page cache are special. They
are not.

The changes are pretty straightforward:

 - <foo> << (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;
 - <foo> >> (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;
 - PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} -> PAGE_{SIZE,SHIFT,MASK,ALIGN};
 - page_cache_get() -> get_page();
 - page_cache_release() -> put_page().

This patch contains automated changes generated with coccinelle using
the script below. For some reason, coccinelle doesn't patch header
files; I've called spatch on them manually. The only manual adjustment
after coccinelle is a revert of the change to the PAGE_CACHE_ALIGN
definition: we are going to drop it later.

There are a few places in the code that coccinelle didn't reach; I'll
fix them manually in a separate patch. Comments and documentation will
also be addressed in a separate patch.

virtual patch

@@
expression E;
@@
- E << (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E

@@
expression E;
@@
- E >> (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E

@@
@@
- PAGE_CACHE_SHIFT
+ PAGE_SHIFT

@@
@@
- PAGE_CACHE_SIZE
+ PAGE_SIZE

@@
@@
- PAGE_CACHE_MASK
+ PAGE_MASK

@@
expression E;
@@
- PAGE_CACHE_ALIGN(E)
+ PAGE_ALIGN(E)

@@
expression E;
@@
- page_cache_get(E)
+ get_page(E)

@@
expression E;
@@
- page_cache_release(E)
+ put_page(E)

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
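[Editor's note: the first two rewrite rules are safe precisely because
PAGE_CACHE_SHIFT was defined as a plain alias of PAGE_SHIFT, so the
shift amount is always zero. A minimal userspace sketch checking the
equivalences; the PAGE_SHIFT value of 12 is assumed (it varies by
architecture), and the pre-patch one-to-one aliases are reproduced
locally rather than taken from kernel headers:

#include <assert.h>
#include <stdint.h>

#define PAGE_SHIFT        12              /* assumed; architecture-dependent */
#define PAGE_SIZE         (1UL << PAGE_SHIFT)
#define PAGE_MASK         (~(PAGE_SIZE - 1))
/* Before this commit, each PAGE_CACHE_* macro was a one-to-one alias: */
#define PAGE_CACHE_SHIFT  PAGE_SHIFT
#define PAGE_CACHE_SIZE   PAGE_SIZE
#define PAGE_CACHE_MASK   PAGE_MASK

int main(void)
{
        uint64_t off = 123456789;

        /* The shift difference is zero, so the first two rules are no-ops. */
        assert((off << (PAGE_CACHE_SHIFT - PAGE_SHIFT)) == off);
        assert((off >> (PAGE_CACHE_SHIFT - PAGE_SHIFT)) == off);

        /* The renamed constants are identical by definition. */
        assert(PAGE_CACHE_SIZE == PAGE_SIZE);
        assert(PAGE_CACHE_MASK == PAGE_MASK);
        return 0;
}
]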
Diffstat (limited to 'fs/hfs/bnode.c')
-rw-r--r--  fs/hfs/bnode.c  12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/fs/hfs/bnode.c b/fs/hfs/bnode.c
index 221719eac5de..d77d844b668b 100644
--- a/fs/hfs/bnode.c
+++ b/fs/hfs/bnode.c
@@ -278,14 +278,14 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
 
 	mapping = tree->inode->i_mapping;
 	off = (loff_t)cnid * tree->node_size;
-	block = off >> PAGE_CACHE_SHIFT;
-	node->page_offset = off & ~PAGE_CACHE_MASK;
+	block = off >> PAGE_SHIFT;
+	node->page_offset = off & ~PAGE_MASK;
 	for (i = 0; i < tree->pages_per_bnode; i++) {
 		page = read_mapping_page(mapping, block++, NULL);
 		if (IS_ERR(page))
 			goto fail;
 		if (PageError(page)) {
-			page_cache_release(page);
+			put_page(page);
 			goto fail;
 		}
 		node->page[i] = page;
@@ -401,7 +401,7 @@ void hfs_bnode_free(struct hfs_bnode *node)
 
 	for (i = 0; i < node->tree->pages_per_bnode; i++)
 		if (node->page[i])
-			page_cache_release(node->page[i]);
+			put_page(node->page[i]);
 	kfree(node);
 }
 
@@ -429,11 +429,11 @@ struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num)
 
 	pagep = node->page;
 	memset(kmap(*pagep) + node->page_offset, 0,
-	       min((int)PAGE_CACHE_SIZE, (int)tree->node_size));
+	       min((int)PAGE_SIZE, (int)tree->node_size));
 	set_page_dirty(*pagep);
 	kunmap(*pagep);
 	for (i = 1; i < tree->pages_per_bnode; i++) {
-		memset(kmap(*++pagep), 0, PAGE_CACHE_SIZE);
+		memset(kmap(*++pagep), 0, PAGE_SIZE);
 		set_page_dirty(*pagep);
 		kunmap(*pagep);
 	}