author:    Kirill A. Shutemov <kirill.shutemov@linux.intel.com>  2016-04-01 08:29:47 -0400
committer: Linus Torvalds <torvalds@linux-foundation.org>        2016-04-04 13:41:08 -0400
commit:    09cbfeaf1a5a67bfb3201e0c83c810cecb2efa5a (patch)
tree:      6cdf210c9c0f981cd22544feeba701892ec19464 /fs/hfs
parent:    c05c2ec96bb8b7310da1055c7b9d786a3ec6dc0c (diff)
mm, fs: get rid of PAGE_CACHE_* and page_cache_{get,release} macros
The PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} macros were introduced a *long* time
ago with the promise that one day it would be possible to implement the page
cache with bigger chunks than PAGE_SIZE. This promise never materialized,
and likely never will. We have many places where PAGE_CACHE_SIZE is assumed
to be equal to PAGE_SIZE, and it is a constant source of confusion over
whether PAGE_CACHE_* or PAGE_* constants should be used in a particular
case, especially on the border between fs and mm. Switching globally to
PAGE_CACHE_SIZE != PAGE_SIZE would cause too much breakage to be doable.

Let's stop pretending that pages in the page cache are special. They are not.

The changes are pretty straightforward:

 - <foo> << (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;
 - <foo> >> (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;
 - PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} -> PAGE_{SIZE,SHIFT,MASK,ALIGN};
 - page_cache_get() -> get_page();
 - page_cache_release() -> put_page();

This patch contains automated changes generated with coccinelle using the
script below. For some reason, coccinelle doesn't patch header files; I've
called spatch for them manually. The only adjustment after coccinelle is a
revert of the changes to the PAGE_CACHE_ALIGN definition: we are going to
drop it later.

There are a few places in the code where coccinelle didn't reach. I'll fix
them manually in a separate patch. Comments and documentation will also be
addressed in a separate patch.

virtual patch

@@
expression E;
@@
- E << (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E

@@
expression E;
@@
- E >> (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E

@@
@@
- PAGE_CACHE_SHIFT
+ PAGE_SHIFT

@@
@@
- PAGE_CACHE_SIZE
+ PAGE_SIZE

@@
@@
- PAGE_CACHE_MASK
+ PAGE_MASK

@@
expression E;
@@
- PAGE_CACHE_ALIGN(E)
+ PAGE_ALIGN(E)

@@
expression E;
@@
- page_cache_get(E)
+ get_page(E)

@@
expression E;
@@
- page_cache_release(E)
+ put_page(E)

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
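To see why the first two rewrite rules are safe: PAGE_CACHE_SHIFT has always
been defined equal to PAGE_SHIFT, so the conversion shifts are shifts by
zero. The following minimal standalone sketch (not part of the patch; the
helper name and sample value are illustrative, and a 4 KiB page is assumed)
demonstrates the identity:

	#include <stdio.h>

	#define PAGE_SHIFT       12          /* 4 KiB pages, as on x86 */
	#define PAGE_CACHE_SHIFT PAGE_SHIFT  /* the equality that always held */

	/* Hypothetical helper: convert a page-cache index to a page index. */
	static unsigned long cache_idx_to_page_idx(unsigned long idx)
	{
		/* PAGE_CACHE_SHIFT - PAGE_SHIFT == 0, so this shift is a no-op... */
		return idx << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	}

	int main(void)
	{
		unsigned long idx = 42;
		/* ...which is why coccinelle can rewrite it to plain "idx". */
		printf("%lu == %lu\n", cache_idx_to_page_idx(idx), idx);
		return 0;
	}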
Diffstat (limited to 'fs/hfs')
-rw-r--r--  fs/hfs/bnode.c  12
-rw-r--r--  fs/hfs/btree.c  20
-rw-r--r--  fs/hfs/inode.c   8

3 files changed, 20 insertions, 20 deletions
diff --git a/fs/hfs/bnode.c b/fs/hfs/bnode.c
index 221719eac5de..d77d844b668b 100644
--- a/fs/hfs/bnode.c
+++ b/fs/hfs/bnode.c
@@ -278,14 +278,14 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
 
 	mapping = tree->inode->i_mapping;
 	off = (loff_t)cnid * tree->node_size;
-	block = off >> PAGE_CACHE_SHIFT;
-	node->page_offset = off & ~PAGE_CACHE_MASK;
+	block = off >> PAGE_SHIFT;
+	node->page_offset = off & ~PAGE_MASK;
 	for (i = 0; i < tree->pages_per_bnode; i++) {
 		page = read_mapping_page(mapping, block++, NULL);
 		if (IS_ERR(page))
 			goto fail;
 		if (PageError(page)) {
-			page_cache_release(page);
+			put_page(page);
 			goto fail;
 		}
 		node->page[i] = page;
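The idiom being renamed in this hunk, splitting a byte offset into a page
index (off >> PAGE_SHIFT) and an offset within that page (off & ~PAGE_MASK),
recurs throughout the fs code touched by this patch. A minimal userspace
sketch of the same arithmetic (standalone, not from the patch; a 4 KiB page
and the helper name are assumptions):

	#include <stdio.h>

	#define PAGE_SHIFT 12
	#define PAGE_SIZE  (1UL << PAGE_SHIFT)
	#define PAGE_MASK  (~(PAGE_SIZE - 1))

	/* Hypothetical demo of the index/offset split used in __hfs_bnode_create(). */
	static void split_offset(unsigned long long off)
	{
		unsigned long long block = off >> PAGE_SHIFT;  /* which page */
		unsigned long in_page = off & ~PAGE_MASK;      /* where inside it */
		printf("off=%llu -> page %llu, offset %lu\n", off, block, in_page);
	}

	int main(void)
	{
		split_offset(5000);   /* page 1, offset 904 with 4 KiB pages */
		return 0;
	}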
@@ -401,7 +401,7 @@ void hfs_bnode_free(struct hfs_bnode *node)
 
 	for (i = 0; i < node->tree->pages_per_bnode; i++)
 		if (node->page[i])
-			page_cache_release(node->page[i]);
+			put_page(node->page[i]);
 	kfree(node);
 }
 
@@ -429,11 +429,11 @@ struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num)
 
 	pagep = node->page;
 	memset(kmap(*pagep) + node->page_offset, 0,
-	       min((int)PAGE_CACHE_SIZE, (int)tree->node_size));
+	       min((int)PAGE_SIZE, (int)tree->node_size));
 	set_page_dirty(*pagep);
 	kunmap(*pagep);
 	for (i = 1; i < tree->pages_per_bnode; i++) {
-		memset(kmap(*++pagep), 0, PAGE_CACHE_SIZE);
+		memset(kmap(*++pagep), 0, PAGE_SIZE);
 		set_page_dirty(*pagep);
 		kunmap(*pagep);
 	}
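Why min() appears only on the first page: an HFS bnode can be smaller than a
page or span several whole pages, so the first memset covers
min(PAGE_SIZE, node_size) bytes and the loop zeroes any remaining full
pages. A hedged userspace sketch of the same pattern (the array-of-buffers
model is a hypothetical stand-in for the kmap'd page cache pages):

	#include <string.h>

	#define PAGE_SIZE 4096

	/* Hypothetical model: a bnode backed by an array of page-sized buffers. */
	static void zero_bnode(char *pages[], int pages_per_bnode,
			       unsigned int page_offset, unsigned int node_size)
	{
		/* First page: at most one page's worth, or the whole (small) node. */
		unsigned int first = node_size < PAGE_SIZE ? node_size : PAGE_SIZE;
		memset(pages[0] + page_offset, 0, first);

		/* Remaining pages of a multi-page node are zeroed whole. */
		for (int i = 1; i < pages_per_bnode; i++)
			memset(pages[i], 0, PAGE_SIZE);
	}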
diff --git a/fs/hfs/btree.c b/fs/hfs/btree.c
index 1ab19e660e69..37cdd955eceb 100644
--- a/fs/hfs/btree.c
+++ b/fs/hfs/btree.c
@@ -116,14 +116,14 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke
 	}
 
 	tree->node_size_shift = ffs(size) - 1;
-	tree->pages_per_bnode = (tree->node_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+	tree->pages_per_bnode = (tree->node_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
 	kunmap(page);
-	page_cache_release(page);
+	put_page(page);
 	return tree;
 
 fail_page:
-	page_cache_release(page);
+	put_page(page);
 free_inode:
	tree->inode->i_mapping->a_ops = &hfs_aops;
	iput(tree->inode);
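The pages_per_bnode line above is the standard round-up-then-shift idiom:
adding PAGE_SIZE - 1 before shifting right by PAGE_SHIFT computes
ceil(node_size / PAGE_SIZE) without a division. A quick standalone sanity
check of the arithmetic (not from the patch; sample values and 4 KiB pages
are assumptions):

	#include <assert.h>

	#define PAGE_SHIFT 12
	#define PAGE_SIZE  (1U << PAGE_SHIFT)

	/* ceil(node_size / PAGE_SIZE), as in hfs_btree_open(). */
	static unsigned int pages_per_bnode(unsigned int node_size)
	{
		return (node_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	}

	int main(void)
	{
		assert(pages_per_bnode(512)   == 1);  /* sub-page node still needs a page */
		assert(pages_per_bnode(4096)  == 1);  /* exact fit */
		assert(pages_per_bnode(16384) == 4);  /* multi-page node */
		return 0;
	}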
@@ -257,9 +257,9 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
 	off = off16;
 
 	off += node->page_offset;
-	pagep = node->page + (off >> PAGE_CACHE_SHIFT);
+	pagep = node->page + (off >> PAGE_SHIFT);
 	data = kmap(*pagep);
-	off &= ~PAGE_CACHE_MASK;
+	off &= ~PAGE_MASK;
 	idx = 0;
 
 	for (;;) {
@@ -279,7 +279,7 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
 				}
 			}
 		}
-		if (++off >= PAGE_CACHE_SIZE) {
+		if (++off >= PAGE_SIZE) {
 			kunmap(*pagep);
 			data = kmap(*++pagep);
 			off = 0;
@@ -302,9 +302,9 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
 		len = hfs_brec_lenoff(node, 0, &off16);
 		off = off16;
 		off += node->page_offset;
-		pagep = node->page + (off >> PAGE_CACHE_SHIFT);
+		pagep = node->page + (off >> PAGE_SHIFT);
 		data = kmap(*pagep);
-		off &= ~PAGE_CACHE_MASK;
+		off &= ~PAGE_MASK;
 	}
 }
 
@@ -348,9 +348,9 @@ void hfs_bmap_free(struct hfs_bnode *node)
 		len = hfs_brec_lenoff(node, 0, &off);
 	}
 	off += node->page_offset + nidx / 8;
-	page = node->page[off >> PAGE_CACHE_SHIFT];
+	page = node->page[off >> PAGE_SHIFT];
 	data = kmap(page);
-	off &= ~PAGE_CACHE_MASK;
+	off &= ~PAGE_MASK;
 	m = 1 << (~nidx & 7);
 	byte = data[off];
 	if (!(byte & m)) {
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index 6686bf39a5b5..cb1e5faa2fb7 100644
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -91,8 +91,8 @@ static int hfs_releasepage(struct page *page, gfp_t mask)
 	if (!tree)
 		return 0;
 
-	if (tree->node_size >= PAGE_CACHE_SIZE) {
-		nidx = page->index >> (tree->node_size_shift - PAGE_CACHE_SHIFT);
+	if (tree->node_size >= PAGE_SIZE) {
+		nidx = page->index >> (tree->node_size_shift - PAGE_SHIFT);
 		spin_lock(&tree->hash_lock);
 		node = hfs_bnode_findhash(tree, nidx);
 		if (!node)
@@ -105,8 +105,8 @@ static int hfs_releasepage(struct page *page, gfp_t mask)
 		}
 		spin_unlock(&tree->hash_lock);
 	} else {
-		nidx = page->index << (PAGE_CACHE_SHIFT - tree->node_size_shift);
-		i = 1 << (PAGE_CACHE_SHIFT - tree->node_size_shift);
+		nidx = page->index << (PAGE_SHIFT - tree->node_size_shift);
+		i = 1 << (PAGE_SHIFT - tree->node_size_shift);
 		spin_lock(&tree->hash_lock);
 		do {
 			node = hfs_bnode_findhash(tree, nidx++);
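These two hfs_releasepage() hunks are the interesting case for the
conversion: here the shift distance is a genuine difference between two
shifts (node_size_shift vs. PAGE_SHIFT), so only the macro names change, not
the arithmetic. A hedged standalone sketch of the two directions of the
page/node mapping (helper names and sample shifts are assumptions; 4 KiB
pages assumed):

	#include <stdio.h>

	#define PAGE_SHIFT 12

	/* Node at least a page large: several pages map to one node. */
	static unsigned long node_for_page(unsigned long page_index,
					   unsigned int node_size_shift)
	{
		return page_index >> (node_size_shift - PAGE_SHIFT);
	}

	/* Node smaller than a page: one page holds several nodes. */
	static unsigned long first_node_in_page(unsigned long page_index,
						unsigned int node_size_shift)
	{
		return page_index << (PAGE_SHIFT - node_size_shift);
	}

	int main(void)
	{
		/* 16 KiB nodes (shift 14): pages 0-3 all belong to node 0. */
		printf("%lu\n", node_for_page(3, 14));        /* -> 0 */
		/* 512-byte nodes (shift 9): page 1 starts at node 8. */
		printf("%lu\n", first_node_in_page(1, 9));    /* -> 8 */
		return 0;
	}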