path: root/fs/hfs/btree.c
author    Kirill A. Shutemov <kirill.shutemov@linux.intel.com>  2016-04-01 08:29:47 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>        2016-04-04 13:41:08 -0400
commit    09cbfeaf1a5a67bfb3201e0c83c810cecb2efa5a (patch)
tree      6cdf210c9c0f981cd22544feeba701892ec19464 /fs/hfs/btree.c
parent    c05c2ec96bb8b7310da1055c7b9d786a3ec6dc0c (diff)
mm, fs: get rid of PAGE_CACHE_* and page_cache_{get,release} macros
The PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} macros were introduced a *long*
time ago with the promise that one day it would be possible to
implement the page cache with bigger chunks than PAGE_SIZE. This
promise never materialized, and it is unlikely it ever will.

We have many places where PAGE_CACHE_SIZE is assumed to be equal to
PAGE_SIZE, and it's a constant source of confusion whether a
PAGE_CACHE_* or PAGE_* constant should be used in a particular case,
especially on the border between fs and mm.

Globally switching to PAGE_CACHE_SIZE != PAGE_SIZE would cause too
much breakage to be doable.

Let's stop pretending that pages in the page cache are special. They
are not.

The changes are pretty straightforward:

 - <foo> << (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;

 - <foo> >> (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;

 - PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} -> PAGE_{SIZE,SHIFT,MASK,ALIGN};

 - page_cache_get() -> get_page();

 - page_cache_release() -> put_page();

This patch contains the automated changes generated with coccinelle
using the script below. For some reason, coccinelle doesn't patch
header files; I've called spatch for them manually.

The only adjustment after coccinelle is a revert of the changes to the
PAGE_CACHE_ALIGN definition: we are going to drop it later.

There are a few places in the code that coccinelle didn't reach; I'll
fix them manually in a separate patch. Comments and documentation will
also be addressed in a separate patch.

virtual patch

@@
expression E;
@@
- E << (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E

@@
expression E;
@@
- E >> (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E

@@
@@
- PAGE_CACHE_SHIFT
+ PAGE_SHIFT

@@
@@
- PAGE_CACHE_SIZE
+ PAGE_SIZE

@@
@@
- PAGE_CACHE_MASK
+ PAGE_MASK

@@
expression E;
@@
- PAGE_CACHE_ALIGN(E)
+ PAGE_ALIGN(E)

@@
expression E;
@@
- page_cache_get(E)
+ get_page(E)

@@
expression E;
@@
- page_cache_release(E)
+ put_page(E)

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
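To see why the first two rules of the script reduce the shift expressions to a bare E: PAGE_CACHE_SHIFT was defined as PAGE_SHIFT, so the shift amount is identically zero. A minimal userspace sketch of the collapse (the #define values below are stand-ins, not the kernel headers):

#include <stdio.h>

/* Stand-in definitions; in the kernel, PAGE_CACHE_SHIFT was literally
 * #defined as PAGE_SHIFT, so their difference is always zero. */
#define PAGE_SHIFT		12
#define PAGE_CACHE_SHIFT	PAGE_SHIFT

int main(void)
{
	unsigned long idx = 7;

	/* Old spelling: shift by an always-zero difference. */
	unsigned long old_expr = idx << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

	/* New spelling after the semantic patch: the expression itself. */
	unsigned long new_expr = idx;

	printf("%lu == %lu\n", old_expr, new_expr);	/* prints "7 == 7" */
	return 0;
}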
Diffstat (limited to 'fs/hfs/btree.c')
-rw-r--r--  fs/hfs/btree.c  20
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/fs/hfs/btree.c b/fs/hfs/btree.c
index 1ab19e660e69..37cdd955eceb 100644
--- a/fs/hfs/btree.c
+++ b/fs/hfs/btree.c
@@ -116,14 +116,14 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke
 	}
 
 	tree->node_size_shift = ffs(size) - 1;
-	tree->pages_per_bnode = (tree->node_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+	tree->pages_per_bnode = (tree->node_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
 	kunmap(page);
-	page_cache_release(page);
+	put_page(page);
 	return tree;
 
 fail_page:
-	page_cache_release(page);
+	put_page(page);
 free_inode:
 	tree->inode->i_mapping->a_ops = &hfs_aops;
 	iput(tree->inode);
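The surviving arithmetic on the pages_per_bnode line above is the standard round-up idiom: adding PAGE_SIZE - 1 before shifting right by PAGE_SHIFT is a ceiling division by the page size. A small sketch, assuming 4 KiB pages (the values below are illustrative, not taken from kernel headers):

#include <stdio.h>

#define PAGE_SHIFT	12			/* assumed: 4 KiB pages */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

/* Ceiling division by the page size, as on the pages_per_bnode line. */
static unsigned long pages_needed(unsigned long node_size)
{
	return (node_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
}

int main(void)
{
	printf("%lu\n", pages_needed(512));	/* 1: a sub-page node still needs a page */
	printf("%lu\n", pages_needed(4096));	/* 1: exactly one page */
	printf("%lu\n", pages_needed(4097));	/* 2: one byte over spills into a second page */
	return 0;
}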
@@ -257,9 +257,9 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
 	off = off16;
 
 	off += node->page_offset;
-	pagep = node->page + (off >> PAGE_CACHE_SHIFT);
+	pagep = node->page + (off >> PAGE_SHIFT);
 	data = kmap(*pagep);
-	off &= ~PAGE_CACHE_MASK;
+	off &= ~PAGE_MASK;
 	idx = 0;
 
 	for (;;) {
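Both replaced lines in this hunk are the usual split of a byte offset into a page index and an in-page offset. Note that the kernel's PAGE_MASK is ~(PAGE_SIZE - 1), i.e. the high bits, so off &= ~PAGE_MASK keeps the low bits. A sketch under the same 4 KiB assumption:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))	/* high bits set, as in the kernel */

int main(void)
{
	unsigned long off = 2 * PAGE_SIZE + 100;	/* 100 bytes into the third page */

	unsigned long page_idx = off >> PAGE_SHIFT;	/* which page: 2 */
	unsigned long in_page = off & ~PAGE_MASK;	/* offset within it: 100 */

	printf("page %lu, offset %lu\n", page_idx, in_page);
	return 0;
}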
@@ -279,7 +279,7 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
 					}
 				}
 			}
-			if (++off >= PAGE_CACHE_SIZE) {
+			if (++off >= PAGE_SIZE) {
 				kunmap(*pagep);
 				data = kmap(*++pagep);
 				off = 0;
@@ -302,9 +302,9 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
 		len = hfs_brec_lenoff(node, 0, &off16);
 		off = off16;
 		off += node->page_offset;
-		pagep = node->page + (off >> PAGE_CACHE_SHIFT);
+		pagep = node->page + (off >> PAGE_SHIFT);
 		data = kmap(*pagep);
-		off &= ~PAGE_CACHE_MASK;
+		off &= ~PAGE_MASK;
 	}
 }
 
@@ -348,9 +348,9 @@ void hfs_bmap_free(struct hfs_bnode *node)
 		len = hfs_brec_lenoff(node, 0, &off);
 	}
 	off += node->page_offset + nidx / 8;
-	page = node->page[off >> PAGE_CACHE_SHIFT];
+	page = node->page[off >> PAGE_SHIFT];
 	data = kmap(page);
-	off &= ~PAGE_CACHE_MASK;
+	off &= ~PAGE_MASK;
 	m = 1 << (~nidx & 7);
 	byte = data[off];
 	if (!(byte & m)) {
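The context line m = 1 << (~nidx & 7) in this last hunk selects a bit in the HFS allocation bitmap, which is MSB-first: node 0 occupies bit 7 of byte 0. For the low three bits, ~nidx & 7 equals 7 - (nidx & 7), which the check below verifies (plain userspace C, for illustration only):

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned int nidx;

	/* ~nidx & 7 is a branch-free spelling of 7 - (nidx % 8). */
	for (nidx = 0; nidx < 64; nidx++)
		assert((~nidx & 7) == 7 - (nidx & 7));

	/* Node 10 maps to bit 5 of its byte: mask 0x20. */
	printf("mask for node 10: 0x%02x\n", 1u << (~10u & 7));
	return 0;
}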