aboutsummaryrefslogtreecommitdiffstats
path: root/fs/jffs2/file.c
diff options
context:
space:
mode:
authorKirill A. Shutemov <kirill.shutemov@linux.intel.com>2016-04-01 08:29:47 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2016-04-04 13:41:08 -0400
commit09cbfeaf1a5a67bfb3201e0c83c810cecb2efa5a (patch)
tree6cdf210c9c0f981cd22544feeba701892ec19464 /fs/jffs2/file.c
parentc05c2ec96bb8b7310da1055c7b9d786a3ec6dc0c (diff)
mm, fs: get rid of PAGE_CACHE_* and page_cache_{get,release} macros
PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} macros were introduced *long* time ago with promise that one day it will be possible to implement page cache with bigger chunks than PAGE_SIZE. This promise never materialized. And unlikely will. We have many places where PAGE_CACHE_SIZE assumed to be equal to PAGE_SIZE. And it's constant source of confusion on whether PAGE_CACHE_* or PAGE_* constant should be used in a particular case, especially on the border between fs and mm. Global switching to PAGE_CACHE_SIZE != PAGE_SIZE would cause to much breakage to be doable. Let's stop pretending that pages in page cache are special. They are not. The changes are pretty straight-forward: - <foo> << (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>; - <foo> >> (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>; - PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} -> PAGE_{SIZE,SHIFT,MASK,ALIGN}; - page_cache_get() -> get_page(); - page_cache_release() -> put_page(); This patch contains automated changes generated with coccinelle using script below. For some reason, coccinelle doesn't patch header files. I've called spatch for them manually. The only adjustment after coccinelle is revert of changes to PAGE_CAHCE_ALIGN definition: we are going to drop it later. There are few places in the code where coccinelle didn't reach. I'll fix them manually in a separate patch. Comments and documentation also will be addressed with the separate patch. virtual patch @@ expression E; @@ - E << (PAGE_CACHE_SHIFT - PAGE_SHIFT) + E @@ expression E; @@ - E >> (PAGE_CACHE_SHIFT - PAGE_SHIFT) + E @@ @@ - PAGE_CACHE_SHIFT + PAGE_SHIFT @@ @@ - PAGE_CACHE_SIZE + PAGE_SIZE @@ @@ - PAGE_CACHE_MASK + PAGE_MASK @@ expression E; @@ - PAGE_CACHE_ALIGN(E) + PAGE_ALIGN(E) @@ expression E; @@ - page_cache_get(E) + get_page(E) @@ expression E; @@ - page_cache_release(E) + put_page(E) Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com> Acked-by: Michal Hocko <mhocko@suse.com> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'fs/jffs2/file.c')
-rw-r--r--fs/jffs2/file.c23
1 file changed, 12 insertions, 11 deletions
diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c
index cad86bac3453..0e62dec3effc 100644
--- a/fs/jffs2/file.c
+++ b/fs/jffs2/file.c
@@ -87,14 +87,15 @@ static int jffs2_do_readpage_nolock (struct inode *inode, struct page *pg)
87 int ret; 87 int ret;
88 88
89 jffs2_dbg(2, "%s(): ino #%lu, page at offset 0x%lx\n", 89 jffs2_dbg(2, "%s(): ino #%lu, page at offset 0x%lx\n",
90 __func__, inode->i_ino, pg->index << PAGE_CACHE_SHIFT); 90 __func__, inode->i_ino, pg->index << PAGE_SHIFT);
91 91
92 BUG_ON(!PageLocked(pg)); 92 BUG_ON(!PageLocked(pg));
93 93
94 pg_buf = kmap(pg); 94 pg_buf = kmap(pg);
95 /* FIXME: Can kmap fail? */ 95 /* FIXME: Can kmap fail? */
96 96
97 ret = jffs2_read_inode_range(c, f, pg_buf, pg->index << PAGE_CACHE_SHIFT, PAGE_CACHE_SIZE); 97 ret = jffs2_read_inode_range(c, f, pg_buf, pg->index << PAGE_SHIFT,
98 PAGE_SIZE);
98 99
99 if (ret) { 100 if (ret) {
100 ClearPageUptodate(pg); 101 ClearPageUptodate(pg);
@@ -137,8 +138,8 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
137 struct page *pg; 138 struct page *pg;
138 struct inode *inode = mapping->host; 139 struct inode *inode = mapping->host;
139 struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); 140 struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
140 pgoff_t index = pos >> PAGE_CACHE_SHIFT; 141 pgoff_t index = pos >> PAGE_SHIFT;
141 uint32_t pageofs = index << PAGE_CACHE_SHIFT; 142 uint32_t pageofs = index << PAGE_SHIFT;
142 int ret = 0; 143 int ret = 0;
143 144
144 pg = grab_cache_page_write_begin(mapping, index, flags); 145 pg = grab_cache_page_write_begin(mapping, index, flags);
@@ -230,7 +231,7 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
230 231
231out_page: 232out_page:
232 unlock_page(pg); 233 unlock_page(pg);
233 page_cache_release(pg); 234 put_page(pg);
234 return ret; 235 return ret;
235} 236}
236 237
@@ -245,14 +246,14 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping,
245 struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); 246 struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
246 struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); 247 struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
247 struct jffs2_raw_inode *ri; 248 struct jffs2_raw_inode *ri;
248 unsigned start = pos & (PAGE_CACHE_SIZE - 1); 249 unsigned start = pos & (PAGE_SIZE - 1);
249 unsigned end = start + copied; 250 unsigned end = start + copied;
250 unsigned aligned_start = start & ~3; 251 unsigned aligned_start = start & ~3;
251 int ret = 0; 252 int ret = 0;
252 uint32_t writtenlen = 0; 253 uint32_t writtenlen = 0;
253 254
254 jffs2_dbg(1, "%s(): ino #%lu, page at 0x%lx, range %d-%d, flags %lx\n", 255 jffs2_dbg(1, "%s(): ino #%lu, page at 0x%lx, range %d-%d, flags %lx\n",
255 __func__, inode->i_ino, pg->index << PAGE_CACHE_SHIFT, 256 __func__, inode->i_ino, pg->index << PAGE_SHIFT,
256 start, end, pg->flags); 257 start, end, pg->flags);
257 258
258 /* We need to avoid deadlock with page_cache_read() in 259 /* We need to avoid deadlock with page_cache_read() in
@@ -261,7 +262,7 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping,
261 to re-lock it. */ 262 to re-lock it. */
262 BUG_ON(!PageUptodate(pg)); 263 BUG_ON(!PageUptodate(pg));
263 264
264 if (end == PAGE_CACHE_SIZE) { 265 if (end == PAGE_SIZE) {
265 /* When writing out the end of a page, write out the 266 /* When writing out the end of a page, write out the
266 _whole_ page. This helps to reduce the number of 267 _whole_ page. This helps to reduce the number of
267 nodes in files which have many short writes, like 268 nodes in files which have many short writes, like
@@ -275,7 +276,7 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping,
275 jffs2_dbg(1, "%s(): Allocation of raw inode failed\n", 276 jffs2_dbg(1, "%s(): Allocation of raw inode failed\n",
276 __func__); 277 __func__);
277 unlock_page(pg); 278 unlock_page(pg);
278 page_cache_release(pg); 279 put_page(pg);
279 return -ENOMEM; 280 return -ENOMEM;
280 } 281 }
281 282
@@ -292,7 +293,7 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping,
292 kmap(pg); 293 kmap(pg);
293 294
294 ret = jffs2_write_inode_range(c, f, ri, page_address(pg) + aligned_start, 295 ret = jffs2_write_inode_range(c, f, ri, page_address(pg) + aligned_start,
295 (pg->index << PAGE_CACHE_SHIFT) + aligned_start, 296 (pg->index << PAGE_SHIFT) + aligned_start,
296 end - aligned_start, &writtenlen); 297 end - aligned_start, &writtenlen);
297 298
298 kunmap(pg); 299 kunmap(pg);
@@ -329,6 +330,6 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping,
329 jffs2_dbg(1, "%s() returning %d\n", 330 jffs2_dbg(1, "%s() returning %d\n",
330 __func__, writtenlen > 0 ? writtenlen : ret); 331 __func__, writtenlen > 0 ? writtenlen : ret);
331 unlock_page(pg); 332 unlock_page(pg);
332 page_cache_release(pg); 333 put_page(pg);
333 return writtenlen > 0 ? writtenlen : ret; 334 return writtenlen > 0 ? writtenlen : ret;
334} 335}