path: root/mm/truncate.c
author    Kirill A. Shutemov <kirill.shutemov@linux.intel.com>  2016-04-01 08:29:47 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>        2016-04-04 13:41:08 -0400
commit    09cbfeaf1a5a67bfb3201e0c83c810cecb2efa5a (patch)
tree      6cdf210c9c0f981cd22544feeba701892ec19464 /mm/truncate.c
parent    c05c2ec96bb8b7310da1055c7b9d786a3ec6dc0c (diff)
mm, fs: get rid of PAGE_CACHE_* and page_cache_{get,release} macros
PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} macros were introduced a *long* time ago with the promise that one day it would be possible to implement the page cache with bigger chunks than PAGE_SIZE. This promise never materialized, and likely never will.

We have many places where PAGE_CACHE_SIZE is assumed to be equal to PAGE_SIZE, and it is a constant source of confusion whether a PAGE_CACHE_* or a PAGE_* constant should be used in a particular case, especially on the border between fs and mm.

Globally switching to PAGE_CACHE_SIZE != PAGE_SIZE would cause too much breakage to be doable.

Let's stop pretending that pages in the page cache are special. They are not.

The changes are pretty straightforward:

 - <foo> << (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;
 - <foo> >> (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;
 - PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} -> PAGE_{SIZE,SHIFT,MASK,ALIGN};
 - page_cache_get() -> get_page();
 - page_cache_release() -> put_page();

This patch contains automated changes generated with coccinelle using the script below. For some reason coccinelle doesn't patch header files, so I ran spatch on them manually. The only manual adjustment after coccinelle is a revert of the changes to the PAGE_CACHE_ALIGN definition: we are going to drop it later.

There are a few places in the code that coccinelle didn't reach. I'll fix them manually in a separate patch. Comments and documentation will also be addressed in a separate patch.

virtual patch

@@
expression E;
@@
- E << (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E

@@
expression E;
@@
- E >> (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E

@@
@@
- PAGE_CACHE_SHIFT
+ PAGE_SHIFT

@@
@@
- PAGE_CACHE_SIZE
+ PAGE_SIZE

@@
@@
- PAGE_CACHE_MASK
+ PAGE_MASK

@@
expression E;
@@
- PAGE_CACHE_ALIGN(E)
+ PAGE_ALIGN(E)

@@
expression E;
@@
- page_cache_get(E)
+ get_page(E)

@@
expression E;
@@
- page_cache_release(E)
+ put_page(E)

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
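To make the mechanical nature of the conversion concrete, here is a minimal before/after sketch (a hypothetical fragment, not taken from this patch; 'pos' is an arbitrary byte offset into a file):

	/* Before: page-cache units via the PAGE_CACHE_* aliases */
	pgoff_t index   = pos >> PAGE_CACHE_SHIFT;     /* page index in the mapping */
	unsigned offset = pos & (PAGE_CACHE_SIZE - 1); /* byte offset within the page */
	page_cache_get(page);                          /* take a page reference */
	page_cache_release(page);                      /* drop it again */

	/* After: plain PAGE_* constants and get_page()/put_page() */
	pgoff_t index   = pos >> PAGE_SHIFT;
	unsigned offset = pos & (PAGE_SIZE - 1);
	get_page(page);
	put_page(page);

Because PAGE_CACHE_SHIFT has always been defined equal to PAGE_SHIFT, the two versions compile to identical code; only the spelling changes.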
Diffstat (limited to 'mm/truncate.c')
-rw-r--r--  mm/truncate.c | 40 ++++++++++++++++++++--------------------
1 file changed, 20 insertions(+), 20 deletions(-)
diff --git a/mm/truncate.c b/mm/truncate.c
index 7598b552ae03..b00272810871 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -118,7 +118,7 @@ truncate_complete_page(struct address_space *mapping, struct page *page)
 		return -EIO;
 
 	if (page_has_private(page))
-		do_invalidatepage(page, 0, PAGE_CACHE_SIZE);
+		do_invalidatepage(page, 0, PAGE_SIZE);
 
 	/*
 	 * Some filesystems seem to re-dirty the page even after
@@ -159,8 +159,8 @@ int truncate_inode_page(struct address_space *mapping, struct page *page)
 {
 	if (page_mapped(page)) {
 		unmap_mapping_range(mapping,
-				    (loff_t)page->index << PAGE_CACHE_SHIFT,
-				    PAGE_CACHE_SIZE, 0);
+				    (loff_t)page->index << PAGE_SHIFT,
+				    PAGE_SIZE, 0);
 	}
 	return truncate_complete_page(mapping, page);
 }
@@ -241,8 +241,8 @@ void truncate_inode_pages_range(struct address_space *mapping,
 		return;
 
 	/* Offsets within partial pages */
-	partial_start = lstart & (PAGE_CACHE_SIZE - 1);
-	partial_end = (lend + 1) & (PAGE_CACHE_SIZE - 1);
+	partial_start = lstart & (PAGE_SIZE - 1);
+	partial_end = (lend + 1) & (PAGE_SIZE - 1);
 
 	/*
 	 * 'start' and 'end' always covers the range of pages to be fully
@@ -250,7 +250,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
 	 * start of the range and 'partial_end' at the end of the range.
 	 * Note that 'end' is exclusive while 'lend' is inclusive.
 	 */
-	start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+	start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	if (lend == -1)
 		/*
 		 * lend == -1 indicates end-of-file so we have to set 'end'
@@ -259,7 +259,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
 		 */
 		end = -1;
 	else
-		end = (lend + 1) >> PAGE_CACHE_SHIFT;
+		end = (lend + 1) >> PAGE_SHIFT;
 
 	pagevec_init(&pvec, 0);
 	index = start;
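As an illustration of the offset arithmetic in the hunks above, a worked example (values are made up; assumes 4096-byte pages, i.e. PAGE_SHIFT == 12):

	/* lstart = 1000, lend = 10239 */
	partial_start = 1000 & 4095;     /* = 1000: range begins mid-page 0 */
	partial_end   = 10240 & 4095;    /* = 2048: range ends mid-page 2 */
	start = (1000 + 4095) >> 12;     /* = 1: first page truncated whole */
	end   = 10240 >> 12;             /* = 2: exclusive, so only page 1 */

Page 1 (bytes 4096..8191) is truncated in full; pages 0 and 2 are handled by the partial_start/partial_end paths below.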
@@ -298,7 +298,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
 	if (partial_start) {
 		struct page *page = find_lock_page(mapping, start - 1);
 		if (page) {
-			unsigned int top = PAGE_CACHE_SIZE;
+			unsigned int top = PAGE_SIZE;
 			if (start > end) {
 				/* Truncation within a single page */
 				top = partial_end;
@@ -311,7 +311,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
 			do_invalidatepage(page, partial_start,
 					  top - partial_start);
 			unlock_page(page);
-			page_cache_release(page);
+			put_page(page);
 		}
 	}
 	if (partial_end) {
@@ -324,7 +324,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
 			do_invalidatepage(page, 0,
 					  partial_end);
 			unlock_page(page);
-			page_cache_release(page);
+			put_page(page);
 		}
 	}
 	/*
@@ -538,7 +538,7 @@ invalidate_complete_page2(struct address_space *mapping, struct page *page)
 	if (mapping->a_ops->freepage)
 		mapping->a_ops->freepage(page);
 
-	page_cache_release(page);	/* pagecache ref */
+	put_page(page);	/* pagecache ref */
 	return 1;
 failed:
 	spin_unlock_irqrestore(&mapping->tree_lock, flags);
@@ -608,18 +608,18 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 					 * Zap the rest of the file in one hit.
 					 */
 					unmap_mapping_range(mapping,
-					   (loff_t)index << PAGE_CACHE_SHIFT,
+					   (loff_t)index << PAGE_SHIFT,
 					   (loff_t)(1 + end - index)
-							 << PAGE_CACHE_SHIFT,
+							 << PAGE_SHIFT,
 					   0);
 					did_range_unmap = 1;
 				} else {
 					/*
 					 * Just zap this page
 					 */
 					unmap_mapping_range(mapping,
-					   (loff_t)index << PAGE_CACHE_SHIFT,
-					   PAGE_CACHE_SIZE, 0);
+					   (loff_t)index << PAGE_SHIFT,
+					   PAGE_SIZE, 0);
 				}
 			}
 			BUG_ON(page_mapped(page));
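The byte range passed to unmap_mapping_range() above is derived from page indices by shifting; the (loff_t) casts keep the arithmetic 64-bit so that large files don't overflow on 32-bit hosts. A worked example with assumed values:

	/* index = 3, end = 7, 4096-byte pages (PAGE_SHIFT == 12) */
	loff_t holebegin = (loff_t)3 << 12;            /* byte 12288 */
	loff_t holelen   = (loff_t)(1 + 7 - 3) << 12;  /* 5 pages = 20480 bytes */
	/* unmaps user mappings of file pages 3..7 in a single call */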
@@ -744,14 +744,14 @@ void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
 
 	WARN_ON(to > inode->i_size);
 
-	if (from >= to || bsize == PAGE_CACHE_SIZE)
+	if (from >= to || bsize == PAGE_SIZE)
 		return;
 	/* Page straddling @from will not have any hole block created? */
 	rounded_from = round_up(from, bsize);
-	if (to <= rounded_from || !(rounded_from & (PAGE_CACHE_SIZE - 1)))
+	if (to <= rounded_from || !(rounded_from & (PAGE_SIZE - 1)))
 		return;
 
-	index = from >> PAGE_CACHE_SHIFT;
+	index = from >> PAGE_SHIFT;
 	page = find_lock_page(inode->i_mapping, index);
 	/* Page not cached? Nothing to do */
 	if (!page)
@@ -763,7 +763,7 @@ void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
 	if (page_mkclean(page))
 		set_page_dirty(page);
 	unlock_page(page);
-	page_cache_release(page);
+	put_page(page);
 }
 EXPORT_SYMBOL(pagecache_isize_extended);
 
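A worked example of the early-return checks in pagecache_isize_extended() above (an editorial illustration; assumes a 1024-byte block size and 4096-byte pages):

	/* bsize = 1024, from = 2500, to = 3500 */
	rounded_from = round_up(2500, 1024);  /* = 3072 */
	/*
	 * to (3500) > rounded_from (3072) and 3072 & 4095 != 0, so the
	 * page straddling the old i_size may gain hole blocks: fall
	 * through, find_lock_page() it, and page_mkclean() it so that a
	 * later write faults into page_mkwrite() and allocates blocks.
	 */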