Diffstat (limited to 'mm/truncate.c')
-rw-r--r--	mm/truncate.c	40 ++++++++++++++++++++--------------------
1 file changed, 20 insertions(+), 20 deletions(-)
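
Every hunk in this patch is a mechanical rename: the PAGE_CACHE_* macros and page_cache_release() had long been nothing more than aliases for their PAGE_* and put_page() counterparts, kept around for a page-cache granularity larger than the base page that never materialized. A minimal sketch of those aliases as include/linux/pagemap.h defined them before this cleanup (abridged; the exact formatting is from memory):

    /* include/linux/pagemap.h, pre-4.6 (abridged) */
    #define PAGE_CACHE_SHIFT        PAGE_SHIFT
    #define PAGE_CACHE_SIZE         PAGE_SIZE
    #define PAGE_CACHE_MASK         PAGE_MASK
    #define page_cache_get(page)            get_page(page)
    #define page_cache_release(page)        put_page(page)

Since each alias expands to exactly its right-hand side, substituting the short names changes no generated code; the diff below is behaviorally a no-op.
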
diff --git a/mm/truncate.c b/mm/truncate.c
index 7598b552ae03..b00272810871 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -118,7 +118,7 @@ truncate_complete_page(struct address_space *mapping, struct page *page)
                 return -EIO;

         if (page_has_private(page))
-                do_invalidatepage(page, 0, PAGE_CACHE_SIZE);
+                do_invalidatepage(page, 0, PAGE_SIZE);

         /*
          * Some filesystems seem to re-dirty the page even after
@@ -159,8 +159,8 @@ int truncate_inode_page(struct address_space *mapping, struct page *page)
 {
         if (page_mapped(page)) {
                 unmap_mapping_range(mapping,
-                                   (loff_t)page->index << PAGE_CACHE_SHIFT,
-                                   PAGE_CACHE_SIZE, 0);
+                                   (loff_t)page->index << PAGE_SHIFT,
+                                   PAGE_SIZE, 0);
         }
         return truncate_complete_page(mapping, page);
 }
@@ -241,8 +241,8 @@ void truncate_inode_pages_range(struct address_space *mapping,
                 return;

         /* Offsets within partial pages */
-        partial_start = lstart & (PAGE_CACHE_SIZE - 1);
-        partial_end = (lend + 1) & (PAGE_CACHE_SIZE - 1);
+        partial_start = lstart & (PAGE_SIZE - 1);
+        partial_end = (lend + 1) & (PAGE_SIZE - 1);

         /*
          * 'start' and 'end' always covers the range of pages to be fully
@@ -250,7 +250,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
          * start of the range and 'partial_end' at the end of the range.
          * Note that 'end' is exclusive while 'lend' is inclusive.
          */
-        start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+        start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
         if (lend == -1)
                 /*
                  * lend == -1 indicates end-of-file so we have to set 'end'
@@ -259,7 +259,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
                  */
                 end = -1;
         else
-                end = (lend + 1) >> PAGE_CACHE_SHIFT;
+                end = (lend + 1) >> PAGE_SHIFT;

         pagevec_init(&pvec, 0);
         index = start;
@@ -298,7 +298,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
         if (partial_start) {
                 struct page *page = find_lock_page(mapping, start - 1);
                 if (page) {
-                        unsigned int top = PAGE_CACHE_SIZE;
+                        unsigned int top = PAGE_SIZE;
                         if (start > end) {
                                 /* Truncation within a single page */
                                 top = partial_end;
@@ -311,7 +311,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
                                 do_invalidatepage(page, partial_start,
                                                   top - partial_start);
                         unlock_page(page);
-                        page_cache_release(page);
+                        put_page(page);
                 }
         }
         if (partial_end) {
@@ -324,7 +324,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
                                 do_invalidatepage(page, 0,
                                                   partial_end);
                         unlock_page(page);
-                        page_cache_release(page);
+                        put_page(page);
                 }
         }
         /*
@@ -538,7 +538,7 @@ invalidate_complete_page2(struct address_space *mapping, struct page *page)
         if (mapping->a_ops->freepage)
                 mapping->a_ops->freepage(page);

-        page_cache_release(page);        /* pagecache ref */
+        put_page(page);        /* pagecache ref */
         return 1;
 failed:
         spin_unlock_irqrestore(&mapping->tree_lock, flags);
@@ -608,18 +608,18 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
                                          * Zap the rest of the file in one hit.
                                          */
                                         unmap_mapping_range(mapping,
-                                           (loff_t)index << PAGE_CACHE_SHIFT,
+                                           (loff_t)index << PAGE_SHIFT,
                                            (loff_t)(1 + end - index)
-                                                        << PAGE_CACHE_SHIFT,
+                                                        << PAGE_SHIFT,
                                             0);
                                         did_range_unmap = 1;
                                 } else {
                                         /*
                                          * Just zap this page
                                          */
                                         unmap_mapping_range(mapping,
-                                           (loff_t)index << PAGE_CACHE_SHIFT,
-                                           PAGE_CACHE_SIZE, 0);
+                                           (loff_t)index << PAGE_SHIFT,
+                                           PAGE_SIZE, 0);
                                 }
                         }
                         BUG_ON(page_mapped(page));
@@ -744,14 +744,14 @@ void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)

         WARN_ON(to > inode->i_size);

-        if (from >= to || bsize == PAGE_CACHE_SIZE)
+        if (from >= to || bsize == PAGE_SIZE)
                 return;
         /* Page straddling @from will not have any hole block created? */
         rounded_from = round_up(from, bsize);
-        if (to <= rounded_from || !(rounded_from & (PAGE_CACHE_SIZE - 1)))
+        if (to <= rounded_from || !(rounded_from & (PAGE_SIZE - 1)))
                 return;

-        index = from >> PAGE_CACHE_SHIFT;
+        index = from >> PAGE_SHIFT;
         page = find_lock_page(inode->i_mapping, index);
         /* Page not cached? Nothing to do */
         if (!page)
@@ -763,7 +763,7 @@ void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
         if (page_mkclean(page))
                 set_page_dirty(page);
         unlock_page(page);
-        page_cache_release(page);
+        put_page(page);
 }
 EXPORT_SYMBOL(pagecache_isize_extended);

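
To make the offset arithmetic in the truncate_inode_pages_range() hunks concrete, here is a standalone userspace sketch of the same computation. PAGE_SHIFT is hardcoded to 12 (4K pages) purely for illustration; in the kernel it comes from the architecture headers, and the variable names mirror the kernel code above (the lend == -1 end-of-file case is omitted for brevity):

    #include <stdio.h>

    #define PAGE_SHIFT 12                /* assumed 4K pages */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    int main(void)
    {
            long long lstart = 1000;     /* first byte to truncate (inclusive) */
            long long lend = 10000;      /* last byte to truncate (inclusive) */

            /* Byte offsets inside the partial first and last pages */
            unsigned int partial_start = lstart & (PAGE_SIZE - 1);
            unsigned int partial_end = (lend + 1) & (PAGE_SIZE - 1);

            /* First fully-truncated page index, rounded up ... */
            unsigned long start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
            /* ... and one past the last: 'end' is exclusive, 'lend' inclusive */
            unsigned long end = (lend + 1) >> PAGE_SHIFT;

            printf("partial_start=%u partial_end=%u start=%lu end=%lu\n",
                   partial_start, partial_end, start, end);
            /* Prints: partial_start=1000 partial_end=1809 start=1 end=2,
             * i.e. page 1 is dropped whole, page 0 is zeroed from byte 1000,
             * and page 2 is zeroed up to byte 1809. */
            return 0;
    }

These masks and shifts are the only places the page-cache granularity appears, which is why the patch can substitute PAGE_SIZE and PAGE_SHIFT everywhere without changing the results.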