Diffstat (limited to 'mm/truncate.c')
-rw-r--r--  mm/truncate.c  74
1 file changed, 60 insertions, 14 deletions
diff --git a/mm/truncate.c b/mm/truncate.c
index ba887bff48c5..e13f22efaad7 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -19,6 +19,7 @@
 #include <linux/task_io_accounting_ops.h>
 #include <linux/buffer_head.h>  /* grr. try_to_release_page,
                                    do_invalidatepage */
+#include <linux/cleancache.h>
 #include "internal.h"
 
 
@@ -51,6 +52,7 @@ void do_invalidatepage(struct page *page, unsigned long offset)
 static inline void truncate_partial_page(struct page *page, unsigned partial)
 {
         zero_user_segment(page, partial, PAGE_CACHE_SIZE);
+        cleancache_flush_page(page->mapping, page);
         if (page_has_private(page))
                 do_invalidatepage(page, partial);
 }
@@ -106,9 +108,8 @@ truncate_complete_page(struct address_space *mapping, struct page *page)
         cancel_dirty_page(page, PAGE_CACHE_SIZE);
 
         clear_page_mlock(page);
-        remove_from_page_cache(page);
         ClearPageMappedToDisk(page);
-        page_cache_release(page);       /* pagecache ref */
+        delete_from_page_cache(page);
         return 0;
 }
 
@@ -215,6 +216,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
         pgoff_t next;
         int i;
 
+        cleancache_flush_inode(mapping);
         if (mapping->nrpages == 0)
                 return;
 
@@ -225,6 +227,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
         next = start;
         while (next <= end &&
                pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
+                mem_cgroup_uncharge_start();
                 for (i = 0; i < pagevec_count(&pvec); i++) {
                         struct page *page = pvec.pages[i];
                         pgoff_t page_index = page->index;
@@ -247,6 +250,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
                         unlock_page(page);
                 }
                 pagevec_release(&pvec);
+                mem_cgroup_uncharge_end();
                 cond_resched();
         }
 
@@ -290,6 +294,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
                 pagevec_release(&pvec);
                 mem_cgroup_uncharge_end();
         }
+        cleancache_flush_inode(mapping);
 }
 EXPORT_SYMBOL(truncate_inode_pages_range);
 
@@ -299,6 +304,11 @@ EXPORT_SYMBOL(truncate_inode_pages_range);
  * @lstart: offset from which to truncate
  *
  * Called under (and serialised by) inode->i_mutex.
+ *
+ * Note: When this function returns, there can be a page in the process of
+ * deletion (inside __delete_from_page_cache()) in the specified range. Thus
+ * mapping->nrpages can be non-zero when this function returns even after
+ * truncation of the whole mapping.
  */
 void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
 {
@@ -320,11 +330,12 @@ EXPORT_SYMBOL(truncate_inode_pages);
  * pagetables.
  */
 unsigned long invalidate_mapping_pages(struct address_space *mapping,
                 pgoff_t start, pgoff_t end)
 {
         struct pagevec pvec;
         pgoff_t next = start;
-        unsigned long ret = 0;
+        unsigned long ret;
+        unsigned long count = 0;
         int i;
 
         pagevec_init(&pvec, 0);
@@ -351,9 +362,15 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
                         if (lock_failed)
                                 continue;
 
-                        ret += invalidate_inode_page(page);
-
+                        ret = invalidate_inode_page(page);
                         unlock_page(page);
+                        /*
+                         * Invalidation is a hint that the page is no longer
+                         * of interest and try to speed up its reclaim.
+                         */
+                        if (!ret)
+                                deactivate_page(page);
+                        count += ret;
                         if (next > end)
                                 break;
                 }
@@ -361,7 +378,7 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
                 mem_cgroup_uncharge_end();
                 cond_resched();
         }
-        return ret;
+        return count;
 }
 EXPORT_SYMBOL(invalidate_mapping_pages);
 
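The invalidate_mapping_pages() hunks above change both the behaviour and the return value: the function now returns the number of pages it actually dropped, and pages it could not drop are deactivated as a hint to reclaim. A minimal caller sketch under that reading; myfs_shrink_cache() is a hypothetical helper, and the (0, -1) whole-file range mirrors what fs/drop_caches.c passes:

#include <linux/fs.h>
#include <linux/kernel.h>

/*
 * Illustrative only: try to drop the clean, unmapped page-cache pages of
 * one inode and report how many invalidate_mapping_pages() actually
 * removed.  Pages it could not remove are deactivated by the code above
 * rather than counted.
 */
static void myfs_shrink_cache(struct inode *inode)
{
        unsigned long dropped;

        dropped = invalidate_mapping_pages(inode->i_mapping, 0, -1);
        pr_debug("myfs: dropped %lu page-cache pages\n", dropped);
}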
@@ -387,9 +404,13 @@ invalidate_complete_page2(struct address_space *mapping, struct page *page)
 
         clear_page_mlock(page);
         BUG_ON(page_has_private(page));
-        __remove_from_page_cache(page);
+        __delete_from_page_cache(page);
         spin_unlock_irq(&mapping->tree_lock);
         mem_cgroup_uncharge_cache_page(page);
+
+        if (mapping->a_ops->freepage)
+                mapping->a_ops->freepage(page);
+
         page_cache_release(page);       /* pagecache ref */
         return 1;
 failed:
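For reference, the mapping->a_ops->freepage() call added above gives the filesystem a final callback after the page has been removed from the page cache and its memcg charge dropped. A minimal sketch of the filesystem side; myfs_freepage and myfs_aops are hypothetical names:

#include <linux/fs.h>
#include <linux/pagemap.h>

/*
 * Hypothetical callback: runs once the page is no longer visible in the
 * page cache.  It should not assume the original mapping still exists
 * and should not block; it only cleans up filesystem-private per-page
 * state.
 */
static void myfs_freepage(struct page *page)
{
        /* e.g. drop a private cookie recorded when the page was cached */
}

static const struct address_space_operations myfs_aops = {
        .freepage       = myfs_freepage,
};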
@@ -428,6 +449,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
         int did_range_unmap = 0;
         int wrapped = 0;
 
+        cleancache_flush_inode(mapping);
         pagevec_init(&pvec, 0);
         next = start;
         while (next <= end && !wrapped &&
@@ -486,6 +508,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
                 mem_cgroup_uncharge_end();
                 cond_resched();
         }
+        cleancache_flush_inode(mapping);
         return ret;
 }
 EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);
@@ -545,13 +568,12 @@ EXPORT_SYMBOL(truncate_pagecache);
  * @inode: inode
  * @newsize: new file size
  *
- * truncate_setsize updastes i_size update and performs pagecache
- * truncation (if necessary) for a file size updates. It will be
- * typically be called from the filesystem's setattr function when
- * ATTR_SIZE is passed in.
+ * truncate_setsize updates i_size and performs pagecache truncation (if
+ * necessary) to @newsize. It will be typically be called from the filesystem's
+ * setattr function when ATTR_SIZE is passed in.
  *
- * Must be called with inode_mutex held and after all filesystem
- * specific block truncation has been performed.
+ * Must be called with inode_mutex held and before all filesystem specific
+ * block truncation has been performed.
  */
 void truncate_setsize(struct inode *inode, loff_t newsize)
 {
@@ -586,3 +608,27 @@ int vmtruncate(struct inode *inode, loff_t offset)
         return 0;
 }
 EXPORT_SYMBOL(vmtruncate);
+
+int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
+{
+        struct address_space *mapping = inode->i_mapping;
+
+        /*
+         * If the underlying filesystem is not going to provide
+         * a way to truncate a range of blocks (punch a hole) -
+         * we should return failure right now.
+         */
+        if (!inode->i_op->truncate_range)
+                return -ENOSYS;
+
+        mutex_lock(&inode->i_mutex);
+        down_write(&inode->i_alloc_sem);
+        unmap_mapping_range(mapping, offset, (end - offset), 1);
+        inode->i_op->truncate_range(inode, offset, end);
+        /* unmap again to remove racily COWed private pages */
+        unmap_mapping_range(mapping, offset, (end - offset), 1);
+        up_write(&inode->i_alloc_sem);
+        mutex_unlock(&inode->i_mutex);
+
+        return 0;
+}
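The vmtruncate_range() helper added at the end only succeeds when the filesystem supplies inode_operations->truncate_range; in this kernel generation tmpfs is the main in-tree implementer (shmem_truncate_range), reached via madvise(MADV_REMOVE). A sketch of the filesystem-side wiring; myfs_truncate_range and myfs_inode_ops are hypothetical:

#include <linux/fs.h>

/*
 * Hypothetical hole punch: free the blocks backing [offset, end] while
 * leaving i_size unchanged.  vmtruncate_range() has already unmapped
 * the range and holds i_mutex and i_alloc_sem when this runs.
 */
static void myfs_truncate_range(struct inode *inode, loff_t offset, loff_t end)
{
        /* release on-disk blocks and page-cache pages in the byte range */
}

static const struct inode_operations myfs_inode_ops = {
        .truncate_range = myfs_truncate_range,
};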