about summary refs log tree commit diff stats
path: root/mm/truncate.c
diff options
context:
space:
mode:
Diffstat (limited to 'mm/truncate.c')
-rw-r--r-- mm/truncate.c | 39
1 file changed, 22 insertions(+), 17 deletions(-)
diff --git a/mm/truncate.c b/mm/truncate.c
index 450cebdabfc0..f42675a3615d 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -9,6 +9,7 @@
9 9
10#include <linux/kernel.h> 10#include <linux/kernel.h>
11#include <linux/backing-dev.h> 11#include <linux/backing-dev.h>
12#include <linux/gfp.h>
12#include <linux/mm.h> 13#include <linux/mm.h>
13#include <linux/swap.h> 14#include <linux/swap.h>
14#include <linux/module.h> 15#include <linux/module.h>
@@ -272,6 +273,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
272 pagevec_release(&pvec); 273 pagevec_release(&pvec);
273 break; 274 break;
274 } 275 }
276 mem_cgroup_uncharge_start();
275 for (i = 0; i < pagevec_count(&pvec); i++) { 277 for (i = 0; i < pagevec_count(&pvec); i++) {
276 struct page *page = pvec.pages[i]; 278 struct page *page = pvec.pages[i];
277 279
@@ -286,6 +288,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
286 unlock_page(page); 288 unlock_page(page);
287 } 289 }
288 pagevec_release(&pvec); 290 pagevec_release(&pvec);
291 mem_cgroup_uncharge_end();
289 } 292 }
290} 293}
291EXPORT_SYMBOL(truncate_inode_pages_range); 294EXPORT_SYMBOL(truncate_inode_pages_range);
@@ -327,6 +330,7 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
327 pagevec_init(&pvec, 0); 330 pagevec_init(&pvec, 0);
328 while (next <= end && 331 while (next <= end &&
329 pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) { 332 pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
333 mem_cgroup_uncharge_start();
330 for (i = 0; i < pagevec_count(&pvec); i++) { 334 for (i = 0; i < pagevec_count(&pvec); i++) {
331 struct page *page = pvec.pages[i]; 335 struct page *page = pvec.pages[i];
332 pgoff_t index; 336 pgoff_t index;
@@ -354,6 +358,7 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
354 break; 358 break;
355 } 359 }
356 pagevec_release(&pvec); 360 pagevec_release(&pvec);
361 mem_cgroup_uncharge_end();
357 cond_resched(); 362 cond_resched();
358 } 363 }
359 return ret; 364 return ret;
@@ -428,6 +433,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
428 while (next <= end && !wrapped && 433 while (next <= end && !wrapped &&
429 pagevec_lookup(&pvec, mapping, next, 434 pagevec_lookup(&pvec, mapping, next,
430 min(end - next, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) { 435 min(end - next, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
436 mem_cgroup_uncharge_start();
431 for (i = 0; i < pagevec_count(&pvec); i++) { 437 for (i = 0; i < pagevec_count(&pvec); i++) {
432 struct page *page = pvec.pages[i]; 438 struct page *page = pvec.pages[i];
433 pgoff_t page_index; 439 pgoff_t page_index;
@@ -477,6 +483,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
477 unlock_page(page); 483 unlock_page(page);
478 } 484 }
479 pagevec_release(&pvec); 485 pagevec_release(&pvec);
486 mem_cgroup_uncharge_end();
480 cond_resched(); 487 cond_resched();
481 } 488 }
482 return ret; 489 return ret;
@@ -490,7 +497,7 @@ EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);
490 * Any pages which are found to be mapped into pagetables are unmapped prior to 497 * Any pages which are found to be mapped into pagetables are unmapped prior to
491 * invalidation. 498 * invalidation.
492 * 499 *
493 * Returns -EIO if any pages could not be invalidated. 500 * Returns -EBUSY if any pages could not be invalidated.
494 */ 501 */
495int invalidate_inode_pages2(struct address_space *mapping) 502int invalidate_inode_pages2(struct address_space *mapping)
496{ 503{
@@ -516,22 +523,20 @@ EXPORT_SYMBOL_GPL(invalidate_inode_pages2);
516 */ 523 */
517void truncate_pagecache(struct inode *inode, loff_t old, loff_t new) 524void truncate_pagecache(struct inode *inode, loff_t old, loff_t new)
518{ 525{
519 if (new < old) { 526 struct address_space *mapping = inode->i_mapping;
520 struct address_space *mapping = inode->i_mapping; 527
521 528 /*
522 /* 529 * unmap_mapping_range is called twice, first simply for
523 * unmap_mapping_range is called twice, first simply for 530 * efficiency so that truncate_inode_pages does fewer
524 * efficiency so that truncate_inode_pages does fewer 531 * single-page unmaps. However after this first call, and
525 * single-page unmaps. However after this first call, and 532 * before truncate_inode_pages finishes, it is possible for
526 * before truncate_inode_pages finishes, it is possible for 533 * private pages to be COWed, which remain after
527 * private pages to be COWed, which remain after 534 * truncate_inode_pages finishes, hence the second
528 * truncate_inode_pages finishes, hence the second 535 * unmap_mapping_range call must be made for correctness.
529 * unmap_mapping_range call must be made for correctness. 536 */
530 */ 537 unmap_mapping_range(mapping, new + PAGE_SIZE - 1, 0, 1);
531 unmap_mapping_range(mapping, new + PAGE_SIZE - 1, 0, 1); 538 truncate_inode_pages(mapping, new);
532 truncate_inode_pages(mapping, new); 539 unmap_mapping_range(mapping, new + PAGE_SIZE - 1, 0, 1);
533 unmap_mapping_range(mapping, new + PAGE_SIZE - 1, 0, 1);
534 }
535} 540}
536EXPORT_SYMBOL(truncate_pagecache); 541EXPORT_SYMBOL(truncate_pagecache);
537 542