aboutsummaryrefslogtreecommitdiffstats
path: root/mm
diff options
context:
space:
mode:
authorHugh Dickins <hugh@veritas.com>2007-03-29 04:20:35 -0400
committerLinus Torvalds <torvalds@woody.linux-foundation.org>2007-03-29 11:22:25 -0400
commita2646d1e6c8d2239d8054a7d342eb9775a1d273a (patch)
treedcb87e633ab7721fc6b94020ba3c8dd5213d6fac /mm
parent96fac9dc95b91fc198bfbf4ba90263b06eff023d (diff)
[PATCH] holepunch: fix shmem_truncate_range punching too far
Miklos Szeredi observes BUG_ON(!entry) in shmem_writepage() triggered in rare circumstances, because shmem_truncate_range() erroneously removes partially truncated directory pages at the end of the range: later reclaim on pages pointing to these removed directories triggers the BUG. Indeed, and it can also cause data loss beyond the hole. Fix this as in the patch proposed by Miklos, but distinguish between "limit" (how far we need to search: ignore truncation's next_index optimization in the holepunch case - if there are races it's more consistent to act on the whole range specified) and "upper_limit" (how far we can free directory pages: generally we must be careful to keep partially punched pages, but can relax at end of file - i_size being held stable by i_mutex). Signed-off-by: Hugh Dickins <hugh@veritas.com> Cc: Miklos Szeredi <mszeredi@suse.cs> Cc: Badari Pulavarty <pbadari@us.ibm.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--mm/shmem.c32
1 file changed, 21 insertions(+), 11 deletions(-)
diff --git a/mm/shmem.c b/mm/shmem.c
index b8c429a2d271..1077b1d903d2 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -481,7 +481,8 @@ static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
481 long nr_swaps_freed = 0; 481 long nr_swaps_freed = 0;
482 int offset; 482 int offset;
483 int freed; 483 int freed;
484 int punch_hole = 0; 484 int punch_hole;
485 unsigned long upper_limit;
485 486
486 inode->i_ctime = inode->i_mtime = CURRENT_TIME; 487 inode->i_ctime = inode->i_mtime = CURRENT_TIME;
487 idx = (start + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 488 idx = (start + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
@@ -492,11 +493,18 @@ static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
492 info->flags |= SHMEM_TRUNCATE; 493 info->flags |= SHMEM_TRUNCATE;
493 if (likely(end == (loff_t) -1)) { 494 if (likely(end == (loff_t) -1)) {
494 limit = info->next_index; 495 limit = info->next_index;
496 upper_limit = SHMEM_MAX_INDEX;
495 info->next_index = idx; 497 info->next_index = idx;
498 punch_hole = 0;
496 } else { 499 } else {
497 limit = (end + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 500 if (end + 1 >= inode->i_size) { /* we may free a little more */
498 if (limit > info->next_index) 501 limit = (inode->i_size + PAGE_CACHE_SIZE - 1) >>
499 limit = info->next_index; 502 PAGE_CACHE_SHIFT;
503 upper_limit = SHMEM_MAX_INDEX;
504 } else {
505 limit = (end + 1) >> PAGE_CACHE_SHIFT;
506 upper_limit = limit;
507 }
500 punch_hole = 1; 508 punch_hole = 1;
501 } 509 }
502 510
@@ -520,10 +528,10 @@ static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
520 * If there are no indirect blocks or we are punching a hole 528 * If there are no indirect blocks or we are punching a hole
521 * below indirect blocks, nothing to be done. 529 * below indirect blocks, nothing to be done.
522 */ 530 */
523 if (!topdir || (punch_hole && (limit <= SHMEM_NR_DIRECT))) 531 if (!topdir || limit <= SHMEM_NR_DIRECT)
524 goto done2; 532 goto done2;
525 533
526 BUG_ON(limit <= SHMEM_NR_DIRECT); 534 upper_limit -= SHMEM_NR_DIRECT;
527 limit -= SHMEM_NR_DIRECT; 535 limit -= SHMEM_NR_DIRECT;
528 idx = (idx > SHMEM_NR_DIRECT)? (idx - SHMEM_NR_DIRECT): 0; 536 idx = (idx > SHMEM_NR_DIRECT)? (idx - SHMEM_NR_DIRECT): 0;
529 offset = idx % ENTRIES_PER_PAGE; 537 offset = idx % ENTRIES_PER_PAGE;
@@ -543,7 +551,7 @@ static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
543 if (*dir) { 551 if (*dir) {
544 diroff = ((idx - ENTRIES_PER_PAGEPAGE/2) % 552 diroff = ((idx - ENTRIES_PER_PAGEPAGE/2) %
545 ENTRIES_PER_PAGEPAGE) / ENTRIES_PER_PAGE; 553 ENTRIES_PER_PAGEPAGE) / ENTRIES_PER_PAGE;
546 if (!diroff && !offset) { 554 if (!diroff && !offset && upper_limit >= stage) {
547 *dir = NULL; 555 *dir = NULL;
548 nr_pages_to_free++; 556 nr_pages_to_free++;
549 list_add(&middir->lru, &pages_to_free); 557 list_add(&middir->lru, &pages_to_free);
@@ -570,9 +578,11 @@ static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
570 } 578 }
571 stage = idx + ENTRIES_PER_PAGEPAGE; 579 stage = idx + ENTRIES_PER_PAGEPAGE;
572 middir = *dir; 580 middir = *dir;
573 *dir = NULL; 581 if (upper_limit >= stage) {
574 nr_pages_to_free++; 582 *dir = NULL;
575 list_add(&middir->lru, &pages_to_free); 583 nr_pages_to_free++;
584 list_add(&middir->lru, &pages_to_free);
585 }
576 shmem_dir_unmap(dir); 586 shmem_dir_unmap(dir);
577 cond_resched(); 587 cond_resched();
578 dir = shmem_dir_map(middir); 588 dir = shmem_dir_map(middir);
@@ -598,7 +608,7 @@ static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
598 } 608 }
599 if (offset) 609 if (offset)
600 offset = 0; 610 offset = 0;
601 else if (subdir && !page_private(subdir)) { 611 else if (subdir && upper_limit - idx >= ENTRIES_PER_PAGE) {
602 dir[diroff] = NULL; 612 dir[diroff] = NULL;
603 nr_pages_to_free++; 613 nr_pages_to_free++;
604 list_add(&subdir->lru, &pages_to_free); 614 list_add(&subdir->lru, &pages_to_free);