Diffstat (limited to 'mm/memory.c')
-rw-r--r--  mm/memory.c  28
1 file changed, 13 insertions(+), 15 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index fee5dc8fc36c..854bd90eeca1 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -645,7 +645,7 @@ static void unmap_page_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
  * @nr_accounted: Place number of unmapped pages in vm-accountable vma's here
  * @details: details of nonlinear truncation or shared cache invalidation
  *
- * Returns the number of vma's which were covered by the unmapping.
+ * Returns the end address of the unmapping (restart addr if interrupted).
  *
  * Unmap all pages in the vma list. Called under page_table_lock.
  *
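[The contract change documented above means a caller reads the stop address straight from unmap_vmas()'s return value instead of from a side channel in zap_details. A minimal caller sketch, assuming the post-patch signature; the surrounding declarations (tlb, mm, vma, details) are assumed to exist and are not part of this patch:

        /* Hedged sketch, not from this patch: consuming the return value */
        unsigned long nr_accounted = 0;
        unsigned long stopped;

        stopped = unmap_vmas(&tlb, mm, vma, start_addr, end_addr,
                                &nr_accounted, details);
        if (stopped < end_addr) {
                /* broke out early (e.g. for i_mmap_lock): resume at "stopped" */
        }
]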
@@ -662,7 +662,7 @@ static void unmap_page_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
  * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
  * drops the lock and schedules.
  */
-int unmap_vmas(struct mmu_gather **tlbp, struct mm_struct *mm,
+unsigned long unmap_vmas(struct mmu_gather **tlbp, struct mm_struct *mm,
 		struct vm_area_struct *vma, unsigned long start_addr,
 		unsigned long end_addr, unsigned long *nr_accounted,
 		struct zap_details *details)
@@ -670,12 +670,11 @@ int unmap_vmas(struct mmu_gather **tlbp, struct mm_struct *mm,
 	unsigned long zap_bytes = ZAP_BLOCK_SIZE;
 	unsigned long tlb_start = 0;	/* For tlb_finish_mmu */
 	int tlb_start_valid = 0;
-	int ret = 0;
+	unsigned long start = start_addr;
 	spinlock_t *i_mmap_lock = details? details->i_mmap_lock: NULL;
 	int fullmm = tlb_is_full_mm(*tlbp);
 
 	for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) {
-		unsigned long start;
 		unsigned long end;
 
 		start = max(vma->vm_start, start_addr);
@@ -688,7 +687,6 @@ int unmap_vmas(struct mmu_gather **tlbp, struct mm_struct *mm,
 		if (vma->vm_flags & VM_ACCOUNT)
 			*nr_accounted += (end - start) >> PAGE_SHIFT;
 
-		ret++;
 		while (start != end) {
 			unsigned long block;
 
@@ -719,7 +717,6 @@ int unmap_vmas(struct mmu_gather **tlbp, struct mm_struct *mm,
 			if (i_mmap_lock) {
 				/* must reset count of rss freed */
 				*tlbp = tlb_gather_mmu(mm, fullmm);
-				details->break_addr = start;
 				goto out;
 			}
 			spin_unlock(&mm->page_table_lock);
@@ -733,7 +730,7 @@ int unmap_vmas(struct mmu_gather **tlbp, struct mm_struct *mm,
 		}
 	}
 out:
-	return ret;
+	return start;	/* which is now the end (or restart) address */
 }
 
 /**
@@ -743,7 +740,7 @@ out:
  * @size: number of bytes to zap
  * @details: details of nonlinear truncation or shared cache invalidation
  */
-void zap_page_range(struct vm_area_struct *vma, unsigned long address,
+unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
 		unsigned long size, struct zap_details *details)
 {
 	struct mm_struct *mm = vma->vm_mm;
@@ -753,15 +750,16 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long address,
 
 	if (is_vm_hugetlb_page(vma)) {
 		zap_hugepage_range(vma, address, size);
-		return;
+		return end;
 	}
 
 	lru_add_drain();
 	spin_lock(&mm->page_table_lock);
 	tlb = tlb_gather_mmu(mm, 0);
-	unmap_vmas(&tlb, mm, vma, address, end, &nr_accounted, details);
+	end = unmap_vmas(&tlb, mm, vma, address, end, &nr_accounted, details);
 	tlb_finish_mmu(tlb, address, end);
 	spin_unlock(&mm->page_table_lock);
+	return end;
 }
 
 /*
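[With zap_page_range() now propagating the address unmap_vmas() reached, a caller can distinguish a complete zap from a partial one. An illustrative fragment assuming post-patch semantics; the variable names are ours, not the patch's:

        /* Hedged sketch, not from this patch: detect a partial zap */
        unsigned long restart_addr;

        restart_addr = zap_page_range(vma, start_addr,
                                        end_addr - start_addr, details);
        if (restart_addr < end_addr) {
                /* range not fully unmapped; a later pass restarts here */
        }

This is essentially the pattern the truncation code adopts in the hunks below.]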
@@ -1348,7 +1346,7 @@ no_new_page:
  * i_mmap_lock.
  *
  * In order to make forward progress despite repeatedly restarting some
- * large vma, note the break_addr set by unmap_vmas when it breaks out:
+ * large vma, note the restart_addr from unmap_vmas when it breaks out:
  * and restart from that address when we reach that vma again. It might
  * have been split or merged, shrunk or extended, but never shifted: so
  * restart_addr remains valid so long as it remains in the vma's range.
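[The consumption side of that note is not visible in these hunks. A hypothetical fragment, not from this patch, of how a later pass might resume from the address stashed in vm_truncate_count; the range test stands in for the real validity check:

        /* Hedged sketch: resume from a previously noted restart address */
        restart_addr = vma->vm_truncate_count;
        if (restart_addr > start_addr && restart_addr < vma->vm_end)
                start_addr = restart_addr;      /* skip work already done */
]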
@@ -1386,8 +1384,8 @@ again:
 		}
 	}
 
-	details->break_addr = end_addr;
-	zap_page_range(vma, start_addr, end_addr - start_addr, details);
+	restart_addr = zap_page_range(vma, start_addr,
+					end_addr - start_addr, details);
 
 	/*
 	 * We cannot rely on the break test in unmap_vmas:
@@ -1398,14 +1396,14 @@ again:
 	need_break = need_resched() ||
 			need_lockbreak(details->i_mmap_lock);
 
-	if (details->break_addr >= end_addr) {
+	if (restart_addr >= end_addr) {
 		/* We have now completed this vma: mark it so */
 		vma->vm_truncate_count = details->truncate_count;
 		if (!need_break)
 			return 0;
 	} else {
 		/* Note restart_addr in vma's truncate_count field */
-		vma->vm_truncate_count = details->break_addr;
+		vma->vm_truncate_count = restart_addr;
 		if (!need_break)
 			goto again;
 	}