 include/asm-ia64/processor.h  |  8 --------
 include/asm-ppc64/processor.h |  4 ----
 include/asm-s390/processor.h  |  2 --
 include/linux/mm.h            |  9 ++-------
 mm/memory.c                   | 28 +++++++++++++---------------
 mm/mmap.c                     |  6 +++---
 6 files changed, 18 insertions(+), 39 deletions(-)
diff --git a/include/asm-ia64/processor.h b/include/asm-ia64/processor.h
index 8769dd9df369..2807f8d766d4 100644
--- a/include/asm-ia64/processor.h
+++ b/include/asm-ia64/processor.h
@@ -43,14 +43,6 @@
 #define TASK_SIZE		(current->thread.task_size)
 
 /*
- * MM_VM_SIZE(mm) gives the maximum address (plus 1) which may contain a mapping for
- * address-space MM. Note that with 32-bit tasks, this is still DEFAULT_TASK_SIZE,
- * because the kernel may have installed helper-mappings above TASK_SIZE. For example,
- * for x86 emulation, the LDT and GDT are mapped above TASK_SIZE.
- */
-#define MM_VM_SIZE(mm)		DEFAULT_TASK_SIZE
-
-/*
  * This decides where the kernel will search for a free chunk of vm
  * space during mmap's.
  */
diff --git a/include/asm-ppc64/processor.h b/include/asm-ppc64/processor.h
index cae65b30adb8..0035efe2db2b 100644
--- a/include/asm-ppc64/processor.h
+++ b/include/asm-ppc64/processor.h
@@ -542,10 +542,6 @@ extern struct task_struct *last_task_used_altivec;
 #define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \
 		TASK_SIZE_USER32 : TASK_SIZE_USER64)
 
-/* We can't actually tell the TASK_SIZE given just the mm, but default
- * to the 64-bit case to make sure that enough gets cleaned up. */
-#define MM_VM_SIZE(mm)	TASK_SIZE_USER64
-
 /* This decides where the kernel will search for a free chunk of vm
  * space during mmap's.
  */
diff --git a/include/asm-s390/processor.h b/include/asm-s390/processor.h
index cbbd11471672..88c272ca48bf 100644
--- a/include/asm-s390/processor.h
+++ b/include/asm-s390/processor.h
@@ -74,8 +74,6 @@ extern struct task_struct *last_task_used_math;
 
 #endif /* __s390x__ */
 
-#define MM_VM_SIZE(mm)	DEFAULT_TASK_SIZE
-
 #define HAVE_ARCH_PICK_MMAP_LAYOUT
 
 typedef struct {
diff --git a/include/linux/mm.h b/include/linux/mm.h
index c3f6c39d41d0..59eca28b5ae2 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -37,10 +37,6 @@ extern int sysctl_legacy_va_layout;
 #include <asm/processor.h>
 #include <asm/atomic.h>
 
-#ifndef MM_VM_SIZE
-#define MM_VM_SIZE(mm)	((TASK_SIZE + PGDIR_SIZE - 1) & PGDIR_MASK)
-#endif
-
 #define nth_page(page,n)	pfn_to_page(page_to_pfn((page)) + (n))
 
 /*
@@ -582,13 +578,12 @@ struct zap_details {
 	pgoff_t first_index;			/* Lowest page->index to unmap */
 	pgoff_t last_index;			/* Highest page->index to unmap */
 	spinlock_t *i_mmap_lock;		/* For unmap_mapping_range: */
-	unsigned long break_addr;		/* Where unmap_vmas stopped */
 	unsigned long truncate_count;		/* Compare vm_truncate_count */
 };
 
-void zap_page_range(struct vm_area_struct *vma, unsigned long address,
+unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
 		unsigned long size, struct zap_details *);
-int unmap_vmas(struct mmu_gather **tlbp, struct mm_struct *mm,
+unsigned long unmap_vmas(struct mmu_gather **tlb, struct mm_struct *mm,
 		struct vm_area_struct *start_vma, unsigned long start_addr,
 		unsigned long end_addr, unsigned long *nr_accounted,
 		struct zap_details *);
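
[Editor's note, not part of the patch: a minimal userspace sketch of the interface change above. unmap_vmas() and zap_page_range() now return the address they stopped at instead of a vma count or void, so a caller learns exactly how far the unmapping got. The function process_range() and all values here are hypothetical stand-ins, not kernel code.]

#include <stdio.h>

/*
 * Hypothetical model of the new unmap_vmas()-style contract: work through
 * [start, end) in page-sized steps under a limited budget and return the
 * address actually reached -- end on completion, or the restart point if
 * the work was cut short.
 */
static unsigned long process_range(unsigned long start, unsigned long end,
				   unsigned long budget)
{
	while (start < end && budget--)
		start += 4096;		/* one "page" of work per iteration */
	return start;			/* end, or where the caller must resume */
}

int main(void)
{
	unsigned long end = 10 * 4096;
	unsigned long reached = process_range(0, end, 4);

	if (reached >= end)
		printf("completed up to %#lx\n", reached);
	else
		printf("interrupted, restart at %#lx\n", reached);
	return 0;
}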
diff --git a/mm/memory.c b/mm/memory.c
index fee5dc8fc36c..854bd90eeca1 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -645,7 +645,7 @@ static void unmap_page_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
  * @nr_accounted: Place number of unmapped pages in vm-accountable vma's here
  * @details: details of nonlinear truncation or shared cache invalidation
  *
- * Returns the number of vma's which were covered by the unmapping.
+ * Returns the end address of the unmapping (restart addr if interrupted).
  *
  * Unmap all pages in the vma list.  Called under page_table_lock.
  *
@@ -662,7 +662,7 @@ static void unmap_page_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
  * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
  * drops the lock and schedules.
  */
-int unmap_vmas(struct mmu_gather **tlbp, struct mm_struct *mm,
+unsigned long unmap_vmas(struct mmu_gather **tlbp, struct mm_struct *mm,
 		struct vm_area_struct *vma, unsigned long start_addr,
 		unsigned long end_addr, unsigned long *nr_accounted,
 		struct zap_details *details)
@@ -670,12 +670,11 @@ int unmap_vmas(struct mmu_gather **tlbp, struct mm_struct *mm,
 	unsigned long zap_bytes = ZAP_BLOCK_SIZE;
 	unsigned long tlb_start = 0;	/* For tlb_finish_mmu */
 	int tlb_start_valid = 0;
-	int ret = 0;
+	unsigned long start = start_addr;
 	spinlock_t *i_mmap_lock = details? details->i_mmap_lock: NULL;
 	int fullmm = tlb_is_full_mm(*tlbp);
 
 	for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) {
-		unsigned long start;
 		unsigned long end;
 
 		start = max(vma->vm_start, start_addr);
@@ -688,7 +687,6 @@ int unmap_vmas(struct mmu_gather **tlbp, struct mm_struct *mm,
 		if (vma->vm_flags & VM_ACCOUNT)
 			*nr_accounted += (end - start) >> PAGE_SHIFT;
 
-		ret++;
 		while (start != end) {
 			unsigned long block;
 
@@ -719,7 +717,6 @@ int unmap_vmas(struct mmu_gather **tlbp, struct mm_struct *mm,
 			if (i_mmap_lock) {
 				/* must reset count of rss freed */
 				*tlbp = tlb_gather_mmu(mm, fullmm);
-				details->break_addr = start;
 				goto out;
 			}
 			spin_unlock(&mm->page_table_lock);
@@ -733,7 +730,7 @@ int unmap_vmas(struct mmu_gather **tlbp, struct mm_struct *mm,
 		}
 	}
 out:
-	return ret;
+	return start;	/* which is now the end (or restart) address */
 }
 
 /**
@@ -743,7 +740,7 @@ out:
  * @size: number of bytes to zap
  * @details: details of nonlinear truncation or shared cache invalidation
  */
-void zap_page_range(struct vm_area_struct *vma, unsigned long address,
+unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
 		unsigned long size, struct zap_details *details)
 {
 	struct mm_struct *mm = vma->vm_mm;
@@ -753,15 +750,16 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long address,
 
 	if (is_vm_hugetlb_page(vma)) {
 		zap_hugepage_range(vma, address, size);
-		return;
+		return end;
 	}
 
 	lru_add_drain();
 	spin_lock(&mm->page_table_lock);
 	tlb = tlb_gather_mmu(mm, 0);
-	unmap_vmas(&tlb, mm, vma, address, end, &nr_accounted, details);
+	end = unmap_vmas(&tlb, mm, vma, address, end, &nr_accounted, details);
 	tlb_finish_mmu(tlb, address, end);
 	spin_unlock(&mm->page_table_lock);
+	return end;
 }
 
 /*
@@ -1348,7 +1346,7 @@ no_new_page:
  * i_mmap_lock.
  *
  * In order to make forward progress despite repeatedly restarting some
- * large vma, note the break_addr set by unmap_vmas when it breaks out:
+ * large vma, note the restart_addr from unmap_vmas when it breaks out:
  * and restart from that address when we reach that vma again.  It might
  * have been split or merged, shrunk or extended, but never shifted: so
  * restart_addr remains valid so long as it remains in the vma's range.
@@ -1386,8 +1384,8 @@ again:
 		}
 	}
 
-	details->break_addr = end_addr;
-	zap_page_range(vma, start_addr, end_addr - start_addr, details);
+	restart_addr = zap_page_range(vma, start_addr,
+				end_addr - start_addr, details);
 
 	/*
 	 * We cannot rely on the break test in unmap_vmas:
@@ -1398,14 +1396,14 @@ again:
 	need_break = need_resched() ||
 			need_lockbreak(details->i_mmap_lock);
 
-	if (details->break_addr >= end_addr) {
+	if (restart_addr >= end_addr) {
 		/* We have now completed this vma: mark it so */
 		vma->vm_truncate_count = details->truncate_count;
 		if (!need_break)
 			return 0;
 	} else {
 		/* Note restart_addr in vma's truncate_count field */
-		vma->vm_truncate_count = details->break_addr;
+		vma->vm_truncate_count = restart_addr;
 		if (!need_break)
 			goto again;
 	}
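
[Editor's note, not part of the patch: the hunks above make unmap_mapping_range_vma remember the restart_addr returned by zap_page_range and resume from it on the next pass. A hypothetical userspace model of that restart loop follows; zap_block() and the block size are illustrative stand-ins, while ZAP_BLOCK_SIZE, restart_addr and vm_truncate_count in the comments refer to the names used in the patch.]

#include <stdio.h>

/* Sketch of the restart pattern: zap at most one block per pass and
 * return the address where that pass stopped. */
static unsigned long zap_block(unsigned long start, unsigned long end)
{
	unsigned long block = 8 * 4096;	/* stand-in for ZAP_BLOCK_SIZE */

	return (end - start > block) ? start + block : end;
}

int main(void)
{
	unsigned long start = 0, end = 20 * 4096;
	unsigned long restart_addr;

again:
	restart_addr = zap_block(start, end);
	if (restart_addr >= end) {
		printf("vma fully zapped at %#lx\n", restart_addr);
		return 0;
	}
	/* Record progress (the kernel stashes it in vm_truncate_count),
	 * then resume from exactly where the last pass stopped. */
	printf("pass stopped at %#lx, restarting\n", restart_addr);
	start = restart_addr;
	goto again;
}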
diff --git a/mm/mmap.c b/mm/mmap.c
index 926d03015471..f8c61b2385ff 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1900,6 +1900,7 @@ void exit_mmap(struct mm_struct *mm)
 	struct mmu_gather *tlb;
 	struct vm_area_struct *vma = mm->mmap;
 	unsigned long nr_accounted = 0;
+	unsigned long end;
 
 	lru_add_drain();
 
@@ -1908,10 +1909,10 @@ void exit_mmap(struct mm_struct *mm)
 	flush_cache_mm(mm);
 	tlb = tlb_gather_mmu(mm, 1);
 	/* Use -1 here to ensure all VMAs in the mm are unmapped */
-	mm->map_count -= unmap_vmas(&tlb, mm, vma, 0, -1, &nr_accounted, NULL);
+	end = unmap_vmas(&tlb, mm, vma, 0, -1, &nr_accounted, NULL);
 	vm_unacct_memory(nr_accounted);
 	free_pgtables(&tlb, vma, 0, 0);
-	tlb_finish_mmu(tlb, 0, MM_VM_SIZE(mm));
+	tlb_finish_mmu(tlb, 0, end);
 
 	mm->mmap = mm->mmap_cache = NULL;
 	mm->mm_rb = RB_ROOT;
@@ -1931,7 +1932,6 @@ void exit_mmap(struct mm_struct *mm)
 		vma = next;
 	}
 
-	BUG_ON(mm->map_count);	/* This is just debugging */
 	BUG_ON(mm->nr_ptes);	/* This is just debugging */
 }
 