author     Hugh Dickins <hugh@veritas.com>                   2005-04-19 16:29:15 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org.(none)>  2005-04-19 16:29:15 -0400
commit     ee39b37b23da0b6ec53a8ebe90ff41c016f8ae27
tree       4af606913ab8f95551623b788c0c66c1f5902229  /include
parent     e0da382c92626ad1d7f4b7527d19b80104d67a83
[PATCH] freepgt: remove MM_VM_SIZE(mm)
There's only one usage of MM_VM_SIZE(mm) left, and it's a troublesome macro because mm doesn't contain the (32-bit emulation?) info needed.  But it too is only needed because we ignore the end from the vma list.

We could make flush_pgtables return that end, or unmap_vmas.  Choose the latter, since it's a natural fit with unmap_mapping_range_vma needing to know its restart addr.  This does make more than minimal change, but if unmap_vmas had returned the end before, this is how we'd have done it, rather than storing the break_addr in zap_details.

unmap_vmas used to return count of vmas scanned, but that's just debug which hasn't been useful in a while; and if we want the map_count 0 on exit check back, it can easily come from the final remove_vm_struct loop.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
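As an illustration of the interface change (a minimal sketch, not code from this patch; the helper name sketch_zap_restart and its early-stop handling are hypothetical), a caller in the style of unmap_mapping_range_vma can now take its restart address straight from the value zap_page_range() returns, instead of reading it back out of zap_details.break_addr:

#include <linux/mm.h>

/*
 * Hypothetical caller, for illustration only: zap_page_range() (and
 * unmap_vmas() beneath it) now return the address at which unmapping
 * stopped, so the restart point comes from the return value rather
 * than from a break_addr field stashed in struct zap_details.
 */
static unsigned long sketch_zap_restart(struct vm_area_struct *vma,
					unsigned long start_addr,
					unsigned long end_addr,
					struct zap_details *details)
{
	unsigned long restart_addr;

	restart_addr = zap_page_range(vma, start_addr,
				      end_addr - start_addr, details);

	/*
	 * restart_addr < end_addr means the zap stopped early (for
	 * example to drop a contended lock); the caller resumes there.
	 */
	return restart_addr;
}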
Diffstat (limited to 'include')
-rw-r--r--   include/asm-ia64/processor.h    8
-rw-r--r--   include/asm-ppc64/processor.h   4
-rw-r--r--   include/asm-s390/processor.h    2
-rw-r--r--   include/linux/mm.h              9
4 files changed, 2 insertions, 21 deletions
diff --git a/include/asm-ia64/processor.h b/include/asm-ia64/processor.h
index 8769dd9df36..2807f8d766d 100644
--- a/include/asm-ia64/processor.h
+++ b/include/asm-ia64/processor.h
@@ -43,14 +43,6 @@
 #define TASK_SIZE (current->thread.task_size)
 
 /*
- * MM_VM_SIZE(mm) gives the maximum address (plus 1) which may contain a mapping for
- * address-space MM.  Note that with 32-bit tasks, this is still DEFAULT_TASK_SIZE,
- * because the kernel may have installed helper-mappings above TASK_SIZE.  For example,
- * for x86 emulation, the LDT and GDT are mapped above TASK_SIZE.
- */
-#define MM_VM_SIZE(mm) DEFAULT_TASK_SIZE
-
-/*
  * This decides where the kernel will search for a free chunk of vm
  * space during mmap's.
  */
diff --git a/include/asm-ppc64/processor.h b/include/asm-ppc64/processor.h
index cae65b30adb..0035efe2db2 100644
--- a/include/asm-ppc64/processor.h
+++ b/include/asm-ppc64/processor.h
@@ -542,10 +542,6 @@ extern struct task_struct *last_task_used_altivec;
 #define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \
 		TASK_SIZE_USER32 : TASK_SIZE_USER64)
 
-/* We can't actually tell the TASK_SIZE given just the mm, but default
- * to the 64-bit case to make sure that enough gets cleaned up. */
-#define MM_VM_SIZE(mm) TASK_SIZE_USER64
-
 /* This decides where the kernel will search for a free chunk of vm
  * space during mmap's.
  */
diff --git a/include/asm-s390/processor.h b/include/asm-s390/processor.h
index cbbd1147167..88c272ca48b 100644
--- a/include/asm-s390/processor.h
+++ b/include/asm-s390/processor.h
@@ -74,8 +74,6 @@ extern struct task_struct *last_task_used_math;
 
 #endif /* __s390x__ */
 
-#define MM_VM_SIZE(mm) DEFAULT_TASK_SIZE
-
 #define HAVE_ARCH_PICK_MMAP_LAYOUT
 
 typedef struct {
diff --git a/include/linux/mm.h b/include/linux/mm.h
index c3f6c39d41d..59eca28b5ae 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -37,10 +37,6 @@ extern int sysctl_legacy_va_layout;
 #include <asm/processor.h>
 #include <asm/atomic.h>
 
-#ifndef MM_VM_SIZE
-#define MM_VM_SIZE(mm) ((TASK_SIZE + PGDIR_SIZE - 1) & PGDIR_MASK)
-#endif
-
 #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
 
 /*
@@ -582,13 +578,12 @@ struct zap_details {
 	pgoff_t first_index;		/* Lowest page->index to unmap */
 	pgoff_t last_index;		/* Highest page->index to unmap */
 	spinlock_t *i_mmap_lock;	/* For unmap_mapping_range: */
-	unsigned long break_addr;	/* Where unmap_vmas stopped */
 	unsigned long truncate_count;	/* Compare vm_truncate_count */
 };
 
-void zap_page_range(struct vm_area_struct *vma, unsigned long address,
+unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
 		unsigned long size, struct zap_details *);
-int unmap_vmas(struct mmu_gather **tlbp, struct mm_struct *mm,
+unsigned long unmap_vmas(struct mmu_gather **tlb, struct mm_struct *mm,
 		struct vm_area_struct *start_vma, unsigned long start_addr,
 		unsigned long end_addr, unsigned long *nr_accounted,
 		struct zap_details *);
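For context, a minimal sketch (not code from this patch; the function name sketch_unmap_all and its arguments are hypothetical) of how teardown code can use the address unmap_vmas() now returns, where it previously had to fall back on the per-architecture MM_VM_SIZE(mm) guess:

#include <linux/mm.h>

/*
 * Hypothetical teardown fragment, for illustration only.  The return
 * value of unmap_vmas() reports how far the vma list actually reached,
 * which is the bound MM_VM_SIZE(mm) used to approximate.
 */
static unsigned long sketch_unmap_all(struct mmu_gather **tlb,
				      struct mm_struct *mm,
				      struct vm_area_struct *vma)
{
	unsigned long nr_accounted = 0;
	unsigned long end;

	/* ~0UL as end_addr: no upper limit, stop at the end of the vma list */
	end = unmap_vmas(tlb, mm, vma, 0, ~0UL, &nr_accounted, NULL);

	/* 'end' bounds any follow-up work such as freeing page tables */
	return end;
}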