author	Michel Lespinasse <walken@google.com>	2012-12-11 19:02:25 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-12-11 20:22:26 -0500
commit	2aea28b975ad1fbc7448e964d147fe96e3544998 (patch)
tree	df94f0b4638c907a6e86714e6e9c8d2f5eebe0cf /arch/sparc
parent	bb64f55019c7b0971cdcfbf3af9fb6376dd01ead (diff)
mm: use vm_unmapped_area() in hugetlbfs on sparc64 architecture
Update the sparc64 hugetlb_get_unmapped_area function to make use of
vm_unmapped_area() instead of implementing a brute force search.

Signed-off-by: Michel Lespinasse <walken@google.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
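For orientation before the diff: the patch replaces a hand-rolled linear walk over the mm's VMA list with a single call to the generic allocator. An annotated restatement of the bottom-up lookup from the first hunk (a minimal sketch, not a drop-in function; len, addr, task_size and the sparc64 constants are assumed to be in scope as in the patched file):

	struct vm_unmapped_area_info info;

	info.flags = 0;                          /* 0 = bottom-up; VM_UNMAPPED_AREA_TOPDOWN for top-down */
	info.length = len;                       /* size of the mapping being placed */
	info.low_limit = TASK_UNMAPPED_BASE;     /* lowest address to consider */
	info.high_limit = min(task_size, VA_EXCLUDE_START); /* stay below the sparc64 VA hole */
	info.align_mask = PAGE_MASK & ~HPAGE_MASK; /* returned address must be huge-page aligned */
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);          /* start of a free range, or -ENOMEM */

On failure vm_unmapped_area() returns -ENOMEM, which the callers below detect with the usual `addr & ~PAGE_MASK` check (an errno value is never page aligned) before retrying with different limits.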
Diffstat (limited to 'arch/sparc')
-rw-r--r--	arch/sparc/mm/hugetlbpage.c	124
1 file changed, 30 insertions(+), 94 deletions(-)
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index f76f83d5ac63..d2b59441ebdd 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -30,55 +30,28 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
 							unsigned long pgoff,
 							unsigned long flags)
 {
-	struct mm_struct *mm = current->mm;
-	struct vm_area_struct * vma;
 	unsigned long task_size = TASK_SIZE;
-	unsigned long start_addr;
+	struct vm_unmapped_area_info info;
 
 	if (test_thread_flag(TIF_32BIT))
 		task_size = STACK_TOP32;
-	if (unlikely(len >= VA_EXCLUDE_START))
-		return -ENOMEM;
 
-	if (len > mm->cached_hole_size) {
-		start_addr = addr = mm->free_area_cache;
-	} else {
-		start_addr = addr = TASK_UNMAPPED_BASE;
-		mm->cached_hole_size = 0;
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = TASK_UNMAPPED_BASE;
+	info.high_limit = min(task_size, VA_EXCLUDE_START);
+	info.align_mask = PAGE_MASK & ~HPAGE_MASK;
+	info.align_offset = 0;
+	addr = vm_unmapped_area(&info);
+
+	if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
+		VM_BUG_ON(addr != -ENOMEM);
+		info.low_limit = VA_EXCLUDE_END;
+		info.high_limit = task_size;
+		addr = vm_unmapped_area(&info);
 	}
 
-	task_size -= len;
-
-full_search:
-	addr = ALIGN(addr, HPAGE_SIZE);
-
-	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
-		/* At this point:  (!vma || addr < vma->vm_end). */
-		if (addr < VA_EXCLUDE_START &&
-		    (addr + len) >= VA_EXCLUDE_START) {
-			addr = VA_EXCLUDE_END;
-			vma = find_vma(mm, VA_EXCLUDE_END);
-		}
-		if (unlikely(task_size < addr)) {
-			if (start_addr != TASK_UNMAPPED_BASE) {
-				start_addr = addr = TASK_UNMAPPED_BASE;
-				mm->cached_hole_size = 0;
-				goto full_search;
-			}
-			return -ENOMEM;
-		}
-		if (likely(!vma || addr + len <= vma->vm_start)) {
-			/*
-			 * Remember the place where we stopped the search:
-			 */
-			mm->free_area_cache = addr + len;
-			return addr;
-		}
-		if (addr + mm->cached_hole_size < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
-
-		addr = ALIGN(vma->vm_end, HPAGE_SIZE);
-	}
+	return addr;
 }
 
 static unsigned long
@@ -87,71 +60,34 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 				  const unsigned long pgoff,
 				  const unsigned long flags)
 {
-	struct vm_area_struct *vma;
 	struct mm_struct *mm = current->mm;
 	unsigned long addr = addr0;
+	struct vm_unmapped_area_info info;
 
 	/* This should only ever run for 32-bit processes.  */
 	BUG_ON(!test_thread_flag(TIF_32BIT));
 
-	/* check if free_area_cache is useful for us */
-	if (len <= mm->cached_hole_size) {
-		mm->cached_hole_size = 0;
-		mm->free_area_cache = mm->mmap_base;
-	}
-
-	/* either no address requested or can't fit in requested address hole */
-	addr = mm->free_area_cache & HPAGE_MASK;
-
-	/* make sure it can fit in the remaining address space */
-	if (likely(addr > len)) {
-		vma = find_vma(mm, addr-len);
-		if (!vma || addr <= vma->vm_start) {
-			/* remember the address as a hint for next time */
-			return (mm->free_area_cache = addr-len);
-		}
-	}
-
-	if (unlikely(mm->mmap_base < len))
-		goto bottomup;
-
-	addr = (mm->mmap_base-len) & HPAGE_MASK;
-
-	do {
-		/*
-		 * Lookup failure means no vma is above this address,
-		 * else if new region fits below vma->vm_start,
-		 * return with success:
-		 */
-		vma = find_vma(mm, addr);
-		if (likely(!vma || addr+len <= vma->vm_start)) {
-			/* remember the address as a hint for next time */
-			return (mm->free_area_cache = addr);
-		}
-
-		/* remember the largest hole we saw so far */
-		if (addr + mm->cached_hole_size < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
-
-		/* try just below the current vma->vm_start */
-		addr = (vma->vm_start-len) & HPAGE_MASK;
-	} while (likely(len < vma->vm_start));
+	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+	info.length = len;
+	info.low_limit = PAGE_SIZE;
+	info.high_limit = mm->mmap_base;
+	info.align_mask = PAGE_MASK & ~HPAGE_MASK;
+	info.align_offset = 0;
+	addr = vm_unmapped_area(&info);
 
-bottomup:
 	/*
 	 * A failed mmap() very likely causes application failure,
 	 * so fall back to the bottom-up function here. This scenario
 	 * can happen with large stack limits and large mmap()
 	 * allocations.
 	 */
-	mm->cached_hole_size = ~0UL;
-	mm->free_area_cache = TASK_UNMAPPED_BASE;
-	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
-	/*
-	 * Restore the topdown base:
-	 */
-	mm->free_area_cache = mm->mmap_base;
-	mm->cached_hole_size = ~0UL;
+	if (addr & ~PAGE_MASK) {
+		VM_BUG_ON(addr != -ENOMEM);
+		info.flags = 0;
+		info.low_limit = TASK_UNMAPPED_BASE;
+		info.high_limit = STACK_TOP32;
+		addr = vm_unmapped_area(&info);
+	}
 
 	return addr;
 }
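A side note on the align_mask value both hunks use: info.align_mask tells vm_unmapped_area() which address bits must match align_offset (zero here), so PAGE_MASK & ~HPAGE_MASK — the bits between the base-page and huge-page boundaries — forces the result to huge-page alignment. With sparc64's usual 8 KB base pages and 4 MB huge pages (values assumed below purely for illustration), the mask works out to HPAGE_SIZE - PAGE_SIZE, which a standalone userspace check can confirm:

	#include <stdio.h>

	/* Assumed sparc64 values, for illustration only:
	 * 8 KB base pages, 4 MB huge pages. */
	#define PAGE_SHIFT	13
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)
	#define PAGE_MASK	(~(PAGE_SIZE - 1))
	#define HPAGE_SHIFT	22
	#define HPAGE_SIZE	(1UL << HPAGE_SHIFT)
	#define HPAGE_MASK	(~(HPAGE_SIZE - 1))

	int main(void)
	{
		unsigned long align_mask = PAGE_MASK & ~HPAGE_MASK;

		/* Bits 13..21 are set: exactly the bits that distinguish a
		 * page-aligned address from a huge-page-aligned one. */
		printf("align_mask = %#lx\n", align_mask);             /* 0x3fe000 */
		printf("identity   = %#lx\n", HPAGE_SIZE - PAGE_SIZE); /* same value */
		return 0;
	}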