author    Linus Torvalds <torvalds@linux-foundation.org>  2012-12-11 21:05:37 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2012-12-11 21:05:37 -0500
commit    608ff1a210ab0e8b969399039bf8e18693605910 (patch)
tree      faea7bb1764461c73d0953089bd5439d91733a03 /arch
parent    414a6750e59b0b687034764c464e9ddecac0f7a6 (diff)
parent    74d42d8fe146e870c52bde3b1c692f86cc8ff844 (diff)
Merge branch 'akpm' (Andrew's patchbomb)
Merge misc updates from Andrew Morton:
 "About half of most of MM.  Going very early this time due to
  uncertainty over the coreautounifiednumasched things.  I'll send the
  other half of most of MM tomorrow.  The rest of MM awaits a slab
  merge from Pekka."

* emailed patches from Andrew Morton: (71 commits)
  memory_hotplug: ensure every online node has NORMAL memory
  memory_hotplug: handle empty zone when online_movable/online_kernel
  mm, memory-hotplug: dynamic configure movable memory and portion memory
  drivers/base/node.c: cleanup node_state_attr[]
  bootmem: fix wrong call parameter for free_bootmem()
  avr32, kconfig: remove HAVE_ARCH_BOOTMEM
  mm: cma: remove watermark hacks
  mm: cma: skip watermarks check for already isolated blocks in split_free_page()
  mm, oom: fix race when specifying a thread as the oom origin
  mm, oom: change type of oom_score_adj to short
  mm: cleanup register_node()
  mm, mempolicy: remove duplicate code
  mm/vmscan.c: try_to_freeze() returns boolean
  mm: introduce putback_movable_pages()
  virtio_balloon: introduce migration primitives to balloon pages
  mm: introduce compaction and migration for ballooned pages
  mm: introduce a common interface for balloon pages mobility
  mm: redefine address_space.assoc_mapping
  mm: adjust address_space_operations.migratepage() return code
  arch/sparc/kernel/sys_sparc_64.c: s/COLOUR/COLOR/
  ...
Diffstat (limited to 'arch')
-rw-r--r--  arch/alpha/include/asm/mman.h              11
-rw-r--r--  arch/arm/mm/mmap.c                        132
-rw-r--r--  arch/avr32/Kconfig                          3
-rw-r--r--  arch/mips/include/uapi/asm/mman.h          11
-rw-r--r--  arch/mips/mm/mmap.c                       111
-rw-r--r--  arch/parisc/include/uapi/asm/mman.h        11
-rw-r--r--  arch/powerpc/kernel/sysfs.c                 4
-rw-r--r--  arch/powerpc/platforms/cell/celleb_pci.c    4
-rw-r--r--  arch/s390/include/asm/page.h                3
-rw-r--r--  arch/sh/mm/mmap.c                         137
-rw-r--r--  arch/sparc/kernel/sys_sparc_32.c           27
-rw-r--r--  arch/sparc/kernel/sys_sparc_64.c          150
-rw-r--r--  arch/sparc/mm/hugetlbpage.c               124
-rw-r--r--  arch/tile/mm/hugetlbpage.c                139
-rw-r--r--  arch/x86/include/asm/elf.h                  6
-rw-r--r--  arch/x86/include/asm/mman.h                 3
-rw-r--r--  arch/x86/kernel/sys_x86_64.c              151
-rw-r--r--  arch/x86/mm/hugetlbpage.c                 130
-rw-r--r--  arch/x86/vdso/vma.c                         2
-rw-r--r--  arch/xtensa/include/uapi/asm/mman.h        11
20 files changed, 272 insertions(+), 898 deletions(-)
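
Most of the arch changes below replace each architecture's hand-rolled free-area search (the free_area_cache/cached_hole_size scheme) with the new vm_unmapped_area() helper. A minimal sketch of the calling pattern, using the 3.8-era struct vm_unmapped_area_info fields that appear throughout the diffs below; this is an illustration of the interface, not code from any one hunk:

    struct vm_unmapped_area_info info;
    unsigned long addr;

    info.flags = VM_UNMAPPED_AREA_TOPDOWN;  /* or 0 for a bottom-up search */
    info.length = len;                      /* bytes needed */
    info.low_limit = PAGE_SIZE;             /* never hand out page zero */
    info.high_limit = mm->mmap_base;        /* search below the mmap base */
    info.align_mask = 0;                    /* no alignment constraint */
    info.align_offset = 0;
    addr = vm_unmapped_area(&info);         /* page-aligned address or -ENOMEM */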
diff --git a/arch/alpha/include/asm/mman.h b/arch/alpha/include/asm/mman.h
index cbeb3616a28e..0086b472bc2b 100644
--- a/arch/alpha/include/asm/mman.h
+++ b/arch/alpha/include/asm/mman.h
@@ -63,4 +63,15 @@
 /* compatibility flags */
 #define MAP_FILE	0
 
+/*
+ * When MAP_HUGETLB is set bits [26:31] encode the log2 of the huge page size.
+ * This gives us 6 bits, which is enough until someone invents 128 bit address
+ * spaces.
+ *
+ * Assume these are all power of twos.
+ * When 0 use the default page size.
+ */
+#define MAP_HUGE_SHIFT	26
+#define MAP_HUGE_MASK	0x3f
+
 #endif /* __ALPHA_MMAN_H__ */
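
The MAP_HUGE_SHIFT/MAP_HUGE_MASK pair, duplicated into each arch's mman.h in this series, lets mmap(MAP_HUGETLB) callers pick a huge page size by encoding log2(size) in flag bits 26-31. A hedged userspace sketch (needs a kernel with this series plus reserved hugepages; the flag values are spelled out because pre-3.8 headers lack the macros):

    #include <stdio.h>
    #include <sys/mman.h>

    #ifndef MAP_HUGETLB
    #define MAP_HUGETLB	0x40000			/* x86 value */
    #endif
    #define MAP_HUGE_SHIFT	26
    #define MAP_HUGE_2MB	(21 << MAP_HUGE_SHIFT)	/* log2(2 MB) = 21 */

    int main(void)
    {
        size_t len = 2UL * 1024 * 1024;
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | MAP_HUGE_2MB,
                       -1, 0);
        if (p == MAP_FAILED) {
            perror("mmap");	/* e.g. no 2 MB hugepages reserved */
            return 1;
        }
        munmap(p, len);
        return 0;
    }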
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index 89f2b7f7b042..10062ceadd1c 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -11,18 +11,6 @@
 #include <linux/random.h>
 #include <asm/cachetype.h>
 
-static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr,
-					      unsigned long pgoff)
-{
-	unsigned long base = addr & ~(SHMLBA-1);
-	unsigned long off = (pgoff << PAGE_SHIFT) & (SHMLBA-1);
-
-	if (base + off <= addr)
-		return base + off;
-
-	return base - off;
-}
-
 #define COLOUR_ALIGN(addr,pgoff)		\
 	((((addr)+SHMLBA-1)&~(SHMLBA-1)) +	\
 	 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
@@ -69,9 +57,9 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 {
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
-	unsigned long start_addr;
 	int do_align = 0;
 	int aliasing = cache_is_vipt_aliasing();
+	struct vm_unmapped_area_info info;
 
 	/*
 	 * We only need to do colour alignment if either the I or D
@@ -104,46 +92,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 	    (!vma || addr + len <= vma->vm_start))
 		return addr;
 	}
-	if (len > mm->cached_hole_size) {
-		start_addr = addr = mm->free_area_cache;
-	} else {
-		start_addr = addr = mm->mmap_base;
-		mm->cached_hole_size = 0;
-	}
 
-full_search:
-	if (do_align)
-		addr = COLOUR_ALIGN(addr, pgoff);
-	else
-		addr = PAGE_ALIGN(addr);
-
-	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
-		/* At this point:  (!vma || addr < vma->vm_end). */
-		if (TASK_SIZE - len < addr) {
-			/*
-			 * Start a new search - just in case we missed
-			 * some holes.
-			 */
-			if (start_addr != TASK_UNMAPPED_BASE) {
-				start_addr = addr = TASK_UNMAPPED_BASE;
-				mm->cached_hole_size = 0;
-				goto full_search;
-			}
-			return -ENOMEM;
-		}
-		if (!vma || addr + len <= vma->vm_start) {
-			/*
-			 * Remember the place where we stopped the search:
-			 */
-			mm->free_area_cache = addr + len;
-			return addr;
-		}
-		if (addr + mm->cached_hole_size < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
-		addr = vma->vm_end;
-		if (do_align)
-			addr = COLOUR_ALIGN(addr, pgoff);
-	}
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = mm->mmap_base;
+	info.high_limit = TASK_SIZE;
+	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
+	info.align_offset = pgoff << PAGE_SHIFT;
+	return vm_unmapped_area(&info);
 }
 
 unsigned long
@@ -156,6 +112,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 	unsigned long addr = addr0;
 	int do_align = 0;
 	int aliasing = cache_is_vipt_aliasing();
+	struct vm_unmapped_area_info info;
 
 	/*
 	 * We only need to do colour alignment if either the I or D
@@ -187,70 +144,27 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 		return addr;
 	}
 
-	/* check if free_area_cache is useful for us */
-	if (len <= mm->cached_hole_size) {
-		mm->cached_hole_size = 0;
-		mm->free_area_cache = mm->mmap_base;
-	}
-
-	/* either no address requested or can't fit in requested address hole */
-	addr = mm->free_area_cache;
-	if (do_align) {
-		unsigned long base = COLOUR_ALIGN_DOWN(addr - len, pgoff);
-		addr = base + len;
-	}
-
-	/* make sure it can fit in the remaining address space */
-	if (addr > len) {
-		vma = find_vma(mm, addr-len);
-		if (!vma || addr <= vma->vm_start)
-			/* remember the address as a hint for next time */
-			return (mm->free_area_cache = addr-len);
-	}
-
-	if (mm->mmap_base < len)
-		goto bottomup;
-
-	addr = mm->mmap_base - len;
-	if (do_align)
-		addr = COLOUR_ALIGN_DOWN(addr, pgoff);
-
-	do {
-		/*
-		 * Lookup failure means no vma is above this address,
-		 * else if new region fits below vma->vm_start,
-		 * return with success:
-		 */
-		vma = find_vma(mm, addr);
-		if (!vma || addr+len <= vma->vm_start)
-			/* remember the address as a hint for next time */
-			return (mm->free_area_cache = addr);
+	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+	info.length = len;
+	info.low_limit = PAGE_SIZE;
+	info.high_limit = mm->mmap_base;
+	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
+	info.align_offset = pgoff << PAGE_SHIFT;
+	addr = vm_unmapped_area(&info);
 
-		/* remember the largest hole we saw so far */
-		if (addr + mm->cached_hole_size < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
-
-		/* try just below the current vma->vm_start */
-		addr = vma->vm_start - len;
-		if (do_align)
-			addr = COLOUR_ALIGN_DOWN(addr, pgoff);
-	} while (len < vma->vm_start);
-
-bottomup:
 	/*
 	 * A failed mmap() very likely causes application failure,
 	 * so fall back to the bottom-up function here. This scenario
 	 * can happen with large stack limits and large mmap()
 	 * allocations.
 	 */
-	mm->cached_hole_size = ~0UL;
-	mm->free_area_cache = TASK_UNMAPPED_BASE;
-	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
-	/*
-	 * Restore the topdown base:
-	 */
-	mm->free_area_cache = mm->mmap_base;
-	mm->cached_hole_size = ~0UL;
+	if (addr & ~PAGE_MASK) {
+		VM_BUG_ON(addr != -ENOMEM);
+		info.flags = 0;
+		info.low_limit = mm->mmap_base;
+		info.high_limit = TASK_SIZE;
+		addr = vm_unmapped_area(&info);
+	}
 
 	return addr;
 }
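
The ARM conversion keeps VIPT cache colouring by expressing SHMLBA alignment through align_mask/align_offset instead of the deleted COLOUR_ALIGN helpers: vm_unmapped_area() returns an address whose colour bits match those of the file offset, i.e. (addr & align_mask) == (align_offset & align_mask). A small standalone check of that arithmetic, assuming ARM's SHMLBA of four pages:

    #include <assert.h>

    #define PAGE_SHIFT	12
    #define PAGE_SIZE	(1UL << PAGE_SHIFT)
    #define PAGE_MASK	(~(PAGE_SIZE - 1))
    #define SHMLBA	(4 * PAGE_SIZE)

    int main(void)
    {
        unsigned long align_mask = PAGE_MASK & (SHMLBA - 1);	/* 0x3000 */
        unsigned long pgoff = 5;		/* page index within the file */
        unsigned long align_offset = pgoff << PAGE_SHIFT;
        unsigned long addr = 0x40005000;	/* a result the allocator could return */

        assert((addr & align_mask) == (align_offset & align_mask));
        return 0;
    }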
diff --git a/arch/avr32/Kconfig b/arch/avr32/Kconfig
index 06e73bf665e9..c2bbc9a72222 100644
--- a/arch/avr32/Kconfig
+++ b/arch/avr32/Kconfig
@@ -193,9 +193,6 @@ source "kernel/Kconfig.preempt"
 config QUICKLIST
 	def_bool y
 
-config HAVE_ARCH_BOOTMEM
-	def_bool n
-
 config ARCH_HAVE_MEMORY_PRESENT
 	def_bool n
 
diff --git a/arch/mips/include/uapi/asm/mman.h b/arch/mips/include/uapi/asm/mman.h
index 46d3da0d4b92..9a936ac9a942 100644
--- a/arch/mips/include/uapi/asm/mman.h
+++ b/arch/mips/include/uapi/asm/mman.h
@@ -87,4 +87,15 @@
 /* compatibility flags */
 #define MAP_FILE	0
 
+/*
+ * When MAP_HUGETLB is set bits [26:31] encode the log2 of the huge page size.
+ * This gives us 6 bits, which is enough until someone invents 128 bit address
+ * spaces.
+ *
+ * Assume these are all power of twos.
+ * When 0 use the default page size.
+ */
+#define MAP_HUGE_SHIFT	26
+#define MAP_HUGE_MASK	0x3f
+
 #endif /* _ASM_MMAN_H */
diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
index 302d779d5b0d..d9be7540a6be 100644
--- a/arch/mips/mm/mmap.c
+++ b/arch/mips/mm/mmap.c
@@ -45,18 +45,6 @@ static unsigned long mmap_base(unsigned long rnd)
 	return PAGE_ALIGN(TASK_SIZE - gap - rnd);
 }
 
-static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr,
-					      unsigned long pgoff)
-{
-	unsigned long base = addr & ~shm_align_mask;
-	unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;
-
-	if (base + off <= addr)
-		return base + off;
-
-	return base - off;
-}
-
 #define COLOUR_ALIGN(addr, pgoff)				\
 	((((addr) + shm_align_mask) & ~shm_align_mask) +	\
 	 (((pgoff) << PAGE_SHIFT) & shm_align_mask))
@@ -71,6 +59,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
 	struct vm_area_struct *vma;
 	unsigned long addr = addr0;
 	int do_color_align;
+	struct vm_unmapped_area_info info;
 
 	if (unlikely(len > TASK_SIZE))
 		return -ENOMEM;
@@ -107,97 +96,31 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
 			return addr;
 	}
 
-	if (dir == UP) {
-		addr = mm->mmap_base;
-		if (do_color_align)
-			addr = COLOUR_ALIGN(addr, pgoff);
-		else
-			addr = PAGE_ALIGN(addr);
+	info.length = len;
+	info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
+	info.align_offset = pgoff << PAGE_SHIFT;
 
-		for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
-			/* At this point:  (!vma || addr < vma->vm_end). */
-			if (TASK_SIZE - len < addr)
-				return -ENOMEM;
-			if (!vma || addr + len <= vma->vm_start)
-				return addr;
-			addr = vma->vm_end;
-			if (do_color_align)
-				addr = COLOUR_ALIGN(addr, pgoff);
-		}
-	} else {
-		/* check if free_area_cache is useful for us */
-		if (len <= mm->cached_hole_size) {
-			mm->cached_hole_size = 0;
-			mm->free_area_cache = mm->mmap_base;
-		}
+	if (dir == DOWN) {
+		info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+		info.low_limit = PAGE_SIZE;
+		info.high_limit = mm->mmap_base;
+		addr = vm_unmapped_area(&info);
+
+		if (!(addr & ~PAGE_MASK))
+			return addr;
 
-		/*
-		 * either no address requested, or the mapping can't fit into
-		 * the requested address hole
-		 */
-		addr = mm->free_area_cache;
-		if (do_color_align) {
-			unsigned long base =
-				COLOUR_ALIGN_DOWN(addr - len, pgoff);
-			addr = base + len;
-		}
-
-		/* make sure it can fit in the remaining address space */
-		if (likely(addr > len)) {
-			vma = find_vma(mm, addr - len);
-			if (!vma || addr <= vma->vm_start) {
-				/* cache the address as a hint for next time */
-				return mm->free_area_cache = addr - len;
-			}
-		}
-
-		if (unlikely(mm->mmap_base < len))
-			goto bottomup;
-
-		addr = mm->mmap_base - len;
-		if (do_color_align)
-			addr = COLOUR_ALIGN_DOWN(addr, pgoff);
-
-		do {
-			/*
-			 * Lookup failure means no vma is above this address,
-			 * else if new region fits below vma->vm_start,
-			 * return with success:
-			 */
-			vma = find_vma(mm, addr);
-			if (likely(!vma || addr + len <= vma->vm_start)) {
-				/* cache the address as a hint for next time */
-				return mm->free_area_cache = addr;
-			}
-
-			/* remember the largest hole we saw so far */
-			if (addr + mm->cached_hole_size < vma->vm_start)
-				mm->cached_hole_size = vma->vm_start - addr;
-
-			/* try just below the current vma->vm_start */
-			addr = vma->vm_start - len;
-			if (do_color_align)
-				addr = COLOUR_ALIGN_DOWN(addr, pgoff);
-		} while (likely(len < vma->vm_start));
-
-bottomup:
 		/*
 		 * A failed mmap() very likely causes application failure,
 		 * so fall back to the bottom-up function here. This scenario
 		 * can happen with large stack limits and large mmap()
 		 * allocations.
 		 */
-		mm->cached_hole_size = ~0UL;
-		mm->free_area_cache = TASK_UNMAPPED_BASE;
-		addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
-		/*
-		 * Restore the topdown base:
-		 */
-		mm->free_area_cache = mm->mmap_base;
-		mm->cached_hole_size = ~0UL;
-
-		return addr;
 	}
+
+	info.flags = 0;
+	info.low_limit = mm->mmap_base;
+	info.high_limit = TASK_SIZE;
+	return vm_unmapped_area(&info);
 }
 
 unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0,
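
Several of these conversions test the result with addr & ~PAGE_MASK before retrying. That works because vm_unmapped_area() returns either a page-aligned address or a negated errno, and a small negative value cast to unsigned long always has low bits set. A tiny standalone illustration:

    #include <assert.h>

    #define PAGE_MASK	(~0xfffUL)
    #define ENOMEM	12

    int main(void)
    {
        unsigned long ok = 0x7f0000000000UL;		/* page-aligned success */
        unsigned long err = (unsigned long)-ENOMEM;	/* 0xfff...ff4 */

        assert(!(ok & ~PAGE_MASK));	/* success: offset bits clear */
        assert(err & ~PAGE_MASK);	/* error: offset bits set */
        return 0;
    }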
diff --git a/arch/parisc/include/uapi/asm/mman.h b/arch/parisc/include/uapi/asm/mman.h
index 12219ebce869..294d251ca7b2 100644
--- a/arch/parisc/include/uapi/asm/mman.h
+++ b/arch/parisc/include/uapi/asm/mman.h
@@ -70,4 +70,15 @@
 #define MAP_FILE	0
 #define MAP_VARIABLE	0
 
+/*
+ * When MAP_HUGETLB is set bits [26:31] encode the log2 of the huge page size.
+ * This gives us 6 bits, which is enough until someone invents 128 bit address
+ * spaces.
+ *
+ * Assume these are all power of twos.
+ * When 0 use the default page size.
+ */
+#define MAP_HUGE_SHIFT	26
+#define MAP_HUGE_MASK	0x3f
+
 #endif /* __PARISC_MMAN_H__ */
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index cf357a059ddb..3ce1f864c2d3 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -607,7 +607,7 @@ static void register_nodes(void)
 
 int sysfs_add_device_to_node(struct device *dev, int nid)
 {
-	struct node *node = &node_devices[nid];
+	struct node *node = node_devices[nid];
 	return sysfs_create_link(&node->dev.kobj, &dev->kobj,
 				 kobject_name(&dev->kobj));
 }
@@ -615,7 +615,7 @@ EXPORT_SYMBOL_GPL(sysfs_add_device_to_node);
 
 void sysfs_remove_device_from_node(struct device *dev, int nid)
 {
-	struct node *node = &node_devices[nid];
+	struct node *node = node_devices[nid];
 	sysfs_remove_link(&node->dev.kobj, kobject_name(&dev->kobj));
 }
 EXPORT_SYMBOL_GPL(sysfs_remove_device_from_node);
diff --git a/arch/powerpc/platforms/cell/celleb_pci.c b/arch/powerpc/platforms/cell/celleb_pci.c
index abc8af43ea7c..173568140a32 100644
--- a/arch/powerpc/platforms/cell/celleb_pci.c
+++ b/arch/powerpc/platforms/cell/celleb_pci.c
@@ -401,11 +401,11 @@ error:
 	} else {
 		if (config && *config) {
 			size = 256;
-			free_bootmem((unsigned long)(*config), size);
+			free_bootmem(__pa(*config), size);
 		}
 		if (res && *res) {
 			size = sizeof(struct celleb_pci_resource);
-			free_bootmem((unsigned long)(*res), size);
+			free_bootmem(__pa(*res), size);
 		}
 	}
 
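
The fix above reflects that alloc_bootmem() hands back a kernel virtual address while free_bootmem() expects a physical one; __pa() performs the translation that the removed casts skipped. A hedged kernel-style sketch of the corrected pairing (not a standalone program):

    void *config = alloc_bootmem(256);	/* returns a virtual address */
    /* ... use the buffer ... */
    free_bootmem(__pa(config), 256);	/* free_bootmem() takes a physical address */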
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 6d5367060a56..39faa4ac9660 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -158,6 +158,9 @@ static inline int page_reset_referenced(unsigned long addr)
  * race against modification of the referenced bit. This function
  * should therefore only be called if it is not mapped in any
  * address space.
+ *
+ * Note that the bit gets set whenever page content is changed. That means
+ * also when the page is modified by DMA or from inside the kernel.
  */
 #define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY
 static inline int page_test_and_clear_dirty(unsigned long pfn, int mapped)
diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
index 80bf494ddbcb..6777177807c2 100644
--- a/arch/sh/mm/mmap.c
+++ b/arch/sh/mm/mmap.c
@@ -30,25 +30,13 @@ static inline unsigned long COLOUR_ALIGN(unsigned long addr,
 	return base + off;
 }
 
-static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr,
-					      unsigned long pgoff)
-{
-	unsigned long base = addr & ~shm_align_mask;
-	unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;
-
-	if (base + off <= addr)
-		return base + off;
-
-	return base - off;
-}
-
 unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 	unsigned long len, unsigned long pgoff, unsigned long flags)
 {
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
-	unsigned long start_addr;
 	int do_colour_align;
+	struct vm_unmapped_area_info info;
 
 	if (flags & MAP_FIXED) {
 		/* We do not accept a shared mapping if it would violate
@@ -79,47 +67,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		return addr;
 	}
 
-	if (len > mm->cached_hole_size) {
-		start_addr = addr = mm->free_area_cache;
-	} else {
-		mm->cached_hole_size = 0;
-		start_addr = addr = TASK_UNMAPPED_BASE;
-	}
-
-full_search:
-	if (do_colour_align)
-		addr = COLOUR_ALIGN(addr, pgoff);
-	else
-		addr = PAGE_ALIGN(mm->free_area_cache);
-
-	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
-		/* At this point:  (!vma || addr < vma->vm_end). */
-		if (unlikely(TASK_SIZE - len < addr)) {
-			/*
-			 * Start a new search - just in case we missed
-			 * some holes.
-			 */
-			if (start_addr != TASK_UNMAPPED_BASE) {
-				start_addr = addr = TASK_UNMAPPED_BASE;
-				mm->cached_hole_size = 0;
-				goto full_search;
-			}
-			return -ENOMEM;
-		}
-		if (likely(!vma || addr + len <= vma->vm_start)) {
-			/*
-			 * Remember the place where we stopped the search:
-			 */
-			mm->free_area_cache = addr + len;
-			return addr;
-		}
-		if (addr + mm->cached_hole_size < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
-
-		addr = vma->vm_end;
-		if (do_colour_align)
-			addr = COLOUR_ALIGN(addr, pgoff);
-	}
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = TASK_UNMAPPED_BASE;
+	info.high_limit = TASK_SIZE;
+	info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
+	info.align_offset = pgoff << PAGE_SHIFT;
+	return vm_unmapped_area(&info);
 }
 
 unsigned long
@@ -131,6 +85,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 	struct mm_struct *mm = current->mm;
 	unsigned long addr = addr0;
 	int do_colour_align;
+	struct vm_unmapped_area_info info;
 
 	if (flags & MAP_FIXED) {
 		/* We do not accept a shared mapping if it would violate
@@ -162,73 +117,27 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 		return addr;
 	}
 
-	/* check if free_area_cache is useful for us */
-	if (len <= mm->cached_hole_size) {
-		mm->cached_hole_size = 0;
-		mm->free_area_cache = mm->mmap_base;
-	}
-
-	/* either no address requested or can't fit in requested address hole */
-	addr = mm->free_area_cache;
-	if (do_colour_align) {
-		unsigned long base = COLOUR_ALIGN_DOWN(addr-len, pgoff);
+	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+	info.length = len;
+	info.low_limit = PAGE_SIZE;
+	info.high_limit = mm->mmap_base;
+	info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
+	info.align_offset = pgoff << PAGE_SHIFT;
+	addr = vm_unmapped_area(&info);
 
-		addr = base + len;
-	}
-
-	/* make sure it can fit in the remaining address space */
-	if (likely(addr > len)) {
-		vma = find_vma(mm, addr-len);
-		if (!vma || addr <= vma->vm_start) {
-			/* remember the address as a hint for next time */
-			return (mm->free_area_cache = addr-len);
-		}
-	}
-
-	if (unlikely(mm->mmap_base < len))
-		goto bottomup;
-
-	addr = mm->mmap_base-len;
-	if (do_colour_align)
-		addr = COLOUR_ALIGN_DOWN(addr, pgoff);
-
-	do {
-		/*
-		 * Lookup failure means no vma is above this address,
-		 * else if new region fits below vma->vm_start,
-		 * return with success:
-		 */
-		vma = find_vma(mm, addr);
-		if (likely(!vma || addr+len <= vma->vm_start)) {
-			/* remember the address as a hint for next time */
-			return (mm->free_area_cache = addr);
-		}
-
-		/* remember the largest hole we saw so far */
-		if (addr + mm->cached_hole_size < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
-
-		/* try just below the current vma->vm_start */
-		addr = vma->vm_start-len;
-		if (do_colour_align)
-			addr = COLOUR_ALIGN_DOWN(addr, pgoff);
-	} while (likely(len < vma->vm_start));
-
-bottomup:
 	/*
 	 * A failed mmap() very likely causes application failure,
 	 * so fall back to the bottom-up function here. This scenario
 	 * can happen with large stack limits and large mmap()
 	 * allocations.
 	 */
-	mm->cached_hole_size = ~0UL;
-	mm->free_area_cache = TASK_UNMAPPED_BASE;
-	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
-	/*
-	 * Restore the topdown base:
-	 */
-	mm->free_area_cache = mm->mmap_base;
-	mm->cached_hole_size = ~0UL;
+	if (addr & ~PAGE_MASK) {
+		VM_BUG_ON(addr != -ENOMEM);
+		info.flags = 0;
+		info.low_limit = TASK_UNMAPPED_BASE;
+		info.high_limit = TASK_SIZE;
+		addr = vm_unmapped_area(&info);
+	}
 
 	return addr;
 }
diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
index 0c9b31b22e07..57277c830151 100644
--- a/arch/sparc/kernel/sys_sparc_32.c
+++ b/arch/sparc/kernel/sys_sparc_32.c
@@ -34,11 +34,9 @@ asmlinkage unsigned long sys_getpagesize(void)
 	return PAGE_SIZE; /* Possibly older binaries want 8192 on sun4's? */
 }
 
-#define COLOUR_ALIGN(addr)      (((addr)+SHMLBA-1)&~(SHMLBA-1))
-
 unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
 {
-	struct vm_area_struct * vmm;
+	struct vm_unmapped_area_info info;
 
 	if (flags & MAP_FIXED) {
 		/* We do not accept a shared mapping if it would violate
@@ -56,21 +54,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
 	if (!addr)
 		addr = TASK_UNMAPPED_BASE;
 
-	if (flags & MAP_SHARED)
-		addr = COLOUR_ALIGN(addr);
-	else
-		addr = PAGE_ALIGN(addr);
-
-	for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
-		/* At this point:  (!vmm || addr < vmm->vm_end). */
-		if (TASK_SIZE - PAGE_SIZE - len < addr)
-			return -ENOMEM;
-		if (!vmm || addr + len <= vmm->vm_start)
-			return addr;
-		addr = vmm->vm_end;
-		if (flags & MAP_SHARED)
-			addr = COLOUR_ALIGN(addr);
-	}
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = addr;
+	info.high_limit = TASK_SIZE;
+	info.align_mask = (flags & MAP_SHARED) ?
+		(PAGE_MASK & (SHMLBA - 1)) : 0;
+	info.align_offset = pgoff << PAGE_SHIFT;
+	return vm_unmapped_area(&info);
 }
 
 /*
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index 878ef3d5fec5..97309c0ec533 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -75,7 +75,7 @@ static inline int invalid_64bit_range(unsigned long addr, unsigned long len)
  * the spitfire/niagara VA-hole.
  */
 
-static inline unsigned long COLOUR_ALIGN(unsigned long addr,
+static inline unsigned long COLOR_ALIGN(unsigned long addr,
 					 unsigned long pgoff)
 {
 	unsigned long base = (addr+SHMLBA-1)&~(SHMLBA-1);
@@ -84,24 +84,13 @@ static inline unsigned long COLOUR_ALIGN(unsigned long addr,
 	return base + off;
 }
 
-static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr,
-					 unsigned long pgoff)
-{
-	unsigned long base = addr & ~(SHMLBA-1);
-	unsigned long off = (pgoff<<PAGE_SHIFT) & (SHMLBA-1);
-
-	if (base + off <= addr)
-		return base + off;
-	return base - off;
-}
-
 unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
 {
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct * vma;
 	unsigned long task_size = TASK_SIZE;
-	unsigned long start_addr;
 	int do_color_align;
+	struct vm_unmapped_area_info info;
 
 	if (flags & MAP_FIXED) {
 		/* We do not accept a shared mapping if it would violate
@@ -124,7 +113,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
 
 	if (addr) {
 		if (do_color_align)
-			addr = COLOUR_ALIGN(addr, pgoff);
+			addr = COLOR_ALIGN(addr, pgoff);
 		else
 			addr = PAGE_ALIGN(addr);
 
@@ -134,50 +123,22 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
 		return addr;
 	}
 
-	if (len > mm->cached_hole_size) {
-		start_addr = addr = mm->free_area_cache;
-	} else {
-		start_addr = addr = TASK_UNMAPPED_BASE;
-		mm->cached_hole_size = 0;
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = TASK_UNMAPPED_BASE;
+	info.high_limit = min(task_size, VA_EXCLUDE_START);
+	info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
+	info.align_offset = pgoff << PAGE_SHIFT;
+	addr = vm_unmapped_area(&info);
+
+	if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
+		VM_BUG_ON(addr != -ENOMEM);
+		info.low_limit = VA_EXCLUDE_END;
+		info.high_limit = task_size;
+		addr = vm_unmapped_area(&info);
 	}
 
-	task_size -= len;
-
-full_search:
-	if (do_color_align)
-		addr = COLOUR_ALIGN(addr, pgoff);
-	else
-		addr = PAGE_ALIGN(addr);
-
-	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
-		/* At this point:  (!vma || addr < vma->vm_end). */
-		if (addr < VA_EXCLUDE_START &&
-		    (addr + len) >= VA_EXCLUDE_START) {
-			addr = VA_EXCLUDE_END;
-			vma = find_vma(mm, VA_EXCLUDE_END);
-		}
-		if (unlikely(task_size < addr)) {
-			if (start_addr != TASK_UNMAPPED_BASE) {
-				start_addr = addr = TASK_UNMAPPED_BASE;
-				mm->cached_hole_size = 0;
-				goto full_search;
-			}
-			return -ENOMEM;
-		}
-		if (likely(!vma || addr + len <= vma->vm_start)) {
-			/*
-			 * Remember the place where we stopped the search:
-			 */
-			mm->free_area_cache = addr + len;
-			return addr;
-		}
-		if (addr + mm->cached_hole_size < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
-
-		addr = vma->vm_end;
-		if (do_color_align)
-			addr = COLOUR_ALIGN(addr, pgoff);
-	}
+	return addr;
 }
 
 unsigned long
@@ -190,6 +151,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 	unsigned long task_size = STACK_TOP32;
 	unsigned long addr = addr0;
 	int do_color_align;
+	struct vm_unmapped_area_info info;
 
 	/* This should only ever run for 32-bit processes. */
 	BUG_ON(!test_thread_flag(TIF_32BIT));
@@ -214,7 +176,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 	/* requesting a specific address */
 	if (addr) {
 		if (do_color_align)
-			addr = COLOUR_ALIGN(addr, pgoff);
+			addr = COLOR_ALIGN(addr, pgoff);
 		else
 			addr = PAGE_ALIGN(addr);
 
@@ -224,73 +186,27 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 		return addr;
 	}
 
-	/* check if free_area_cache is useful for us */
-	if (len <= mm->cached_hole_size) {
-		mm->cached_hole_size = 0;
-		mm->free_area_cache = mm->mmap_base;
-	}
-
-	/* either no address requested or can't fit in requested address hole */
-	addr = mm->free_area_cache;
-	if (do_color_align) {
-		unsigned long base = COLOUR_ALIGN_DOWN(addr-len, pgoff);
+	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+	info.length = len;
+	info.low_limit = PAGE_SIZE;
+	info.high_limit = mm->mmap_base;
+	info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
+	info.align_offset = pgoff << PAGE_SHIFT;
+	addr = vm_unmapped_area(&info);
 
-		addr = base + len;
-	}
-
-	/* make sure it can fit in the remaining address space */
-	if (likely(addr > len)) {
-		vma = find_vma(mm, addr-len);
-		if (!vma || addr <= vma->vm_start) {
-			/* remember the address as a hint for next time */
-			return (mm->free_area_cache = addr-len);
-		}
-	}
-
-	if (unlikely(mm->mmap_base < len))
-		goto bottomup;
-
-	addr = mm->mmap_base-len;
-	if (do_color_align)
-		addr = COLOUR_ALIGN_DOWN(addr, pgoff);
-
-	do {
-		/*
-		 * Lookup failure means no vma is above this address,
-		 * else if new region fits below vma->vm_start,
-		 * return with success:
-		 */
-		vma = find_vma(mm, addr);
-		if (likely(!vma || addr+len <= vma->vm_start)) {
-			/* remember the address as a hint for next time */
-			return (mm->free_area_cache = addr);
-		}
-
-		/* remember the largest hole we saw so far */
-		if (addr + mm->cached_hole_size < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
-
-		/* try just below the current vma->vm_start */
-		addr = vma->vm_start-len;
-		if (do_color_align)
-			addr = COLOUR_ALIGN_DOWN(addr, pgoff);
-	} while (likely(len < vma->vm_start));
-
-bottomup:
 	/*
 	 * A failed mmap() very likely causes application failure,
 	 * so fall back to the bottom-up function here. This scenario
 	 * can happen with large stack limits and large mmap()
 	 * allocations.
 	 */
-	mm->cached_hole_size = ~0UL;
-	mm->free_area_cache = TASK_UNMAPPED_BASE;
-	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
-	/*
-	 * Restore the topdown base:
-	 */
-	mm->free_area_cache = mm->mmap_base;
-	mm->cached_hole_size = ~0UL;
+	if (addr & ~PAGE_MASK) {
+		VM_BUG_ON(addr != -ENOMEM);
+		info.flags = 0;
+		info.low_limit = TASK_UNMAPPED_BASE;
+		info.high_limit = STACK_TOP32;
+		addr = vm_unmapped_area(&info);
+	}
 
 	return addr;
 }
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index f76f83d5ac63..d2b59441ebdd 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -30,55 +30,28 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
 				     unsigned long pgoff,
 				     unsigned long flags)
 {
-	struct mm_struct *mm = current->mm;
-	struct vm_area_struct * vma;
 	unsigned long task_size = TASK_SIZE;
-	unsigned long start_addr;
+	struct vm_unmapped_area_info info;
 
 	if (test_thread_flag(TIF_32BIT))
 		task_size = STACK_TOP32;
-	if (unlikely(len >= VA_EXCLUDE_START))
-		return -ENOMEM;
 
-	if (len > mm->cached_hole_size) {
-		start_addr = addr = mm->free_area_cache;
-	} else {
-		start_addr = addr = TASK_UNMAPPED_BASE;
-		mm->cached_hole_size = 0;
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = TASK_UNMAPPED_BASE;
+	info.high_limit = min(task_size, VA_EXCLUDE_START);
+	info.align_mask = PAGE_MASK & ~HPAGE_MASK;
+	info.align_offset = 0;
+	addr = vm_unmapped_area(&info);
+
+	if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
+		VM_BUG_ON(addr != -ENOMEM);
+		info.low_limit = VA_EXCLUDE_END;
+		info.high_limit = task_size;
+		addr = vm_unmapped_area(&info);
 	}
 
-	task_size -= len;
-
-full_search:
-	addr = ALIGN(addr, HPAGE_SIZE);
-
-	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
-		/* At this point:  (!vma || addr < vma->vm_end). */
-		if (addr < VA_EXCLUDE_START &&
-		    (addr + len) >= VA_EXCLUDE_START) {
-			addr = VA_EXCLUDE_END;
-			vma = find_vma(mm, VA_EXCLUDE_END);
-		}
-		if (unlikely(task_size < addr)) {
-			if (start_addr != TASK_UNMAPPED_BASE) {
-				start_addr = addr = TASK_UNMAPPED_BASE;
-				mm->cached_hole_size = 0;
-				goto full_search;
-			}
-			return -ENOMEM;
-		}
-		if (likely(!vma || addr + len <= vma->vm_start)) {
-			/*
-			 * Remember the place where we stopped the search:
-			 */
-			mm->free_area_cache = addr + len;
-			return addr;
-		}
-		if (addr + mm->cached_hole_size < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
-
-		addr = ALIGN(vma->vm_end, HPAGE_SIZE);
-	}
+	return addr;
 }
 
 static unsigned long
@@ -87,71 +60,34 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 				  const unsigned long pgoff,
 				  const unsigned long flags)
 {
-	struct vm_area_struct *vma;
 	struct mm_struct *mm = current->mm;
 	unsigned long addr = addr0;
+	struct vm_unmapped_area_info info;
 
 	/* This should only ever run for 32-bit processes. */
 	BUG_ON(!test_thread_flag(TIF_32BIT));
 
-	/* check if free_area_cache is useful for us */
-	if (len <= mm->cached_hole_size) {
-		mm->cached_hole_size = 0;
-		mm->free_area_cache = mm->mmap_base;
-	}
-
-	/* either no address requested or can't fit in requested address hole */
-	addr = mm->free_area_cache & HPAGE_MASK;
-
-	/* make sure it can fit in the remaining address space */
-	if (likely(addr > len)) {
-		vma = find_vma(mm, addr-len);
-		if (!vma || addr <= vma->vm_start) {
-			/* remember the address as a hint for next time */
-			return (mm->free_area_cache = addr-len);
-		}
-	}
-
-	if (unlikely(mm->mmap_base < len))
-		goto bottomup;
-
-	addr = (mm->mmap_base-len) & HPAGE_MASK;
-
-	do {
-		/*
-		 * Lookup failure means no vma is above this address,
-		 * else if new region fits below vma->vm_start,
-		 * return with success:
-		 */
-		vma = find_vma(mm, addr);
-		if (likely(!vma || addr+len <= vma->vm_start)) {
-			/* remember the address as a hint for next time */
-			return (mm->free_area_cache = addr);
-		}
-
-		/* remember the largest hole we saw so far */
-		if (addr + mm->cached_hole_size < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
-
-		/* try just below the current vma->vm_start */
-		addr = (vma->vm_start-len) & HPAGE_MASK;
-	} while (likely(len < vma->vm_start));
+	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+	info.length = len;
+	info.low_limit = PAGE_SIZE;
+	info.high_limit = mm->mmap_base;
+	info.align_mask = PAGE_MASK & ~HPAGE_MASK;
+	info.align_offset = 0;
+	addr = vm_unmapped_area(&info);
 
-bottomup:
 	/*
 	 * A failed mmap() very likely causes application failure,
 	 * so fall back to the bottom-up function here. This scenario
 	 * can happen with large stack limits and large mmap()
 	 * allocations.
 	 */
-	mm->cached_hole_size = ~0UL;
-	mm->free_area_cache = TASK_UNMAPPED_BASE;
-	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
-	/*
-	 * Restore the topdown base:
-	 */
-	mm->free_area_cache = mm->mmap_base;
-	mm->cached_hole_size = ~0UL;
+	if (addr & ~PAGE_MASK) {
+		VM_BUG_ON(addr != -ENOMEM);
+		info.flags = 0;
+		info.low_limit = TASK_UNMAPPED_BASE;
+		info.high_limit = STACK_TOP32;
+		addr = vm_unmapped_area(&info);
+	}
 
 	return addr;
 }
diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
index 812e2d037972..650ccff8378c 100644
--- a/arch/tile/mm/hugetlbpage.c
+++ b/arch/tile/mm/hugetlbpage.c
@@ -231,42 +231,15 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
 		unsigned long pgoff, unsigned long flags)
 {
 	struct hstate *h = hstate_file(file);
-	struct mm_struct *mm = current->mm;
-	struct vm_area_struct *vma;
-	unsigned long start_addr;
-
-	if (len > mm->cached_hole_size) {
-		start_addr = mm->free_area_cache;
-	} else {
-		start_addr = TASK_UNMAPPED_BASE;
-		mm->cached_hole_size = 0;
-	}
-
-full_search:
-	addr = ALIGN(start_addr, huge_page_size(h));
-
-	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
-		/* At this point:  (!vma || addr < vma->vm_end). */
-		if (TASK_SIZE - len < addr) {
-			/*
-			 * Start a new search - just in case we missed
-			 * some holes.
-			 */
-			if (start_addr != TASK_UNMAPPED_BASE) {
-				start_addr = TASK_UNMAPPED_BASE;
-				mm->cached_hole_size = 0;
-				goto full_search;
-			}
-			return -ENOMEM;
-		}
-		if (!vma || addr + len <= vma->vm_start) {
-			mm->free_area_cache = addr + len;
-			return addr;
-		}
-		if (addr + mm->cached_hole_size < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
-		addr = ALIGN(vma->vm_end, huge_page_size(h));
-	}
+	struct vm_unmapped_area_info info;
+
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = TASK_UNMAPPED_BASE;
+	info.high_limit = TASK_SIZE;
+	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
+	info.align_offset = 0;
+	return vm_unmapped_area(&info);
 }
 
 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
@@ -274,92 +247,30 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
 		unsigned long pgoff, unsigned long flags)
 {
 	struct hstate *h = hstate_file(file);
-	struct mm_struct *mm = current->mm;
-	struct vm_area_struct *vma, *prev_vma;
-	unsigned long base = mm->mmap_base, addr = addr0;
-	unsigned long largest_hole = mm->cached_hole_size;
-	int first_time = 1;
-
-	/* don't allow allocations above current base */
-	if (mm->free_area_cache > base)
-		mm->free_area_cache = base;
-
-	if (len <= largest_hole) {
-		largest_hole = 0;
-		mm->free_area_cache = base;
-	}
-try_again:
-	/* make sure it can fit in the remaining address space */
-	if (mm->free_area_cache < len)
-		goto fail;
-
-	/* either no address requested or can't fit in requested address hole */
-	addr = (mm->free_area_cache - len) & huge_page_mask(h);
-	do {
-		/*
-		 * Lookup failure means no vma is above this address,
-		 * i.e. return with success:
-		 */
-		vma = find_vma_prev(mm, addr, &prev_vma);
-		if (!vma) {
-			return addr;
-			break;
-		}
-
-		/*
-		 * new region fits between prev_vma->vm_end and
-		 * vma->vm_start, use it:
-		 */
-		if (addr + len <= vma->vm_start &&
-		    (!prev_vma || (addr >= prev_vma->vm_end))) {
-			/* remember the address as a hint for next time */
-			mm->cached_hole_size = largest_hole;
-			mm->free_area_cache = addr;
-			return addr;
-		} else {
-			/* pull free_area_cache down to the first hole */
-			if (mm->free_area_cache == vma->vm_end) {
-				mm->free_area_cache = vma->vm_start;
-				mm->cached_hole_size = largest_hole;
-			}
-		}
+	struct vm_unmapped_area_info info;
+	unsigned long addr;
 
-		/* remember the largest hole we saw so far */
-		if (addr + largest_hole < vma->vm_start)
-			largest_hole = vma->vm_start - addr;
+	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+	info.length = len;
+	info.low_limit = PAGE_SIZE;
+	info.high_limit = current->mm->mmap_base;
+	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
+	info.align_offset = 0;
+	addr = vm_unmapped_area(&info);
 
-		/* try just below the current vma->vm_start */
-		addr = (vma->vm_start - len) & huge_page_mask(h);
-
-	} while (len <= vma->vm_start);
-
-fail:
-	/*
-	 * if hint left us with no space for the requested
-	 * mapping then try again:
-	 */
-	if (first_time) {
-		mm->free_area_cache = base;
-		largest_hole = 0;
-		first_time = 0;
-		goto try_again;
-	}
 	/*
 	 * A failed mmap() very likely causes application failure,
 	 * so fall back to the bottom-up function here. This scenario
 	 * can happen with large stack limits and large mmap()
 	 * allocations.
 	 */
-	mm->free_area_cache = TASK_UNMAPPED_BASE;
-	mm->cached_hole_size = ~0UL;
-	addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
-			len, pgoff, flags);
-
-	/*
-	 * Restore the topdown base:
-	 */
-	mm->free_area_cache = base;
-	mm->cached_hole_size = ~0UL;
+	if (addr & ~PAGE_MASK) {
+		VM_BUG_ON(addr != -ENOMEM);
+		info.flags = 0;
+		info.low_limit = TASK_UNMAPPED_BASE;
+		info.high_limit = TASK_SIZE;
+		addr = vm_unmapped_area(&info);
+	}
 
 	return addr;
 }
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index 5939f44fe0c0..9c999c1674fa 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -354,12 +354,10 @@ static inline int mmap_is_ia32(void)
 	return 0;
 }
 
-/* The first two values are special, do not change. See align_addr() */
+/* Do not change the values. See get_align_mask() */
 enum align_flags {
 	ALIGN_VA_32	= BIT(0),
 	ALIGN_VA_64	= BIT(1),
-	ALIGN_VDSO	= BIT(2),
-	ALIGN_TOPDOWN	= BIT(3),
 };
 
 struct va_alignment {
@@ -368,5 +366,5 @@ struct va_alignment {
 } ____cacheline_aligned;
 
 extern struct va_alignment va_align;
-extern unsigned long align_addr(unsigned long, struct file *, enum align_flags);
+extern unsigned long align_vdso_addr(unsigned long);
 #endif /* _ASM_X86_ELF_H */
diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
index 593e51d4643f..513b05f15bb4 100644
--- a/arch/x86/include/asm/mman.h
+++ b/arch/x86/include/asm/mman.h
@@ -3,6 +3,9 @@
 
 #define MAP_32BIT	0x40		/* only give out 32bit addresses */
 
+#define MAP_HUGE_2MB    (21 << MAP_HUGE_SHIFT)
+#define MAP_HUGE_1GB    (30 << MAP_HUGE_SHIFT)
+
 #include <asm-generic/mman.h>
 
 #endif /* _ASM_X86_MMAN_H */
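
With MAP_HUGE_2MB/MAP_HUGE_1GB defined, the requested size can be recovered as 1UL << ((flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK). A quick standalone check of that round trip:

    #include <assert.h>

    #define MAP_HUGE_SHIFT	26
    #define MAP_HUGE_MASK	0x3f
    #define MAP_HUGE_2MB	(21 << MAP_HUGE_SHIFT)
    #define MAP_HUGE_1GB	(30 << MAP_HUGE_SHIFT)

    int main(void)
    {
        unsigned long flags = MAP_HUGE_1GB;
        unsigned long size = 1UL << ((flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
        assert(size == 1UL << 30);		/* 1 GB */

        flags = MAP_HUGE_2MB;
        size = 1UL << ((flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
        assert(size == 2UL * 1024 * 1024);	/* 2 MB */
        return 0;
    }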
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index b4d3c3927dd8..97ef74b88e0f 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -21,37 +21,23 @@
 
 /*
  * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
- *
- * @flags denotes the allocation direction - bottomup or topdown -
- * or vDSO; see call sites below.
  */
-unsigned long align_addr(unsigned long addr, struct file *filp,
-			 enum align_flags flags)
+static unsigned long get_align_mask(void)
 {
-	unsigned long tmp_addr;
-
 	/* handle 32- and 64-bit case with a single conditional */
 	if (va_align.flags < 0 || !(va_align.flags & (2 - mmap_is_ia32())))
-		return addr;
+		return 0;
 
 	if (!(current->flags & PF_RANDOMIZE))
-		return addr;
-
-	if (!((flags & ALIGN_VDSO) || filp))
-		return addr;
-
-	tmp_addr = addr;
-
-	/*
-	 * We need an address which is <= than the original
-	 * one only when in topdown direction.
-	 */
-	if (!(flags & ALIGN_TOPDOWN))
-		tmp_addr += va_align.mask;
+		return 0;
 
-	tmp_addr &= ~va_align.mask;
+	return va_align.mask;
+}
 
-	return tmp_addr;
+unsigned long align_vdso_addr(unsigned long addr)
+{
+	unsigned long align_mask = get_align_mask();
+	return (addr + align_mask) & ~align_mask;
 }
 
 static int __init control_va_addr_alignment(char *str)
@@ -126,7 +112,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 {
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
-	unsigned long start_addr;
+	struct vm_unmapped_area_info info;
 	unsigned long begin, end;
 
 	if (flags & MAP_FIXED)
@@ -144,50 +130,16 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 	    (!vma || addr + len <= vma->vm_start))
 		return addr;
 	}
-	if (((flags & MAP_32BIT) || test_thread_flag(TIF_ADDR32))
-	    && len <= mm->cached_hole_size) {
-		mm->cached_hole_size = 0;
-		mm->free_area_cache = begin;
-	}
-	addr = mm->free_area_cache;
-	if (addr < begin)
-		addr = begin;
-	start_addr = addr;
-
-full_search:
-
-	addr = align_addr(addr, filp, 0);
-
-	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
-		/* At this point:  (!vma || addr < vma->vm_end). */
-		if (end - len < addr) {
-			/*
-			 * Start a new search - just in case we missed
-			 * some holes.
-			 */
-			if (start_addr != begin) {
-				start_addr = addr = begin;
-				mm->cached_hole_size = 0;
-				goto full_search;
-			}
-			return -ENOMEM;
-		}
-		if (!vma || addr + len <= vma->vm_start) {
-			/*
-			 * Remember the place where we stopped the search:
-			 */
-			mm->free_area_cache = addr + len;
-			return addr;
-		}
-		if (addr + mm->cached_hole_size < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
 
-		addr = vma->vm_end;
-		addr = align_addr(addr, filp, 0);
-	}
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = begin;
+	info.high_limit = end;
+	info.align_mask = filp ? get_align_mask() : 0;
+	info.align_offset = pgoff << PAGE_SHIFT;
+	return vm_unmapped_area(&info);
 }
 
-
 unsigned long
 arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 			  const unsigned long len, const unsigned long pgoff,
@@ -195,7 +147,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 {
 	struct vm_area_struct *vma;
 	struct mm_struct *mm = current->mm;
-	unsigned long addr = addr0, start_addr;
+	unsigned long addr = addr0;
+	struct vm_unmapped_area_info info;
 
 	/* requested length too big for entire address space */
 	if (len > TASK_SIZE)
@@ -217,51 +170,16 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 		return addr;
 	}
 
-	/* check if free_area_cache is useful for us */
-	if (len <= mm->cached_hole_size) {
-		mm->cached_hole_size = 0;
-		mm->free_area_cache = mm->mmap_base;
-	}
-
-try_again:
-	/* either no address requested or can't fit in requested address hole */
-	start_addr = addr = mm->free_area_cache;
-
-	if (addr < len)
-		goto fail;
-
-	addr -= len;
-	do {
-		addr = align_addr(addr, filp, ALIGN_TOPDOWN);
-
-		/*
-		 * Lookup failure means no vma is above this address,
-		 * else if new region fits below vma->vm_start,
-		 * return with success:
-		 */
-		vma = find_vma(mm, addr);
-		if (!vma || addr+len <= vma->vm_start)
-			/* remember the address as a hint for next time */
-			return mm->free_area_cache = addr;
-
-		/* remember the largest hole we saw so far */
-		if (addr + mm->cached_hole_size < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
-
-		/* try just below the current vma->vm_start */
-		addr = vma->vm_start-len;
-	} while (len < vma->vm_start);
-
-fail:
-	/*
-	 * if hint left us with no space for the requested
-	 * mapping then try again:
-	 */
-	if (start_addr != mm->mmap_base) {
-		mm->free_area_cache = mm->mmap_base;
-		mm->cached_hole_size = 0;
-		goto try_again;
-	}
+	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+	info.length = len;
+	info.low_limit = PAGE_SIZE;
+	info.high_limit = mm->mmap_base;
+	info.align_mask = filp ? get_align_mask() : 0;
+	info.align_offset = pgoff << PAGE_SHIFT;
+	addr = vm_unmapped_area(&info);
+	if (!(addr & ~PAGE_MASK))
+		return addr;
+	VM_BUG_ON(addr != -ENOMEM);
 
 bottomup:
@@ -270,14 +188,5 @@ bottomup:
 	 * can happen with large stack limits and large mmap()
 	 * allocations.
 	 */
-	mm->cached_hole_size = ~0UL;
-	mm->free_area_cache = TASK_UNMAPPED_BASE;
-	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
-	/*
-	 * Restore the topdown base:
-	 */
-	mm->free_area_cache = mm->mmap_base;
-	mm->cached_hole_size = ~0UL;
-
-	return addr;
+	return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
 }
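
align_vdso_addr() rounds an address up to the next va_align boundary with (addr + mask) & ~mask. A standalone check of that round-up arithmetic; the 32 K mask here is illustrative, not necessarily x86's actual va_align.mask value:

    #include <assert.h>

    int main(void)
    {
        unsigned long mask = 0x7fff;		/* illustrative 32 K - 1 */
        unsigned long addr = 0x12345678;
        unsigned long aligned = (addr + mask) & ~mask;

        assert(aligned >= addr);		/* never rounds down */
        assert((aligned & mask) == 0);		/* lands on a boundary */
        assert(aligned - addr <= mask);		/* moves less than one stride */
        return 0;
    }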
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index 937bff5cdaa7..ae1aa71d0115 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -274,42 +274,15 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
 		unsigned long pgoff, unsigned long flags)
 {
 	struct hstate *h = hstate_file(file);
-	struct mm_struct *mm = current->mm;
-	struct vm_area_struct *vma;
-	unsigned long start_addr;
-
-	if (len > mm->cached_hole_size) {
-		start_addr = mm->free_area_cache;
-	} else {
-		start_addr = TASK_UNMAPPED_BASE;
-		mm->cached_hole_size = 0;
-	}
-
-full_search:
-	addr = ALIGN(start_addr, huge_page_size(h));
-
-	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
-		/* At this point:  (!vma || addr < vma->vm_end). */
-		if (TASK_SIZE - len < addr) {
-			/*
-			 * Start a new search - just in case we missed
-			 * some holes.
-			 */
-			if (start_addr != TASK_UNMAPPED_BASE) {
-				start_addr = TASK_UNMAPPED_BASE;
-				mm->cached_hole_size = 0;
-				goto full_search;
-			}
-			return -ENOMEM;
-		}
-		if (!vma || addr + len <= vma->vm_start) {
-			mm->free_area_cache = addr + len;
-			return addr;
-		}
-		if (addr + mm->cached_hole_size < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
-		addr = ALIGN(vma->vm_end, huge_page_size(h));
-	}
+	struct vm_unmapped_area_info info;
+
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = TASK_UNMAPPED_BASE;
+	info.high_limit = TASK_SIZE;
+	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
+	info.align_offset = 0;
+	return vm_unmapped_area(&info);
 }
 
 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
@@ -317,83 +290,30 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
 		unsigned long pgoff, unsigned long flags)
 {
 	struct hstate *h = hstate_file(file);
-	struct mm_struct *mm = current->mm;
-	struct vm_area_struct *vma;
-	unsigned long base = mm->mmap_base;
-	unsigned long addr = addr0;
-	unsigned long largest_hole = mm->cached_hole_size;
-	unsigned long start_addr;
-
-	/* don't allow allocations above current base */
-	if (mm->free_area_cache > base)
-		mm->free_area_cache = base;
-
-	if (len <= largest_hole) {
-		largest_hole = 0;
-		mm->free_area_cache  = base;
-	}
-try_again:
-	start_addr = mm->free_area_cache;
-
-	/* make sure it can fit in the remaining address space */
-	if (mm->free_area_cache < len)
-		goto fail;
-
-	/* either no address requested or can't fit in requested address hole */
-	addr = (mm->free_area_cache - len) & huge_page_mask(h);
-	do {
-		/*
-		 * Lookup failure means no vma is above this address,
-		 * i.e. return with success:
-		 */
-		vma = find_vma(mm, addr);
-		if (!vma)
-			return addr;
+	struct vm_unmapped_area_info info;
+	unsigned long addr;
 
-		if (addr + len <= vma->vm_start) {
-			/* remember the address as a hint for next time */
-			mm->cached_hole_size = largest_hole;
-			return (mm->free_area_cache = addr);
-		} else if (mm->free_area_cache == vma->vm_end) {
-			/* pull free_area_cache down to the first hole */
-			mm->free_area_cache = vma->vm_start;
-			mm->cached_hole_size = largest_hole;
-		}
+	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+	info.length = len;
+	info.low_limit = PAGE_SIZE;
+	info.high_limit = current->mm->mmap_base;
+	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
+	info.align_offset = 0;
+	addr = vm_unmapped_area(&info);
 
-		/* remember the largest hole we saw so far */
-		if (addr + largest_hole < vma->vm_start)
-			largest_hole = vma->vm_start - addr;
-
-		/* try just below the current vma->vm_start */
-		addr = (vma->vm_start - len) & huge_page_mask(h);
-	} while (len <= vma->vm_start);
-
-fail:
-	/*
-	 * if hint left us with no space for the requested
-	 * mapping then try again:
-	 */
-	if (start_addr != base) {
-		mm->free_area_cache = base;
-		largest_hole = 0;
-		goto try_again;
-	}
 	/*
 	 * A failed mmap() very likely causes application failure,
 	 * so fall back to the bottom-up function here. This scenario
 	 * can happen with large stack limits and large mmap()
 	 * allocations.
 	 */
-	mm->free_area_cache = TASK_UNMAPPED_BASE;
-	mm->cached_hole_size = ~0UL;
-	addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
-			len, pgoff, flags);
-
-	/*
-	 * Restore the topdown base:
-	 */
-	mm->free_area_cache = base;
-	mm->cached_hole_size = ~0UL;
+	if (addr & ~PAGE_MASK) {
+		VM_BUG_ON(addr != -ENOMEM);
+		info.flags = 0;
+		info.low_limit = TASK_UNMAPPED_BASE;
+		info.high_limit = TASK_SIZE;
+		addr = vm_unmapped_area(&info);
+	}
 
 	return addr;
 }
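
The hugetlb conversions ask vm_unmapped_area() for huge-page alignment with align_mask = PAGE_MASK & ~huge_page_mask(h), i.e. every address bit between the base page size and the huge page size must be zero. A numeric spot check for 4 K base pages and 2 M huge pages:

    #include <assert.h>

    #define PAGE_MASK	(~(0x1000UL - 1))	/* 4 K base pages */
    #define HPAGE_MASK	(~(0x200000UL - 1))	/* 2 M huge pages */

    int main(void)
    {
        unsigned long align_mask = PAGE_MASK & ~HPAGE_MASK;
        assert(align_mask == 0x1ff000UL);

        assert((0x40200000UL & align_mask) == 0);	/* 2 M aligned: ok */
        assert((0x40201000UL & align_mask) != 0);	/* only 4 K aligned: rejected */
        return 0;
    }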
diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
index 00aaf047b39f..431e87544411 100644
--- a/arch/x86/vdso/vma.c
+++ b/arch/x86/vdso/vma.c
@@ -141,7 +141,7 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
 	 * unaligned here as a result of stack start randomization.
 	 */
 	addr = PAGE_ALIGN(addr);
-	addr = align_addr(addr, NULL, ALIGN_VDSO);
+	addr = align_vdso_addr(addr);
 
 	return addr;
 }
diff --git a/arch/xtensa/include/uapi/asm/mman.h b/arch/xtensa/include/uapi/asm/mman.h
index 25bc6c1309c3..00eed6786d7e 100644
--- a/arch/xtensa/include/uapi/asm/mman.h
+++ b/arch/xtensa/include/uapi/asm/mman.h
@@ -93,4 +93,15 @@
 /* compatibility flags */
 #define MAP_FILE	0
 
+/*
+ * When MAP_HUGETLB is set bits [26:31] encode the log2 of the huge page size.
+ * This gives us 6 bits, which is enough until someone invents 128 bit address
+ * spaces.
+ *
+ * Assume these are all power of twos.
+ * When 0 use the default page size.
+ */
+#define MAP_HUGE_SHIFT	26
+#define MAP_HUGE_MASK	0x3f
+
 #endif /* _XTENSA_MMAN_H */