path: root/arch/sparc
author     Linus Torvalds <torvalds@linux-foundation.org>  2012-12-11 21:05:37 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-12-11 21:05:37 -0500
commit     608ff1a210ab0e8b969399039bf8e18693605910 (patch)
tree       faea7bb1764461c73d0953089bd5439d91733a03 /arch/sparc
parent     414a6750e59b0b687034764c464e9ddecac0f7a6 (diff)
parent     74d42d8fe146e870c52bde3b1c692f86cc8ff844 (diff)
Merge branch 'akpm' (Andrew's patchbomb)
Merge misc updates from Andrew Morton:
 "About half of most of MM. Going very early this time due to
  uncertainty over the coreautounifiednumasched things. I'll send the
  other half of most of MM tomorrow. The rest of MM awaits a slab merge
  from Pekka."

* emailed patches from Andrew Morton: (71 commits)
  memory_hotplug: ensure every online node has NORMAL memory
  memory_hotplug: handle empty zone when online_movable/online_kernel
  mm, memory-hotplug: dynamic configure movable memory and portion memory
  drivers/base/node.c: cleanup node_state_attr[]
  bootmem: fix wrong call parameter for free_bootmem()
  avr32, kconfig: remove HAVE_ARCH_BOOTMEM
  mm: cma: remove watermark hacks
  mm: cma: skip watermarks check for already isolated blocks in split_free_page()
  mm, oom: fix race when specifying a thread as the oom origin
  mm, oom: change type of oom_score_adj to short
  mm: cleanup register_node()
  mm, mempolicy: remove duplicate code
  mm/vmscan.c: try_to_freeze() returns boolean
  mm: introduce putback_movable_pages()
  virtio_balloon: introduce migration primitives to balloon pages
  mm: introduce compaction and migration for ballooned pages
  mm: introduce a common interface for balloon pages mobility
  mm: redefine address_space.assoc_mapping
  mm: adjust address_space_operations.migratepage() return code
  arch/sparc/kernel/sys_sparc_64.c: s/COLOUR/COLOR/
  ...
Diffstat (limited to 'arch/sparc')
-rw-r--r--  arch/sparc/kernel/sys_sparc_32.c   27
-rw-r--r--  arch/sparc/kernel/sys_sparc_64.c  150
-rw-r--r--  arch/sparc/mm/hugetlbpage.c       124
3 files changed, 72 insertions, 229 deletions
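
Note: all three diffs below make the same conversion. The open-coded VMA walks (the COLOUR_ALIGN()/COLOUR_ALIGN_DOWN() loops plus the free_area_cache/cached_hole_size hints) are replaced by a single vm_unmapped_area() call, and the cache-coloring constraint moves into info.align_mask/info.align_offset. As a rough, userspace-only sketch of that alignment arithmetic (the SHMLBA, PAGE_SHIFT and sample values below are assumptions chosen for illustration, not the real sparc definitions):

#include <stdio.h>

/* Illustrative values only -- the real ones come from the sparc headers. */
#define PAGE_SHIFT	13UL			/* 8 KB pages */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define SHMLBA		(4UL * PAGE_SIZE)	/* assumed cache-alias boundary */

/*
 * Roughly the adjustment vm_unmapped_area() applies to a candidate gap
 * start: bump it up until its color bits (the ones selected by align_mask)
 * match those of align_offset.
 */
static unsigned long color_align(unsigned long start, unsigned long align_mask,
				 unsigned long align_offset)
{
	return start + ((align_offset - start) & align_mask);
}

int main(void)
{
	unsigned long pgoff = 3;		/* file page offset of the mapping */
	unsigned long align_mask = PAGE_MASK & (SHMLBA - 1);
	unsigned long align_offset = pgoff << PAGE_SHIFT;
	unsigned long start = 0x70002000UL;	/* arbitrary page-aligned gap start */

	printf("candidate address: %#lx\n",
	       color_align(start, align_mask, align_offset));
	return 0;
}

Compiled with a plain cc invocation, this prints the first address at or above the gap start whose color bits match the file offset, which is the kind of address vm_unmapped_area() hands back from a suitable gap.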
diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
index 0c9b31b22e07..57277c830151 100644
--- a/arch/sparc/kernel/sys_sparc_32.c
+++ b/arch/sparc/kernel/sys_sparc_32.c
@@ -34,11 +34,9 @@ asmlinkage unsigned long sys_getpagesize(void)
 	return PAGE_SIZE; /* Possibly older binaries want 8192 on sun4's? */
 }
 
-#define COLOUR_ALIGN(addr)      (((addr)+SHMLBA-1)&~(SHMLBA-1))
-
 unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
 {
-	struct vm_area_struct * vmm;
+	struct vm_unmapped_area_info info;
 
 	if (flags & MAP_FIXED) {
 		/* We do not accept a shared mapping if it would violate
@@ -56,21 +54,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
 	if (!addr)
 		addr = TASK_UNMAPPED_BASE;
 
-	if (flags & MAP_SHARED)
-		addr = COLOUR_ALIGN(addr);
-	else
-		addr = PAGE_ALIGN(addr);
-
-	for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
-		/* At this point: (!vmm || addr < vmm->vm_end). */
-		if (TASK_SIZE - PAGE_SIZE - len < addr)
-			return -ENOMEM;
-		if (!vmm || addr + len <= vmm->vm_start)
-			return addr;
-		addr = vmm->vm_end;
-		if (flags & MAP_SHARED)
-			addr = COLOUR_ALIGN(addr);
-	}
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = addr;
+	info.high_limit = TASK_SIZE;
+	info.align_mask = (flags & MAP_SHARED) ?
+		(PAGE_MASK & (SHMLBA - 1)) : 0;
+	info.align_offset = pgoff << PAGE_SHIFT;
+	return vm_unmapped_area(&info);
 }
 
 /*
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index 878ef3d5fec5..97309c0ec533 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -75,7 +75,7 @@ static inline int invalid_64bit_range(unsigned long addr, unsigned long len)
  * the spitfire/niagara VA-hole.
  */
 
-static inline unsigned long COLOUR_ALIGN(unsigned long addr,
+static inline unsigned long COLOR_ALIGN(unsigned long addr,
 					 unsigned long pgoff)
 {
 	unsigned long base = (addr+SHMLBA-1)&~(SHMLBA-1);
@@ -84,24 +84,13 @@ static inline unsigned long COLOUR_ALIGN(unsigned long addr,
 	return base + off;
 }
 
-static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr,
-					       unsigned long pgoff)
-{
-	unsigned long base = addr & ~(SHMLBA-1);
-	unsigned long off = (pgoff<<PAGE_SHIFT) & (SHMLBA-1);
-
-	if (base + off <= addr)
-		return base + off;
-	return base - off;
-}
-
 unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
 {
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct * vma;
 	unsigned long task_size = TASK_SIZE;
-	unsigned long start_addr;
 	int do_color_align;
+	struct vm_unmapped_area_info info;
 
 	if (flags & MAP_FIXED) {
 		/* We do not accept a shared mapping if it would violate
@@ -124,7 +113,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
 
 	if (addr) {
 		if (do_color_align)
-			addr = COLOUR_ALIGN(addr, pgoff);
+			addr = COLOR_ALIGN(addr, pgoff);
 		else
 			addr = PAGE_ALIGN(addr);
 
@@ -134,50 +123,22 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
 			return addr;
 	}
 
-	if (len > mm->cached_hole_size) {
-		start_addr = addr = mm->free_area_cache;
-	} else {
-		start_addr = addr = TASK_UNMAPPED_BASE;
-		mm->cached_hole_size = 0;
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = TASK_UNMAPPED_BASE;
+	info.high_limit = min(task_size, VA_EXCLUDE_START);
+	info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
+	info.align_offset = pgoff << PAGE_SHIFT;
+	addr = vm_unmapped_area(&info);
+
+	if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
+		VM_BUG_ON(addr != -ENOMEM);
+		info.low_limit = VA_EXCLUDE_END;
+		info.high_limit = task_size;
+		addr = vm_unmapped_area(&info);
 	}
 
-	task_size -= len;
-
-full_search:
-	if (do_color_align)
-		addr = COLOUR_ALIGN(addr, pgoff);
-	else
-		addr = PAGE_ALIGN(addr);
-
-	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
-		/* At this point: (!vma || addr < vma->vm_end). */
-		if (addr < VA_EXCLUDE_START &&
-		    (addr + len) >= VA_EXCLUDE_START) {
-			addr = VA_EXCLUDE_END;
-			vma = find_vma(mm, VA_EXCLUDE_END);
-		}
-		if (unlikely(task_size < addr)) {
-			if (start_addr != TASK_UNMAPPED_BASE) {
-				start_addr = addr = TASK_UNMAPPED_BASE;
-				mm->cached_hole_size = 0;
-				goto full_search;
-			}
-			return -ENOMEM;
-		}
-		if (likely(!vma || addr + len <= vma->vm_start)) {
-			/*
-			 * Remember the place where we stopped the search:
-			 */
-			mm->free_area_cache = addr + len;
-			return addr;
-		}
-		if (addr + mm->cached_hole_size < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
-
-		addr = vma->vm_end;
-		if (do_color_align)
-			addr = COLOUR_ALIGN(addr, pgoff);
-	}
+	return addr;
 }
 
 unsigned long
@@ -190,6 +151,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 	unsigned long task_size = STACK_TOP32;
 	unsigned long addr = addr0;
 	int do_color_align;
+	struct vm_unmapped_area_info info;
 
 	/* This should only ever run for 32-bit processes. */
 	BUG_ON(!test_thread_flag(TIF_32BIT));
@@ -214,7 +176,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 	/* requesting a specific address */
 	if (addr) {
 		if (do_color_align)
-			addr = COLOUR_ALIGN(addr, pgoff);
+			addr = COLOR_ALIGN(addr, pgoff);
 		else
 			addr = PAGE_ALIGN(addr);
 
@@ -224,73 +186,27 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 		return addr;
 	}
 
-	/* check if free_area_cache is useful for us */
-	if (len <= mm->cached_hole_size) {
-		mm->cached_hole_size = 0;
-		mm->free_area_cache = mm->mmap_base;
-	}
-
-	/* either no address requested or can't fit in requested address hole */
-	addr = mm->free_area_cache;
-	if (do_color_align) {
-		unsigned long base = COLOUR_ALIGN_DOWN(addr-len, pgoff);
-
-		addr = base + len;
-	}
-
-	/* make sure it can fit in the remaining address space */
-	if (likely(addr > len)) {
-		vma = find_vma(mm, addr-len);
-		if (!vma || addr <= vma->vm_start) {
-			/* remember the address as a hint for next time */
-			return (mm->free_area_cache = addr-len);
-		}
-	}
-
-	if (unlikely(mm->mmap_base < len))
-		goto bottomup;
-
-	addr = mm->mmap_base-len;
-	if (do_color_align)
-		addr = COLOUR_ALIGN_DOWN(addr, pgoff);
-
-	do {
-		/*
-		 * Lookup failure means no vma is above this address,
-		 * else if new region fits below vma->vm_start,
-		 * return with success:
-		 */
-		vma = find_vma(mm, addr);
-		if (likely(!vma || addr+len <= vma->vm_start)) {
-			/* remember the address as a hint for next time */
-			return (mm->free_area_cache = addr);
-		}
-
-		/* remember the largest hole we saw so far */
-		if (addr + mm->cached_hole_size < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
-
-		/* try just below the current vma->vm_start */
-		addr = vma->vm_start-len;
-		if (do_color_align)
-			addr = COLOUR_ALIGN_DOWN(addr, pgoff);
-	} while (likely(len < vma->vm_start));
+	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+	info.length = len;
+	info.low_limit = PAGE_SIZE;
+	info.high_limit = mm->mmap_base;
+	info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
+	info.align_offset = pgoff << PAGE_SHIFT;
+	addr = vm_unmapped_area(&info);
 
-bottomup:
 	/*
 	 * A failed mmap() very likely causes application failure,
 	 * so fall back to the bottom-up function here. This scenario
 	 * can happen with large stack limits and large mmap()
 	 * allocations.
 	 */
-	mm->cached_hole_size = ~0UL;
-	mm->free_area_cache = TASK_UNMAPPED_BASE;
-	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
-	/*
-	 * Restore the topdown base:
-	 */
-	mm->free_area_cache = mm->mmap_base;
-	mm->cached_hole_size = ~0UL;
+	if (addr & ~PAGE_MASK) {
+		VM_BUG_ON(addr != -ENOMEM);
+		info.flags = 0;
+		info.low_limit = TASK_UNMAPPED_BASE;
+		info.high_limit = STACK_TOP32;
+		addr = vm_unmapped_area(&info);
+	}
 
 	return addr;
 }
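
Note: in both top-down paths (here and in hugetlbpage.c below) the fallback changes shape. Instead of a bottomup: label that reset free_area_cache and recursed into arch_get_unmapped_area(), the new code checks whether vm_unmapped_area() returned a non-page-aligned value (i.e. -ENOMEM) and simply retries with info.flags = 0 over a bottom-up window. A toy userspace model of that retry pattern; the struct and the vm_unmapped_area() stub below are stand-ins written for this sketch, not the kernel implementation:

#include <stdio.h>

/* Toy stand-ins for the kernel pieces; purely illustrative, not the real API. */
#define ENOMEM				12
#define PAGE_MASK			(~0x1FFFUL)	/* 8 KB pages assumed */
#define VM_UNMAPPED_AREA_TOPDOWN	1

struct vm_unmapped_area_info {
	unsigned long flags;
	unsigned long length;
	unsigned long low_limit;
	unsigned long high_limit;
};

/* Fake allocator: fails when the window is smaller than the request. */
static unsigned long vm_unmapped_area(const struct vm_unmapped_area_info *info)
{
	if (info->high_limit - info->low_limit < info->length)
		return (unsigned long)-ENOMEM;
	return (info->flags & VM_UNMAPPED_AREA_TOPDOWN) ?
		info->high_limit - info->length : info->low_limit;
}

int main(void)
{
	struct vm_unmapped_area_info info = {
		.flags = VM_UNMAPPED_AREA_TOPDOWN,
		.length = 0x400000,		/* 4 MB request */
		.low_limit = 0x2000,
		.high_limit = 0x200000,		/* deliberately too small a window */
	};
	unsigned long addr = vm_unmapped_area(&info);

	/* Success is always page aligned; -ENOMEM is not, so this catches it. */
	if (addr & ~PAGE_MASK) {
		info.flags = 0;			/* retry bottom-up ... */
		info.low_limit = 0x40000000;	/* ... over a larger window */
		info.high_limit = 0xf0000000;
		addr = vm_unmapped_area(&info);
	}
	printf("final address: %#lx\n", addr);
	return 0;
}

The point of the addr & ~PAGE_MASK test is that every successful return value is page aligned, while -ENOMEM cast to unsigned long is not.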
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index f76f83d5ac63..d2b59441ebdd 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -30,55 +30,28 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
 						unsigned long pgoff,
 						unsigned long flags)
 {
-	struct mm_struct *mm = current->mm;
-	struct vm_area_struct * vma;
 	unsigned long task_size = TASK_SIZE;
-	unsigned long start_addr;
+	struct vm_unmapped_area_info info;
 
 	if (test_thread_flag(TIF_32BIT))
 		task_size = STACK_TOP32;
-	if (unlikely(len >= VA_EXCLUDE_START))
-		return -ENOMEM;
 
-	if (len > mm->cached_hole_size) {
-		start_addr = addr = mm->free_area_cache;
-	} else {
-		start_addr = addr = TASK_UNMAPPED_BASE;
-		mm->cached_hole_size = 0;
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = TASK_UNMAPPED_BASE;
+	info.high_limit = min(task_size, VA_EXCLUDE_START);
+	info.align_mask = PAGE_MASK & ~HPAGE_MASK;
+	info.align_offset = 0;
+	addr = vm_unmapped_area(&info);
+
+	if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
+		VM_BUG_ON(addr != -ENOMEM);
+		info.low_limit = VA_EXCLUDE_END;
+		info.high_limit = task_size;
+		addr = vm_unmapped_area(&info);
 	}
 
-	task_size -= len;
-
-full_search:
-	addr = ALIGN(addr, HPAGE_SIZE);
-
-	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
-		/* At this point: (!vma || addr < vma->vm_end). */
-		if (addr < VA_EXCLUDE_START &&
-		    (addr + len) >= VA_EXCLUDE_START) {
-			addr = VA_EXCLUDE_END;
-			vma = find_vma(mm, VA_EXCLUDE_END);
-		}
-		if (unlikely(task_size < addr)) {
-			if (start_addr != TASK_UNMAPPED_BASE) {
-				start_addr = addr = TASK_UNMAPPED_BASE;
-				mm->cached_hole_size = 0;
-				goto full_search;
-			}
-			return -ENOMEM;
-		}
-		if (likely(!vma || addr + len <= vma->vm_start)) {
-			/*
-			 * Remember the place where we stopped the search:
-			 */
-			mm->free_area_cache = addr + len;
-			return addr;
-		}
-		if (addr + mm->cached_hole_size < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
-
-		addr = ALIGN(vma->vm_end, HPAGE_SIZE);
-	}
+	return addr;
 }
 
 static unsigned long
@@ -87,71 +60,34 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 					const unsigned long pgoff,
 					const unsigned long flags)
 {
-	struct vm_area_struct *vma;
 	struct mm_struct *mm = current->mm;
 	unsigned long addr = addr0;
+	struct vm_unmapped_area_info info;
 
 	/* This should only ever run for 32-bit processes. */
 	BUG_ON(!test_thread_flag(TIF_32BIT));
 
-	/* check if free_area_cache is useful for us */
-	if (len <= mm->cached_hole_size) {
-		mm->cached_hole_size = 0;
-		mm->free_area_cache = mm->mmap_base;
-	}
-
-	/* either no address requested or can't fit in requested address hole */
-	addr = mm->free_area_cache & HPAGE_MASK;
-
-	/* make sure it can fit in the remaining address space */
-	if (likely(addr > len)) {
-		vma = find_vma(mm, addr-len);
-		if (!vma || addr <= vma->vm_start) {
-			/* remember the address as a hint for next time */
-			return (mm->free_area_cache = addr-len);
-		}
-	}
-
-	if (unlikely(mm->mmap_base < len))
-		goto bottomup;
-
-	addr = (mm->mmap_base-len) & HPAGE_MASK;
-
-	do {
-		/*
-		 * Lookup failure means no vma is above this address,
-		 * else if new region fits below vma->vm_start,
-		 * return with success:
-		 */
-		vma = find_vma(mm, addr);
-		if (likely(!vma || addr+len <= vma->vm_start)) {
-			/* remember the address as a hint for next time */
-			return (mm->free_area_cache = addr);
-		}
-
-		/* remember the largest hole we saw so far */
-		if (addr + mm->cached_hole_size < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
-
-		/* try just below the current vma->vm_start */
-		addr = (vma->vm_start-len) & HPAGE_MASK;
-	} while (likely(len < vma->vm_start));
+	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+	info.length = len;
+	info.low_limit = PAGE_SIZE;
+	info.high_limit = mm->mmap_base;
+	info.align_mask = PAGE_MASK & ~HPAGE_MASK;
+	info.align_offset = 0;
+	addr = vm_unmapped_area(&info);
 
-bottomup:
 	/*
 	 * A failed mmap() very likely causes application failure,
 	 * so fall back to the bottom-up function here. This scenario
 	 * can happen with large stack limits and large mmap()
 	 * allocations.
	 */
-	mm->cached_hole_size = ~0UL;
-	mm->free_area_cache = TASK_UNMAPPED_BASE;
-	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
-	/*
-	 * Restore the topdown base:
-	 */
-	mm->free_area_cache = mm->mmap_base;
-	mm->cached_hole_size = ~0UL;
+	if (addr & ~PAGE_MASK) {
+		VM_BUG_ON(addr != -ENOMEM);
+		info.flags = 0;
+		info.low_limit = TASK_UNMAPPED_BASE;
+		info.high_limit = STACK_TOP32;
+		addr = vm_unmapped_area(&info);
+	}
 
 	return addr;
 }
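
Note: one detail specific to the hugetlb paths is that the coloring mask is replaced by PAGE_MASK & ~HPAGE_MASK with align_offset = 0, which makes vm_unmapped_area() hand back HPAGE_SIZE-aligned addresses without any explicit ALIGN(addr, HPAGE_SIZE) calls. A quick userspace check of that identity; the page and huge-page sizes are assumed for illustration (8 KB / 4 MB), not taken from the sparc headers:

#include <assert.h>
#include <stdio.h>

/* Page and huge-page geometry assumed for illustration only. */
#define PAGE_SHIFT	13UL
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define HPAGE_SHIFT	22UL
#define HPAGE_SIZE	(1UL << HPAGE_SHIFT)
#define HPAGE_MASK	(~(HPAGE_SIZE - 1))

int main(void)
{
	unsigned long align_mask = PAGE_MASK & ~HPAGE_MASK;
	unsigned long start = 0x70002000UL;	/* page-aligned candidate */
	/* The rounding vm_unmapped_area() performs when align_offset == 0. */
	unsigned long addr = start + ((0UL - start) & align_mask);

	assert((addr & ~HPAGE_MASK) == 0);	/* HPAGE_SIZE aligned */
	assert(addr >= start && addr - start < HPAGE_SIZE);
	printf("%#lx rounds up to %#lx\n", start, addr);
	return 0;
}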