Diffstat (limited to 'arch/sparc')
-rw-r--r--  arch/sparc/boot/piggyback.c            12
-rw-r--r--  arch/sparc/include/asm/Kbuild           1
-rw-r--r--  arch/sparc/include/asm/prom.h           5
-rw-r--r--  arch/sparc/include/uapi/asm/ioctls.h    3
-rw-r--r--  arch/sparc/kernel/pci_impl.h            2
-rw-r--r--  arch/sparc/kernel/signal_64.c           4
-rw-r--r--  arch/sparc/kernel/sys32.S               2
-rw-r--r--  arch/sparc/kernel/sys_sparc_32.c       27
-rw-r--r--  arch/sparc/kernel/sys_sparc_64.c      150
-rw-r--r--  arch/sparc/kernel/syscalls.S           14
-rw-r--r--  arch/sparc/kernel/systbls_64.S          2
-rw-r--r--  arch/sparc/mm/hugetlbpage.c           124
12 files changed, 102 insertions, 244 deletions
diff --git a/arch/sparc/boot/piggyback.c b/arch/sparc/boot/piggyback.c
index c0a798fcf030..bb7c95161d71 100644
--- a/arch/sparc/boot/piggyback.c
+++ b/arch/sparc/boot/piggyback.c
@@ -81,18 +81,18 @@ static void usage(void)
 
 static int start_line(const char *line)
 {
-        if (strcmp(line + 8, " T _start\n") == 0)
+        if (strcmp(line + 10, " _start\n") == 0)
                 return 1;
-        else if (strcmp(line + 16, " T _start\n") == 0)
+        else if (strcmp(line + 18, " _start\n") == 0)
                 return 1;
         return 0;
 }
 
 static int end_line(const char *line)
 {
-        if (strcmp(line + 8, " A _end\n") == 0)
+        if (strcmp(line + 10, " _end\n") == 0)
                 return 1;
-        else if (strcmp (line + 16, " A _end\n") == 0)
+        else if (strcmp (line + 18, " _end\n") == 0)
                 return 1;
         return 0;
 }
@@ -100,8 +100,8 @@ static int end_line(const char *line)
 /*
  * Find address for start and end in System.map.
  * The file looks like this:
- * f0004000 T _start
- * f0379f79 A _end
+ * f0004000 ... _start
+ * f0379f79 ... _end
  * 1234567890123456
  * ^coloumn 1
  * There is support for 64 bit addresses too.
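
Reviewer note, not part of the patch: the offset change is easier to follow with the System.map columns spelled out. The address field is 8 hex digits (16 for 64-bit maps), so comparing at +10 (+18) skips the one-character symbol-type column and the match no longer insists on a specific 'T'/'A' type. A minimal user-space sketch, with a hypothetical is_start_line() helper mirroring the new start_line():

/* Illustration only -- hypothetical helper mirroring the new start_line(). */
#include <stdio.h>
#include <string.h>

static int is_start_line(const char *line)
{
        if (strcmp(line + 10, " _start\n") == 0)        /* 8 hex digits + " X" */
                return 1;
        if (strcmp(line + 18, " _start\n") == 0)        /* 16 hex digits + " X" */
                return 1;
        return 0;
}

int main(void)
{
        printf("%d\n", is_start_line("f0004000 T _start\n"));  /* 1 */
        printf("%d\n", is_start_line("f0004000 A _start\n"));  /* 1: type no longer matters */
        printf("%d\n", is_start_line("f0004000 T _etext\n"));  /* 0 */
        return 0;
}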
diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild
index 645a58da0e86..e26d430ce2fd 100644
--- a/arch/sparc/include/asm/Kbuild
+++ b/arch/sparc/include/asm/Kbuild
@@ -8,4 +8,5 @@ generic-y += local64.h
 generic-y += irq_regs.h
 generic-y += local.h
 generic-y += module.h
+generic-y += trace_clock.h
 generic-y += word-at-a-time.h
diff --git a/arch/sparc/include/asm/prom.h b/arch/sparc/include/asm/prom.h
index f93003123bce..67c62578d170 100644
--- a/arch/sparc/include/asm/prom.h
+++ b/arch/sparc/include/asm/prom.h
@@ -63,10 +63,13 @@ extern char *of_console_options;
 extern void irq_trans_init(struct device_node *dp);
 extern char *build_path_component(struct device_node *dp);
 
-/* SPARC has a local implementation */
+/* SPARC has local implementations */
 extern int of_address_to_resource(struct device_node *dev, int index,
                                   struct resource *r);
 #define of_address_to_resource of_address_to_resource
 
+void __iomem *of_iomap(struct device_node *node, int index);
+#define of_iomap of_iomap
+
 #endif /* __KERNEL__ */
 #endif /* _SPARC_PROM_H */
diff --git a/arch/sparc/include/uapi/asm/ioctls.h b/arch/sparc/include/uapi/asm/ioctls.h
index 9155f7041d44..897d1723fa14 100644
--- a/arch/sparc/include/uapi/asm/ioctls.h
+++ b/arch/sparc/include/uapi/asm/ioctls.h
@@ -21,6 +21,9 @@
 #define TCSETSF2        _IOW('T', 15, struct termios2)
 #define TIOCGDEV        _IOR('T',0x32, unsigned int) /* Get primary device node of /dev/console */
 #define TIOCVHANGUP     _IO('T', 0x37)
+#define TIOCGPKT        _IOR('T', 0x38, int) /* Get packet mode state */
+#define TIOCGPTLCK      _IOR('T', 0x39, int) /* Get Pty lock state */
+#define TIOCGEXCL       _IOR('T', 0x40, int) /* Get exclusive mode state */
 
 /* Note that all the ioctls that are not available in Linux have a
  * double underscore on the front to: a) avoid some programs to
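
Reviewer note, not part of the patch: the three new definitions mirror the generic tty ioctls of the same names, so user space queries them in the usual way. A minimal sketch, assuming kernel headers that already define these constants (error handling trimmed):

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>          /* pulls in asm/ioctls.h for the TIOCG* values */

int main(void)
{
        int fd = posix_openpt(O_RDWR | O_NOCTTY);       /* pty master */
        int pktmode, locked, excl;

        if (fd < 0)
                return 1;
        if (ioctl(fd, TIOCGPKT, &pktmode) == 0)
                printf("packet mode: %d\n", pktmode);
        if (ioctl(fd, TIOCGPTLCK, &locked) == 0)
                printf("pt lock:     %d\n", locked);
        if (ioctl(fd, TIOCGEXCL, &excl) == 0)
                printf("exclusive:   %d\n", excl);
        return 0;
}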
diff --git a/arch/sparc/kernel/pci_impl.h b/arch/sparc/kernel/pci_impl.h
index 918a2031c8bb..5f688531f48c 100644
--- a/arch/sparc/kernel/pci_impl.h
+++ b/arch/sparc/kernel/pci_impl.h
@@ -88,7 +88,7 @@ struct pci_pbm_info {
         int chip_revision;
 
         /* Name used for top-level resources. */
-        char *name;
+        const char *name;
 
         /* OBP specific information. */
         struct platform_device *op;
diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c
index 867de2f8189c..689e1ba62809 100644
--- a/arch/sparc/kernel/signal_64.c
+++ b/arch/sparc/kernel/signal_64.c
@@ -295,9 +295,7 @@ void do_rt_sigreturn(struct pt_regs *regs)
         err |= restore_fpu_state(regs, fpu_save);
 
         err |= __copy_from_user(&set, &sf->mask, sizeof(sigset_t));
-        err |= do_sigaltstack(&sf->stack, NULL, (unsigned long)sf);
-
-        if (err)
+        if (err || do_sigaltstack(&sf->stack, NULL, (unsigned long)sf) == -EFAULT)
                 goto segv;
 
         err |= __get_user(rwin_save, &sf->rwin_save);
diff --git a/arch/sparc/kernel/sys32.S b/arch/sparc/kernel/sys32.S
index 44025f4ba41f..8475a474273a 100644
--- a/arch/sparc/kernel/sys32.S
+++ b/arch/sparc/kernel/sys32.S
@@ -47,7 +47,7 @@ STUB: sra REG1, 0, REG1; \
          sra    REG4, 0, REG4
 
 SIGN1(sys32_exit, sparc_exit, %o0)
-SIGN1(sys32_exit_group, sys_exit_group, %o0)
+SIGN1(sys32_exit_group, sparc_exit_group, %o0)
 SIGN1(sys32_wait4, compat_sys_wait4, %o2)
 SIGN1(sys32_creat, sys_creat, %o1)
 SIGN1(sys32_mknod, sys_mknod, %o1)
diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
index 0c9b31b22e07..57277c830151 100644
--- a/arch/sparc/kernel/sys_sparc_32.c
+++ b/arch/sparc/kernel/sys_sparc_32.c
@@ -34,11 +34,9 @@ asmlinkage unsigned long sys_getpagesize(void)
         return PAGE_SIZE; /* Possibly older binaries want 8192 on sun4's? */
 }
 
-#define COLOUR_ALIGN(addr)      (((addr)+SHMLBA-1)&~(SHMLBA-1))
-
 unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
 {
-        struct vm_area_struct * vmm;
+        struct vm_unmapped_area_info info;
 
         if (flags & MAP_FIXED) {
                 /* We do not accept a shared mapping if it would violate
@@ -56,21 +54,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
         if (!addr)
                 addr = TASK_UNMAPPED_BASE;
 
-        if (flags & MAP_SHARED)
-                addr = COLOUR_ALIGN(addr);
-        else
-                addr = PAGE_ALIGN(addr);
-
-        for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
-                /* At this point:  (!vmm || addr < vmm->vm_end). */
-                if (TASK_SIZE - PAGE_SIZE - len < addr)
-                        return -ENOMEM;
-                if (!vmm || addr + len <= vmm->vm_start)
-                        return addr;
-                addr = vmm->vm_end;
-                if (flags & MAP_SHARED)
-                        addr = COLOUR_ALIGN(addr);
-        }
+        info.flags = 0;
+        info.length = len;
+        info.low_limit = addr;
+        info.high_limit = TASK_SIZE;
+        info.align_mask = (flags & MAP_SHARED) ?
+                (PAGE_MASK & (SHMLBA - 1)) : 0;
+        info.align_offset = pgoff << PAGE_SHIFT;
+        return vm_unmapped_area(&info);
 }
 
 /*
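
Reviewer note, not part of the patch: in the conversion above, the align_mask/align_offset pair is what replaces the removed COLOUR_ALIGN() macro. The generic gap search returns an address whose SHMLBA-colour bits match the file offset, so shared mappings keep their cache colouring. A stand-alone sketch of that congruence, using an example SHMLBA value; the adjustment expression below only illustrates the invariant and is not claimed to be the exact kernel arithmetic:

#include <stdio.h>

#define PAGE_SHIFT      12UL
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define PAGE_MASK       (~(PAGE_SIZE - 1))
#define SHMLBA          (4 * PAGE_SIZE)         /* example value for illustration */

/* Bump addr to the next address whose colour bits match align_offset. */
static unsigned long colour_adjust(unsigned long addr,
                                   unsigned long align_mask,
                                   unsigned long align_offset)
{
        return addr + ((align_offset - addr) & align_mask);
}

int main(void)
{
        unsigned long mask  = PAGE_MASK & (SHMLBA - 1);         /* as in the hunk above */
        unsigned long pgoff = 3;                                /* hypothetical file page offset */
        unsigned long off   = pgoff << PAGE_SHIFT;              /* info.align_offset */
        unsigned long addr  = colour_adjust(0x50001000UL, mask, off);

        /* The chosen address and the file offset share a cache colour. */
        printf("%#lx colour=%#lx want=%#lx\n", addr, addr & mask, off & mask);
        return 0;
}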
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index 878ef3d5fec5..97309c0ec533 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -75,7 +75,7 @@ static inline int invalid_64bit_range(unsigned long addr, unsigned long len)
  * the spitfire/niagara VA-hole.
  */
 
-static inline unsigned long COLOUR_ALIGN(unsigned long addr,
+static inline unsigned long COLOR_ALIGN(unsigned long addr,
                                          unsigned long pgoff)
 {
         unsigned long base = (addr+SHMLBA-1)&~(SHMLBA-1);
@@ -84,24 +84,13 @@ static inline unsigned long COLOUR_ALIGN(unsigned long addr,
         return base + off;
 }
 
-static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr,
-                                              unsigned long pgoff)
-{
-        unsigned long base = addr & ~(SHMLBA-1);
-        unsigned long off = (pgoff<<PAGE_SHIFT) & (SHMLBA-1);
-
-        if (base + off <= addr)
-                return base + off;
-        return base - off;
-}
-
 unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
 {
         struct mm_struct *mm = current->mm;
         struct vm_area_struct * vma;
         unsigned long task_size = TASK_SIZE;
-        unsigned long start_addr;
         int do_color_align;
+        struct vm_unmapped_area_info info;
 
         if (flags & MAP_FIXED) {
                 /* We do not accept a shared mapping if it would violate
@@ -124,7 +113,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
 
         if (addr) {
                 if (do_color_align)
-                        addr = COLOUR_ALIGN(addr, pgoff);
+                        addr = COLOR_ALIGN(addr, pgoff);
                 else
                         addr = PAGE_ALIGN(addr);
 
@@ -134,50 +123,22 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
                 return addr;
         }
 
-        if (len > mm->cached_hole_size) {
-                start_addr = addr = mm->free_area_cache;
-        } else {
-                start_addr = addr = TASK_UNMAPPED_BASE;
-                mm->cached_hole_size = 0;
+        info.flags = 0;
+        info.length = len;
+        info.low_limit = TASK_UNMAPPED_BASE;
+        info.high_limit = min(task_size, VA_EXCLUDE_START);
+        info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
+        info.align_offset = pgoff << PAGE_SHIFT;
+        addr = vm_unmapped_area(&info);
+
+        if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
+                VM_BUG_ON(addr != -ENOMEM);
+                info.low_limit = VA_EXCLUDE_END;
+                info.high_limit = task_size;
+                addr = vm_unmapped_area(&info);
         }
 
-        task_size -= len;
-
-full_search:
-        if (do_color_align)
-                addr = COLOUR_ALIGN(addr, pgoff);
-        else
-                addr = PAGE_ALIGN(addr);
-
-        for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
-                /* At this point:  (!vma || addr < vma->vm_end). */
-                if (addr < VA_EXCLUDE_START &&
-                    (addr + len) >= VA_EXCLUDE_START) {
-                        addr = VA_EXCLUDE_END;
-                        vma = find_vma(mm, VA_EXCLUDE_END);
-                }
-                if (unlikely(task_size < addr)) {
-                        if (start_addr != TASK_UNMAPPED_BASE) {
-                                start_addr = addr = TASK_UNMAPPED_BASE;
-                                mm->cached_hole_size = 0;
-                                goto full_search;
-                        }
-                        return -ENOMEM;
-                }
-                if (likely(!vma || addr + len <= vma->vm_start)) {
-                        /*
-                         * Remember the place where we stopped the search:
-                         */
-                        mm->free_area_cache = addr + len;
-                        return addr;
-                }
-                if (addr + mm->cached_hole_size < vma->vm_start)
-                        mm->cached_hole_size = vma->vm_start - addr;
-
-                addr = vma->vm_end;
-                if (do_color_align)
-                        addr = COLOUR_ALIGN(addr, pgoff);
-        }
+        return addr;
 }
 
 unsigned long
@@ -190,6 +151,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
         unsigned long task_size = STACK_TOP32;
         unsigned long addr = addr0;
         int do_color_align;
+        struct vm_unmapped_area_info info;
 
         /* This should only ever run for 32-bit processes. */
         BUG_ON(!test_thread_flag(TIF_32BIT));
@@ -214,7 +176,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
         /* requesting a specific address */
         if (addr) {
                 if (do_color_align)
-                        addr = COLOUR_ALIGN(addr, pgoff);
+                        addr = COLOR_ALIGN(addr, pgoff);
                 else
                         addr = PAGE_ALIGN(addr);
 
@@ -224,73 +186,27 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                 return addr;
         }
 
-        /* check if free_area_cache is useful for us */
-        if (len <= mm->cached_hole_size) {
-                mm->cached_hole_size = 0;
-                mm->free_area_cache = mm->mmap_base;
-        }
-
-        /* either no address requested or can't fit in requested address hole */
-        addr = mm->free_area_cache;
-        if (do_color_align) {
-                unsigned long base = COLOUR_ALIGN_DOWN(addr-len, pgoff);
+        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+        info.length = len;
+        info.low_limit = PAGE_SIZE;
+        info.high_limit = mm->mmap_base;
+        info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
+        info.align_offset = pgoff << PAGE_SHIFT;
+        addr = vm_unmapped_area(&info);
 
-                addr = base + len;
-        }
-
-        /* make sure it can fit in the remaining address space */
-        if (likely(addr > len)) {
-                vma = find_vma(mm, addr-len);
-                if (!vma || addr <= vma->vm_start) {
-                        /* remember the address as a hint for next time */
-                        return (mm->free_area_cache = addr-len);
-                }
-        }
-
-        if (unlikely(mm->mmap_base < len))
-                goto bottomup;
-
-        addr = mm->mmap_base-len;
-        if (do_color_align)
-                addr = COLOUR_ALIGN_DOWN(addr, pgoff);
-
-        do {
-                /*
-                 * Lookup failure means no vma is above this address,
-                 * else if new region fits below vma->vm_start,
-                 * return with success:
-                 */
-                vma = find_vma(mm, addr);
-                if (likely(!vma || addr+len <= vma->vm_start)) {
-                        /* remember the address as a hint for next time */
-                        return (mm->free_area_cache = addr);
-                }
-
-                /* remember the largest hole we saw so far */
-                if (addr + mm->cached_hole_size < vma->vm_start)
-                        mm->cached_hole_size = vma->vm_start - addr;
-
-                /* try just below the current vma->vm_start */
-                addr = vma->vm_start-len;
-                if (do_color_align)
-                        addr = COLOUR_ALIGN_DOWN(addr, pgoff);
-        } while (likely(len < vma->vm_start));
-
-bottomup:
         /*
          * A failed mmap() very likely causes application failure,
          * so fall back to the bottom-up function here. This scenario
          * can happen with large stack limits and large mmap()
          * allocations.
          */
-        mm->cached_hole_size = ~0UL;
-        mm->free_area_cache = TASK_UNMAPPED_BASE;
-        addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
-        /*
-         * Restore the topdown base:
-         */
-        mm->free_area_cache = mm->mmap_base;
-        mm->cached_hole_size = ~0UL;
+        if (addr & ~PAGE_MASK) {
+                VM_BUG_ON(addr != -ENOMEM);
+                info.flags = 0;
+                info.low_limit = TASK_UNMAPPED_BASE;
+                info.high_limit = STACK_TOP32;
+                addr = vm_unmapped_area(&info);
+        }
 
         return addr;
 }
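
Reviewer note, not part of the patch: the `addr & ~PAGE_MASK` tests above work because vm_unmapped_area() returns either a page-aligned address or a negative errno cast to unsigned long, whose low bits are necessarily non-zero; on failure the topdown path retries bottom-up, and the 64-bit bottom-up path retries above the VA hole, instead of failing the mmap(). A tiny stand-alone check of that error test, with an example page size:

#include <errno.h>
#include <stdio.h>

#define PAGE_SIZE       8192UL                  /* example: sparc64 8K pages */
#define PAGE_MASK       (~(PAGE_SIZE - 1))

int main(void)
{
        unsigned long ok  = 0x70400000UL;               /* page-aligned success value */
        unsigned long err = (unsigned long)-ENOMEM;     /* what a failed search returns */

        printf("ok  looks like an error: %d\n", (ok  & ~PAGE_MASK) != 0);   /* 0 */
        printf("err looks like an error: %d\n", (err & ~PAGE_MASK) != 0);   /* 1 */
        return 0;
}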
diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
index 7f5f65d0b3fd..bf2347794e33 100644
--- a/arch/sparc/kernel/syscalls.S
+++ b/arch/sparc/kernel/syscalls.S
@@ -118,10 +118,20 @@ ret_from_syscall:
          ba,pt  %xcc, ret_sys_call
         ldx    [%sp + PTREGS_OFF + PT_V9_I0], %o0
 
+        .globl  sparc_exit_group
+        .type   sparc_exit_group,#function
+sparc_exit_group:
+        sethi   %hi(sys_exit_group), %g7
+        ba,pt   %xcc, 1f
+         or     %g7, %lo(sys_exit_group), %g7
+        .size   sparc_exit_group,.-sparc_exit_group
+
         .globl  sparc_exit
         .type   sparc_exit,#function
 sparc_exit:
-        rdpr    %pstate, %g2
+        sethi   %hi(sys_exit), %g7
+        or      %g7, %lo(sys_exit), %g7
+1:      rdpr    %pstate, %g2
         wrpr    %g2, PSTATE_IE, %pstate
         rdpr    %otherwin, %g1
         rdpr    %cansave, %g3
@@ -129,7 +139,7 @@ sparc_exit:
         wrpr    %g3, 0x0, %cansave
         wrpr    %g0, 0x0, %otherwin
         wrpr    %g2, 0x0, %pstate
-        ba,pt   %xcc, sys_exit
+        jmpl    %g7, %g0
          stb    %g0, [%g6 + TI_WSAVED]
         .size   sparc_exit,.-sparc_exit
 
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
index 1c9af9fa38e9..017b74a63dcb 100644
--- a/arch/sparc/kernel/systbls_64.S
+++ b/arch/sparc/kernel/systbls_64.S
@@ -133,7 +133,7 @@ sys_call_table:
 /*170*/ .word sys_lsetxattr, sys_fsetxattr, sys_getxattr, sys_lgetxattr, sys_getdents
         .word sys_setsid, sys_fchdir, sys_fgetxattr, sys_listxattr, sys_llistxattr
 /*180*/ .word sys_flistxattr, sys_removexattr, sys_lremovexattr, sys_nis_syscall, sys_ni_syscall
-        .word sys_setpgid, sys_fremovexattr, sys_tkill, sys_exit_group, sys_newuname
+        .word sys_setpgid, sys_fremovexattr, sys_tkill, sparc_exit_group, sys_newuname
 /*190*/ .word sys_init_module, sys_sparc64_personality, sys_remap_file_pages, sys_epoll_create, sys_epoll_ctl
         .word sys_epoll_wait, sys_ioprio_set, sys_getppid, sys_nis_syscall, sys_sgetmask
 /*200*/ .word sys_ssetmask, sys_nis_syscall, sys_newlstat, sys_uselib, sys_nis_syscall
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index f76f83d5ac63..d2b59441ebdd 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -30,55 +30,28 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
                                                         unsigned long pgoff,
                                                         unsigned long flags)
 {
-        struct mm_struct *mm = current->mm;
-        struct vm_area_struct * vma;
         unsigned long task_size = TASK_SIZE;
-        unsigned long start_addr;
+        struct vm_unmapped_area_info info;
 
         if (test_thread_flag(TIF_32BIT))
                 task_size = STACK_TOP32;
-        if (unlikely(len >= VA_EXCLUDE_START))
-                return -ENOMEM;
 
-        if (len > mm->cached_hole_size) {
-                start_addr = addr = mm->free_area_cache;
-        } else {
-                start_addr = addr = TASK_UNMAPPED_BASE;
-                mm->cached_hole_size = 0;
+        info.flags = 0;
+        info.length = len;
+        info.low_limit = TASK_UNMAPPED_BASE;
+        info.high_limit = min(task_size, VA_EXCLUDE_START);
+        info.align_mask = PAGE_MASK & ~HPAGE_MASK;
+        info.align_offset = 0;
+        addr = vm_unmapped_area(&info);
+
+        if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
+                VM_BUG_ON(addr != -ENOMEM);
+                info.low_limit = VA_EXCLUDE_END;
+                info.high_limit = task_size;
+                addr = vm_unmapped_area(&info);
         }
 
-        task_size -= len;
-
-full_search:
-        addr = ALIGN(addr, HPAGE_SIZE);
-
-        for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
-                /* At this point:  (!vma || addr < vma->vm_end). */
-                if (addr < VA_EXCLUDE_START &&
-                    (addr + len) >= VA_EXCLUDE_START) {
-                        addr = VA_EXCLUDE_END;
-                        vma = find_vma(mm, VA_EXCLUDE_END);
-                }
-                if (unlikely(task_size < addr)) {
-                        if (start_addr != TASK_UNMAPPED_BASE) {
-                                start_addr = addr = TASK_UNMAPPED_BASE;
-                                mm->cached_hole_size = 0;
-                                goto full_search;
-                        }
-                        return -ENOMEM;
-                }
-                if (likely(!vma || addr + len <= vma->vm_start)) {
-                        /*
-                         * Remember the place where we stopped the search:
-                         */
-                        mm->free_area_cache = addr + len;
-                        return addr;
-                }
-                if (addr + mm->cached_hole_size < vma->vm_start)
-                        mm->cached_hole_size = vma->vm_start - addr;
-
-                addr = ALIGN(vma->vm_end, HPAGE_SIZE);
-        }
+        return addr;
 }
 
 static unsigned long
@@ -87,71 +60,34 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                                   const unsigned long pgoff,
                                   const unsigned long flags)
 {
-        struct vm_area_struct *vma;
         struct mm_struct *mm = current->mm;
         unsigned long addr = addr0;
+        struct vm_unmapped_area_info info;
 
         /* This should only ever run for 32-bit processes. */
         BUG_ON(!test_thread_flag(TIF_32BIT));
 
-        /* check if free_area_cache is useful for us */
-        if (len <= mm->cached_hole_size) {
-                mm->cached_hole_size = 0;
-                mm->free_area_cache = mm->mmap_base;
-        }
-
-        /* either no address requested or can't fit in requested address hole */
-        addr = mm->free_area_cache & HPAGE_MASK;
-
-        /* make sure it can fit in the remaining address space */
-        if (likely(addr > len)) {
-                vma = find_vma(mm, addr-len);
-                if (!vma || addr <= vma->vm_start) {
-                        /* remember the address as a hint for next time */
-                        return (mm->free_area_cache = addr-len);
-                }
-        }
-
-        if (unlikely(mm->mmap_base < len))
-                goto bottomup;
-
-        addr = (mm->mmap_base-len) & HPAGE_MASK;
-
-        do {
-                /*
-                 * Lookup failure means no vma is above this address,
-                 * else if new region fits below vma->vm_start,
-                 * return with success:
-                 */
-                vma = find_vma(mm, addr);
-                if (likely(!vma || addr+len <= vma->vm_start)) {
-                        /* remember the address as a hint for next time */
-                        return (mm->free_area_cache = addr);
-                }
-
-                /* remember the largest hole we saw so far */
-                if (addr + mm->cached_hole_size < vma->vm_start)
-                        mm->cached_hole_size = vma->vm_start - addr;
-
-                /* try just below the current vma->vm_start */
-                addr = (vma->vm_start-len) & HPAGE_MASK;
-        } while (likely(len < vma->vm_start));
+        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+        info.length = len;
+        info.low_limit = PAGE_SIZE;
+        info.high_limit = mm->mmap_base;
+        info.align_mask = PAGE_MASK & ~HPAGE_MASK;
+        info.align_offset = 0;
+        addr = vm_unmapped_area(&info);
 
-bottomup:
         /*
          * A failed mmap() very likely causes application failure,
          * so fall back to the bottom-up function here. This scenario
          * can happen with large stack limits and large mmap()
          * allocations.
          */
-        mm->cached_hole_size = ~0UL;
-        mm->free_area_cache = TASK_UNMAPPED_BASE;
-        addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
-        /*
-         * Restore the topdown base:
-         */
-        mm->free_area_cache = mm->mmap_base;
-        mm->cached_hole_size = ~0UL;
+        if (addr & ~PAGE_MASK) {
+                VM_BUG_ON(addr != -ENOMEM);
+                info.flags = 0;
+                info.low_limit = TASK_UNMAPPED_BASE;
+                info.high_limit = STACK_TOP32;
+                addr = vm_unmapped_area(&info);
+        }
 
         return addr;
 }