Diffstat (limited to 'arch/blackfin/kernel/process.c')
-rw-r--r--  arch/blackfin/kernel/process.c | 69
1 file changed, 45 insertions(+), 24 deletions(-)
diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c
index 6b8459c66163..be9fdd00d7cb 100644
--- a/arch/blackfin/kernel/process.c
+++ b/arch/blackfin/kernel/process.c
@@ -32,6 +32,8 @@
 #include <linux/unistd.h>
 #include <linux/user.h>
 #include <linux/uaccess.h>
+#include <linux/sched.h>
+#include <linux/tick.h>
 #include <linux/fs.h>
 #include <linux/err.h>
 
@@ -69,33 +71,44 @@ EXPORT_SYMBOL(pm_power_off);
  * The idle loop on BFIN
  */
 #ifdef CONFIG_IDLE_L1
-void default_idle(void)__attribute__((l1_text));
+static void default_idle(void)__attribute__((l1_text));
 void cpu_idle(void)__attribute__((l1_text));
 #endif
 
-void default_idle(void)
+/*
+ * This is our default idle handler. We need to disable
+ * interrupts here to ensure we don't miss a wakeup call.
+ */
+static void default_idle(void)
 {
-	while (!need_resched()) {
-		local_irq_disable();
-		if (likely(!need_resched()))
-			idle_with_irq_disabled();
-		local_irq_enable();
-	}
-}
+	local_irq_disable();
+	if (!need_resched())
+		idle_with_irq_disabled();
 
-void (*idle)(void) = default_idle;
+	local_irq_enable();
+}
 
 /*
- * The idle thread. There's no useful work to be
- * done, so just try to conserve power and have a
- * low exit latency (ie sit in a loop waiting for
- * somebody to say that they'd like to reschedule)
+ * The idle thread. We try to conserve power, while trying to keep
+ * overall latency low. The architecture specific idle is passed
+ * a value to indicate the level of "idleness" of the system.
  */
 void cpu_idle(void)
 {
 	/* endless idle loop with no priority at all */
 	while (1) {
-		idle();
+		void (*idle)(void) = pm_idle;
+
+#ifdef CONFIG_HOTPLUG_CPU
+		if (cpu_is_offline(smp_processor_id()))
+			cpu_die();
+#endif
+		if (!idle)
+			idle = default_idle;
+		tick_nohz_stop_sched_tick();
+		while (!need_resched())
+			idle();
+		tick_nohz_restart_sched_tick();
 		preempt_enable_no_resched();
 		schedule();
 		preempt_disable();
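The hunk above is the core of the change: cpu_idle() now samples pm_idle on each pass, falls back to default_idle() when the hook is unset, and brackets the inner idle loop with tick_nohz_stop_sched_tick()/tick_nohz_restart_sched_tick(). As a rough standalone illustration of the hook-with-fallback part only (plain userspace C, not kernel code; the names pm_idle_hook, default_idle_hook and idle_pass are invented for this sketch):

#include <stdio.h>

/* Hook a power-management layer may install; NULL means "none set". */
static void (*pm_idle_hook)(void);

/* Fallback handler, playing the role of default_idle(). */
static void default_idle_hook(void)
{
	puts("default idle handler");
}

/* One pass of the selection logic used inside the while (1) loop above. */
static void idle_pass(void)
{
	void (*idle)(void) = pm_idle_hook;	/* sample the hook once */

	if (!idle)
		idle = default_idle_hook;	/* fall back when unset */
	idle();
}

int main(void)
{
	idle_pass();	/* prints "default idle handler" */
	return 0;
}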
@@ -189,7 +202,7 @@ copy_thread(int nr, unsigned long clone_flags,
  * sys_execve() executes a new program.
  */
 
-asmlinkage int sys_execve(char *name, char **argv, char **envp)
+asmlinkage int sys_execve(char __user *name, char __user * __user *argv, char __user * __user *envp)
 {
 	int error;
 	char *filename;
@@ -232,23 +245,25 @@ unsigned long get_wchan(struct task_struct *p)
 
 void finish_atomic_sections (struct pt_regs *regs)
 {
+	int __user *up0 = (int __user *)&regs->p0;
+
 	if (regs->pc < ATOMIC_SEQS_START || regs->pc >= ATOMIC_SEQS_END)
 		return;
 
 	switch (regs->pc) {
 	case ATOMIC_XCHG32 + 2:
-		put_user(regs->r1, (int *)regs->p0);
+		put_user(regs->r1, up0);
 		regs->pc += 2;
 		break;
 
 	case ATOMIC_CAS32 + 2:
 	case ATOMIC_CAS32 + 4:
 		if (regs->r0 == regs->r1)
-			put_user(regs->r2, (int *)regs->p0);
+			put_user(regs->r2, up0);
 		regs->pc = ATOMIC_CAS32 + 8;
 		break;
 	case ATOMIC_CAS32 + 6:
-		put_user(regs->r2, (int *)regs->p0);
+		put_user(regs->r2, up0);
 		regs->pc += 2;
 		break;
 
@@ -256,7 +271,7 @@ void finish_atomic_sections (struct pt_regs *regs)
 		regs->r0 = regs->r1 + regs->r0;
 		/* fall through */
 	case ATOMIC_ADD32 + 4:
-		put_user(regs->r0, (int *)regs->p0);
+		put_user(regs->r0, up0);
 		regs->pc = ATOMIC_ADD32 + 6;
 		break;
 
@@ -264,7 +279,7 @@ void finish_atomic_sections (struct pt_regs *regs)
 		regs->r0 = regs->r1 - regs->r0;
 		/* fall through */
 	case ATOMIC_SUB32 + 4:
-		put_user(regs->r0, (int *)regs->p0);
+		put_user(regs->r0, up0);
 		regs->pc = ATOMIC_SUB32 + 6;
 		break;
 
@@ -272,7 +287,7 @@ void finish_atomic_sections (struct pt_regs *regs)
 		regs->r0 = regs->r1 | regs->r0;
 		/* fall through */
 	case ATOMIC_IOR32 + 4:
-		put_user(regs->r0, (int *)regs->p0);
+		put_user(regs->r0, up0);
 		regs->pc = ATOMIC_IOR32 + 6;
 		break;
 
@@ -280,7 +295,7 @@ void finish_atomic_sections (struct pt_regs *regs)
 		regs->r0 = regs->r1 & regs->r0;
 		/* fall through */
 	case ATOMIC_AND32 + 4:
-		put_user(regs->r0, (int *)regs->p0);
+		put_user(regs->r0, up0);
 		regs->pc = ATOMIC_AND32 + 6;
 		break;
 
@@ -288,7 +303,7 @@ void finish_atomic_sections (struct pt_regs *regs)
 		regs->r0 = regs->r1 ^ regs->r0;
 		/* fall through */
 	case ATOMIC_XOR32 + 4:
-		put_user(regs->r0, (int *)regs->p0);
+		put_user(regs->r0, up0);
 		regs->pc = ATOMIC_XOR32 + 6;
 		break;
 	}
@@ -309,6 +324,12 @@ int _access_ok(unsigned long addr, unsigned long size)
 		return 1;
 	if (addr >= memory_mtd_end && (addr + size) <= physical_mem_end)
 		return 1;
+
+#ifdef CONFIG_ROMFS_MTD_FS
+	/* For XIP, allow user space to use pointers within the ROMFS. */
+	if (addr >= memory_mtd_start && (addr + size) <= memory_mtd_end)
+		return 1;
+#endif
 #else
 	if (addr >= memory_start && (addr + size) <= physical_mem_end)
 		return 1;
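The _access_ok() hunk above adds one more permitted window (the ROMFS image, for XIP) using the same bounds test as the existing cases. Below is a minimal standalone sketch of that kind of range check (plain userspace C, not the kernel function itself; the window addresses and the helper name range_ok are invented for the example):

#include <stdbool.h>
#include <stdio.h>

/* Return true when [addr, addr + size) lies entirely inside the window. */
static bool range_ok(unsigned long addr, unsigned long size,
		     unsigned long start, unsigned long end)
{
	if (addr + size < addr)		/* reject wrap-around */
		return false;
	return addr >= start && (addr + size) <= end;
}

int main(void)
{
	/* Hypothetical ROMFS window, standing in for memory_mtd_start/_end. */
	unsigned long mtd_start = 0x20000000UL, mtd_end = 0x20400000UL;

	printf("%d\n", range_ok(0x20001000UL, 0x100UL, mtd_start, mtd_end));	/* 1 */
	printf("%d\n", range_ok(0x1fffff00UL, 0x200UL, mtd_start, mtd_end));	/* 0 */
	return 0;
}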