Diffstat (limited to 'arch/blackfin/kernel/process.c')
-rw-r--r--  arch/blackfin/kernel/process.c  95
1 file changed, 73 insertions(+), 22 deletions(-)
diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c
index 45876427eb2d..b56b0e485e0b 100644
--- a/arch/blackfin/kernel/process.c
+++ b/arch/blackfin/kernel/process.c
@@ -258,9 +258,12 @@ void finish_atomic_sections (struct pt_regs *regs)
 	int __user *up0 = (int __user *)regs->p0;
 
 	switch (regs->pc) {
+	default:
+		/* not in middle of an atomic step, so resume like normal */
+		return;
+
 	case ATOMIC_XCHG32 + 2:
 		put_user(regs->r1, up0);
-		regs->pc = ATOMIC_XCHG32 + 4;
 		break;
 
 	case ATOMIC_CAS32 + 2:
@@ -268,7 +271,6 @@ void finish_atomic_sections (struct pt_regs *regs)
 		if (regs->r0 == regs->r1)
 	case ATOMIC_CAS32 + 6:
 			put_user(regs->r2, up0);
-		regs->pc = ATOMIC_CAS32 + 8;
 		break;
 
 	case ATOMIC_ADD32 + 2:
@@ -276,7 +278,6 @@ void finish_atomic_sections (struct pt_regs *regs)
 		/* fall through */
 	case ATOMIC_ADD32 + 4:
 		put_user(regs->r0, up0);
-		regs->pc = ATOMIC_ADD32 + 6;
 		break;
 
 	case ATOMIC_SUB32 + 2:
@@ -284,7 +285,6 @@ void finish_atomic_sections (struct pt_regs *regs)
 		/* fall through */
 	case ATOMIC_SUB32 + 4:
 		put_user(regs->r0, up0);
-		regs->pc = ATOMIC_SUB32 + 6;
 		break;
 
 	case ATOMIC_IOR32 + 2:
@@ -292,7 +292,6 @@ void finish_atomic_sections (struct pt_regs *regs)
 		/* fall through */
 	case ATOMIC_IOR32 + 4:
 		put_user(regs->r0, up0);
-		regs->pc = ATOMIC_IOR32 + 6;
 		break;
 
 	case ATOMIC_AND32 + 2:
@@ -300,7 +299,6 @@ void finish_atomic_sections (struct pt_regs *regs)
 		/* fall through */
 	case ATOMIC_AND32 + 4:
 		put_user(regs->r0, up0);
-		regs->pc = ATOMIC_AND32 + 6;
 		break;
 
 	case ATOMIC_XOR32 + 2:
@@ -308,9 +306,15 @@ void finish_atomic_sections (struct pt_regs *regs)
 		/* fall through */
 	case ATOMIC_XOR32 + 4:
 		put_user(regs->r0, up0);
-		regs->pc = ATOMIC_XOR32 + 6;
 		break;
 	}
+
+	/*
+	 * We've finished the atomic section, and the only thing left for
+	 * userspace is to do a RTS, so we might as well handle that too
+	 * since we need to update the PC anyways.
+	 */
+	regs->pc = regs->rets;
 }
 
 static inline
@@ -332,12 +336,58 @@ int in_mem_const(unsigned long addr, unsigned long size,
 {
 	return in_mem_const_off(addr, size, 0, const_addr, const_size);
 }
-#define IN_ASYNC(bnum, bctlnum) \
+#define ASYNC_ENABLED(bnum, bctlnum) \
 ({ \
-	(bfin_read_EBIU_AMGCTL() & 0xe) < ((bnum + 1) << 1) ? -EFAULT : \
-	bfin_read_EBIU_AMBCTL##bctlnum() & B##bnum##RDYEN ? -EFAULT : \
-	BFIN_MEM_ACCESS_CORE; \
+	(bfin_read_EBIU_AMGCTL() & 0xe) < ((bnum + 1) << 1) ? 0 : \
+	bfin_read_EBIU_AMBCTL##bctlnum() & B##bnum##RDYEN ? 0 : \
+	1; \
 })
+/*
+ * We can't read EBIU banks that aren't enabled or we end up hanging
+ * on the access to the async space. Make sure we validate accesses
+ * that cross async banks too.
+ * 0 - found, but unusable
+ * 1 - found & usable
+ * 2 - not found
+ */
+static
+int in_async(unsigned long addr, unsigned long size)
+{
+	if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK0_BASE + ASYNC_BANK0_SIZE) {
+		if (!ASYNC_ENABLED(0, 0))
+			return 0;
+		if (addr + size <= ASYNC_BANK0_BASE + ASYNC_BANK0_SIZE)
+			return 1;
+		size -= ASYNC_BANK0_BASE + ASYNC_BANK0_SIZE - addr;
+		addr = ASYNC_BANK0_BASE + ASYNC_BANK0_SIZE;
+	}
+	if (addr >= ASYNC_BANK1_BASE && addr < ASYNC_BANK1_BASE + ASYNC_BANK1_SIZE) {
+		if (!ASYNC_ENABLED(1, 0))
+			return 0;
+		if (addr + size <= ASYNC_BANK1_BASE + ASYNC_BANK1_SIZE)
+			return 1;
+		size -= ASYNC_BANK1_BASE + ASYNC_BANK1_SIZE - addr;
+		addr = ASYNC_BANK1_BASE + ASYNC_BANK1_SIZE;
+	}
+	if (addr >= ASYNC_BANK2_BASE && addr < ASYNC_BANK2_BASE + ASYNC_BANK2_SIZE) {
+		if (!ASYNC_ENABLED(2, 1))
+			return 0;
+		if (addr + size <= ASYNC_BANK2_BASE + ASYNC_BANK2_SIZE)
+			return 1;
+		size -= ASYNC_BANK2_BASE + ASYNC_BANK2_SIZE - addr;
+		addr = ASYNC_BANK2_BASE + ASYNC_BANK2_SIZE;
+	}
+	if (addr >= ASYNC_BANK3_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE) {
+		if (ASYNC_ENABLED(3, 1))
+			return 0;
+		if (addr + size <= ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE)
+			return 1;
+		return 0;
+	}
+
+	/* not within async bounds */
+	return 2;
+}
 
 int bfin_mem_access_type(unsigned long addr, unsigned long size)
 {
@@ -374,17 +424,11 @@ int bfin_mem_access_type(unsigned long addr, unsigned long size)
 	if (addr >= SYSMMR_BASE)
 		return BFIN_MEM_ACCESS_CORE_ONLY;
 
-	/* We can't read EBIU banks that aren't enabled or we end up hanging
-	 * on the access to the async space.
-	 */
-	if (in_mem_const(addr, size, ASYNC_BANK0_BASE, ASYNC_BANK0_SIZE))
-		return IN_ASYNC(0, 0);
-	if (in_mem_const(addr, size, ASYNC_BANK1_BASE, ASYNC_BANK1_SIZE))
-		return IN_ASYNC(1, 0);
-	if (in_mem_const(addr, size, ASYNC_BANK2_BASE, ASYNC_BANK2_SIZE))
-		return IN_ASYNC(2, 1);
-	if (in_mem_const(addr, size, ASYNC_BANK3_BASE, ASYNC_BANK3_SIZE))
-		return IN_ASYNC(3, 1);
+	switch (in_async(addr, size)) {
+	case 0: return -EFAULT;
+	case 1: return BFIN_MEM_ACCESS_CORE;
+	case 2: /* fall through */;
+	}
 
 	if (in_mem_const(addr, size, BOOT_ROM_START, BOOT_ROM_LENGTH))
 		return BFIN_MEM_ACCESS_CORE;
@@ -401,6 +445,8 @@ __attribute__((l1_text))
 /* Return 1 if access to memory range is OK, 0 otherwise */
 int _access_ok(unsigned long addr, unsigned long size)
 {
+	int aret;
+
 	if (size == 0)
 		return 1;
 	/* Check that things do not wrap around */
@@ -450,6 +496,11 @@ int _access_ok(unsigned long addr, unsigned long size)
 	if (in_mem_const(addr, size, COREB_L1_DATA_B_START, COREB_L1_DATA_B_LENGTH))
 		return 1;
 #endif
+
+	aret = in_async(addr, size);
+	if (aret < 2)
+		return aret;
+
 	if (in_mem_const_off(addr, size, _ebss_l2 - _stext_l2, L2_START, L2_LENGTH))
 		return 1;
 
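
Note: the new in_async() helper in this patch returns a three-way result (0 = the range hits an async bank that is unusable, 1 = the range lies entirely in usable async banks, 2 = the range is outside the async region), and the two callers consume it differently: bfin_mem_access_type() maps 0 to -EFAULT and 1 to BFIN_MEM_ACCESS_CORE, while _access_ok() returns 0/1 directly and only keeps scanning the other memory regions on 2. The sketch below illustrates that contract off-target; the bank base/size constants, the BANK0_ENABLED flag (standing in for the ASYNC_ENABLED() MMR checks), and check_user_buffer() are hypothetical stand-ins, not part of this commit.

#include <stdio.h>

/* Hypothetical stand-ins for one Blackfin async bank; on hardware these
 * come from the memory map headers and the EBIU_AMGCTL/AMBCTL registers. */
#define ASYNC_BANK0_BASE 0x20000000UL
#define ASYNC_BANK0_SIZE 0x00100000UL
#define BANK0_ENABLED    1  /* would be ASYNC_ENABLED(0, 0) in the patch */

/* Same tri-state contract as in_async():
 *   0 - found, but unusable
 *   1 - found & usable
 *   2 - not found (caller keeps checking other regions)
 */
static int in_async_stub(unsigned long addr, unsigned long size)
{
	if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK0_BASE + ASYNC_BANK0_SIZE) {
		if (!BANK0_ENABLED)
			return 0;
		if (addr + size <= ASYNC_BANK0_BASE + ASYNC_BANK0_SIZE)
			return 1;
		return 0;  /* spills past the only bank modelled here */
	}
	return 2;  /* not within async bounds */
}

/* Caller pattern from _access_ok(): 0 and 1 are final answers,
 * 2 means "fall through to the remaining range checks". */
static int check_user_buffer(unsigned long addr, unsigned long size)
{
	int aret = in_async_stub(addr, size);

	if (aret < 2)
		return aret;

	/* ... L1/L2/SDRAM checks would continue here ... */
	return 1;
}

int main(void)
{
	printf("%d\n", check_user_buffer(0x20000000UL, 0x100));  /* 1: usable async */
	printf("%d\n", check_user_buffer(0x200fff00UL, 0x200));  /* 0: crosses out of the bank */
	printf("%d\n", check_user_buffer(0x00001000UL, 0x100));  /* 2 -> falls through, 1 here */
	return 0;
}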