author		Nicolas Pitre <nico@org.rmk.(none)>		2005-06-08 14:00:47 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2005-06-08 14:00:47 -0400
commit		dcef1f634657dabe7905af3ccda12cf7f0b6fcc1 (patch)
tree		e1b8bf471c3c268d0be919561268c24f298d031b /arch
parent		aeabbbbe126f3d5e61e2db07629443cd10932bb2 (diff)
[PATCH] ARM: 2664/2: add support for atomic ops on pre-ARMv6 SMP systems
Patch from Nicolas Pitre
Not that there might be many of them on the planet, but at least RMK
apparently has one.
Signed-off-by: Nicolas Pitre
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Diffstat (limited to 'arch')
-rw-r--r--	arch/arm/kernel/entry-armv.S	16
-rw-r--r--	arch/arm/kernel/traps.c		49
-rw-r--r--	arch/arm/mm/Kconfig		 8
3 files changed, 68 insertions, 5 deletions
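
Background for the change: pre-ARMv6 cores have no exclusive load/store instructions, so the user-space cmpxchg helper in the vector page normally relies on the exception entry code aborting a helper that was interrupted mid-operation (see the first hunk below). That trick only protects against preemption on the same CPU; with a second processor involved the kernel has to perform the compare-and-swap itself, which is what the new ghost syscall does. For orientation, user code reaches the helper by calling a fixed vector-page address. The following is a minimal sketch of that calling convention, assuming only the register usage described by the comment added to traps.c below; the typedef and wrapper names are illustrative, not a kernel-provided API.

/*
 * Sketch of the user-space view of the helper this patch extends.
 * The helper sits at the fixed vector-page address 0xffff0fc0 and,
 * per the comment added to traps.c, takes the expected old value in
 * r0, the new value in r1 and the pointer in r2, returning zero in
 * r0 iff *ptr was updated.  Names below are illustrative only.
 */
typedef int (kuser_cmpxchg_fn)(int oldval, int newval, volatile int *ptr);
#define __kuser_cmpxchg_addr	((kuser_cmpxchg_fn *)0xffff0fc0)

static inline int user_cmpxchg(int oldval, int newval, volatile int *ptr)
{
	/* returns 0 iff *ptr held oldval and was replaced with newval */
	return (*__kuser_cmpxchg_addr)(oldval, newval, ptr);
}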
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 4eb36155dc93..e14278d59882 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -269,7 +269,7 @@ __pabt_svc:
 	add	r5, sp, #S_PC
 	ldmia	r7, {r2 - r4}			@ Get USR pc, cpsr
 
-#if __LINUX_ARM_ARCH__ < 6
+#if __LINUX_ARM_ARCH__ < 6 && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
 	@ make sure our user space atomic helper is aborted
 	cmp	r2, #VIRT_OFFSET
 	bichs	r3, r3, #PSR_Z_BIT
@@ -616,11 +616,17 @@ __kuser_helper_start:
 
 __kuser_cmpxchg:				@ 0xffff0fc0
 
-#if __LINUX_ARM_ARCH__ < 6
+#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
 
-#ifdef CONFIG_SMP  /* sanity check */
-#error "CONFIG_SMP on a machine supporting pre-ARMv6 processors?"
-#endif
+	/*
+	 * Poor you.  No fast solution possible...
+	 * The kernel itself must perform the operation.
+	 * A special ghost syscall is used for that (see traps.c).
+	 */
+	swi	#0x9ffff0
+	mov	pc, lr
+
+#elif __LINUX_ARM_ARCH__ < 6
 
 	/*
 	 * Theory of operation:
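
The new stub does no work locally: it traps into the kernel with SWI number 0x9ffff0, which lies in the ARM-private 0x9f0000 range dispatched by arm_syscall(), so the low 16 bits (0xfff0) select the handler case added in the traps.c hunk below. A hypothetical GCC inline-asm equivalent of what the stub performs on behalf of its caller (a sketch under those assumptions, not code from this patch):

/*
 * Sketch only: load the cmpxchg operands into r0-r2 and trap with the
 * ghost SWI number.  The kernel handler leaves the result in r0 (zero
 * iff *ptr was updated) and mirrors it in the C flag.
 */
static inline int ghost_cmpxchg(int oldval, int newval, volatile int *ptr)
{
	register int r0 asm("r0") = oldval;
	register int r1 asm("r1") = newval;
	register volatile int *r2 asm("r2") = ptr;

	asm volatile("swi 0x9ffff0"
		     : "+r" (r0)
		     : "r" (r1), "r" (r2)
		     : "memory", "cc");

	return r0;
}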
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 14df16b983f4..45d2a032d890 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -464,6 +464,55 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
 #endif
 		return 0;
 
+#ifdef CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG
+	/*
+	 * Atomically store r1 in *r2 if *r2 is equal to r0 for user space.
+	 * Return zero in r0 if *MEM was changed or non-zero if no exchange
+	 * happened.  Also set the user C flag accordingly.
+	 * If access permissions have to be fixed up then non-zero is
+	 * returned and the operation has to be re-attempted.
+	 *
+	 * *NOTE*: This is a ghost syscall private to the kernel.  Only the
+	 * __kuser_cmpxchg code in entry-armv.S should be aware of its
+	 * existence.  Don't ever use this from user code.
+	 */
+	case 0xfff0:
+	{
+		extern void do_DataAbort(unsigned long addr, unsigned int fsr,
+					 struct pt_regs *regs);
+		unsigned long val;
+		unsigned long addr = regs->ARM_r2;
+		struct mm_struct *mm = current->mm;
+		pgd_t *pgd; pmd_t *pmd; pte_t *pte;
+
+		regs->ARM_cpsr &= ~PSR_C_BIT;
+		spin_lock(&mm->page_table_lock);
+		pgd = pgd_offset(mm, addr);
+		if (!pgd_present(*pgd))
+			goto bad_access;
+		pmd = pmd_offset(pgd, addr);
+		if (!pmd_present(*pmd))
+			goto bad_access;
+		pte = pte_offset_map(pmd, addr);
+		if (!pte_present(*pte) || !pte_write(*pte))
+			goto bad_access;
+		val = *(unsigned long *)addr;
+		val -= regs->ARM_r0;
+		if (val == 0) {
+			*(unsigned long *)addr = regs->ARM_r1;
+			regs->ARM_cpsr |= PSR_C_BIT;
+		}
+		spin_unlock(&mm->page_table_lock);
+		return val;
+
+	bad_access:
+		spin_unlock(&mm->page_table_lock);
+		/* simulate a read access fault */
+		do_DataAbort(addr, 15 + (1 << 11), regs);
+		return -1;
+	}
+#endif
+
 	default:
 		/* Calls 9f00xx..9f07ff are defined to return -ENOSYS
 		   if not implemented, rather than raising SIGILL.  This
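
As the comment in the new handler spells out, a non-zero return covers both "the values did not match" and "access permissions had to be fixed up"; in the latter case the simulated data abort pushes the fault through the normal page-fault path so a retry can succeed. Callers are therefore expected to loop, as in this sketch built on the illustrative user_cmpxchg() wrapper shown earlier:

/* Caller-side retry loop implied by the handler's contract (sketch only). */
static int atomic_add_return_user(int i, volatile int *counter)
{
	int old, new;

	do {
		old = *counter;
		new = old + i;
		/* non-zero return means no exchange happened: retry */
	} while (user_cmpxchg(old, new, counter) != 0);

	return new;
}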
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index ade0e2222f59..3fefb43c67f7 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -422,3 +422,11 @@ config HAS_TLS_REG
 	  assume directly accessing that register and always obtain the
 	  expected value only on ARMv7 and above.
 
+config NEEDS_SYSCALL_FOR_CMPXCHG
+	bool
+	default y if SMP && (CPU_32v5 || CPU_32v4 || CPU_32v3)
+	help
+	  SMP on a pre-ARMv6 processor?  Well OK then.
+	  Forget about fast user space cmpxchg support.
+	  It is just not possible.
+