author		John David Anglin <dave@hiauly1.hia.nrc.ca>	2010-04-11 13:26:34 -0400
committer	Kyle McMartin <kyle@dreadnought.i.jkkm.org>	2010-05-30 05:46:37 -0400
commit		f4c0346c6f350d51aac7ed87e266a4257bdbe506 (patch)
tree		d2c9649a20d307f62d90fbcbd99ecae2ee1cfe28 /arch/parisc/kernel
parent		c2dc988ec566429841dd83644479aca78a6251e7 (diff)
parisc: LWS fixes for syscall.S
1) Gate immediately and save a branch.

2) Fix off by one error in checking entry number.

3) Use sr7 instead of sr3 in error return path as sr3 might not contain
   correct value.

4) Enable locking on UP systems to prevent incorrect operation of the
   cas_action critical region on page faults.

Tested on several systems, including UP c3750 with 2.6.33.2 kernel.

Signed-off-by: John David Anglin <dave.anglin@nrc-cnrc.gc.ca>
Signed-off-by: Kyle McMartin <kyle@parisc-linux.org>
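
As a rough illustration of fix (2): the comiclr change below turns the
entry-number test into a strict bounds check, so only entries
0 .. __NR_lws_entries - 1 are dispatched. A minimal C sketch of the intended
behaviour (lws_check_entry is a hypothetical name, not the kernel's code):

	/* Hypothetical sketch of the bounds check tightened by fix (2).
	 * Valid light-weight-syscall entries are 0 .. __NR_lws_entries - 1,
	 * so entry == __NR_lws_entries must be rejected as well.
	 */
	#include <errno.h>

	#define __NR_lws_entries 2	/* matches the table size in syscall.S */

	static long lws_check_entry(unsigned long entry)
	{
		if (entry >= __NR_lws_entries)	/* old check let equality through */
			return -ENOSYS;		/* the lws_exit_nosys path */
		return 0;			/* valid; dispatch via lws_table */
	}

In the assembly this corresponds to the comiclr,>> on %r20 followed by
b,n lws_exit_nosys in the lws_start hunk below.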
Diffstat (limited to 'arch/parisc/kernel')
-rw-r--r--	arch/parisc/kernel/syscall.S	32
1 file changed, 9 insertions, 23 deletions
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index f5f96021caa0..68e75ce838d6 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -47,18 +47,17 @@ ENTRY(linux_gateway_page)
 	KILL_INSN
 	.endr
 
-	/* ADDRESS 0xb0 to 0xb4, lws uses 1 insns for entry */
+	/* ADDRESS 0xb0 to 0xb8, lws uses two insns for entry */
 	/* Light-weight-syscall entry must always be located at 0xb0 */
 	/* WARNING: Keep this number updated with table size changes */
 #define __NR_lws_entries (2)
 
 lws_entry:
-	/* Unconditional branch to lws_start, located on the
-	   same gateway page */
-	b,n	lws_start
+	gate	lws_start, %r0		/* increase privilege */
+	depi	3, 31, 2, %r31		/* Ensure we return into user mode. */
 
-	/* Fill from 0xb4 to 0xe0 */
-	.rept 11
+	/* Fill from 0xb8 to 0xe0 */
+	.rept 10
 	KILL_INSN
 	.endr
 
@@ -423,9 +422,6 @@ tracesys_sigexit:
 
 	*********************************************************/
 lws_start:
-	/* Gate and ensure we return to userspace */
-	gate	.+8, %r0
-	depi	3, 31, 2, %r31	/* Ensure we return to userspace */
 
 #ifdef CONFIG_64BIT
 	/* FIXME: If we are a 64-bit kernel just
@@ -442,7 +438,7 @@ lws_start:
 #endif
 
 	/* Is the lws entry number valid? */
-	comiclr,>>=	__NR_lws_entries, %r20, %r0
+	comiclr,>>	__NR_lws_entries, %r20, %r0
 	b,n	lws_exit_nosys
 
 	/* WARNING: Trashing sr2 and sr3 */
@@ -473,7 +469,7 @@ lws_exit:
 	/* now reset the lowest bit of sp if it was set */
 	xor	%r30,%r1,%r30
 #endif
-	be,n	0(%sr3, %r31)
+	be,n	0(%sr7, %r31)
 
 
 
@@ -529,7 +525,6 @@ lws_compare_and_swap32:
 #endif
 
 lws_compare_and_swap:
-#ifdef CONFIG_SMP
 	/* Load start of lock table */
 	ldil	L%lws_lock_start, %r20
 	ldo	R%lws_lock_start(%r20), %r28
@@ -572,8 +567,6 @@ cas_wouldblock:
 	ldo	2(%r0), %r28			/* 2nd case */
 	b	lws_exit			/* Contended... */
 	ldo	-EAGAIN(%r0), %r21		/* Spin in userspace */
-#endif
-/* CONFIG_SMP */
 
 	/*
 		prev = *addr;
@@ -601,13 +594,11 @@ cas_action:
 1:	ldw	0(%sr3,%r26), %r28
 	sub,<>	%r28, %r25, %r0
 2:	stw	%r24, 0(%sr3,%r26)
-#ifdef CONFIG_SMP
 	/* Free lock */
 	stw	%r20, 0(%sr2,%r20)
-# if ENABLE_LWS_DEBUG
+#if ENABLE_LWS_DEBUG
 	/* Clear thread register indicator */
 	stw	%r0, 4(%sr2,%r20)
-# endif
 #endif
 	/* Return to userspace, set no error */
 	b	lws_exit
@@ -615,12 +606,10 @@ cas_action:
 
 3:
 	/* Error occured on load or store */
-#ifdef CONFIG_SMP
 	/* Free lock */
 	stw	%r20, 0(%sr2,%r20)
-# if ENABLE_LWS_DEBUG
+#if ENABLE_LWS_DEBUG
 	stw	%r0, 4(%sr2,%r20)
-# endif
 #endif
 	b	lws_exit
 	ldo	-EFAULT(%r0),%r21	/* set errno */
@@ -672,7 +661,6 @@ ENTRY(sys_call_table64)
 END(sys_call_table64)
 #endif
 
-#ifdef CONFIG_SMP
 	/*
 		All light-weight-syscall atomic operations
 		will use this set of locks
@@ -694,8 +682,6 @@ ENTRY(lws_lock_start)
 	.endr
 END(lws_lock_start)
 	.previous
-#endif
-/* CONFIG_SMP for lws_lock_start */
 
 .end
 