author     Michal Simek <monstr@monstr.eu>   2010-01-22 04:24:06 -0500
committer  Michal Simek <monstr@monstr.eu>   2010-03-11 08:25:30 -0500
commit     b1d70c62fff3e8b6224699801c610c244882685a (patch)
tree       478a60fc008e69f300ab4ea1fc4b68c1f0f3a4bf /arch
parent     79bf3a137617e6deeac411c39f1660b7e91d6348 (diff)
microblaze: Simplify entry.S - save/restore r3/r4 - ret_from_trap
It is possible to save r3/r4 once at the beginning of the user-mode part,
before calling the handlers, and restore them at the end.
Signed-off-by: Michal Simek <monstr@monstr.eu>
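
In outline, the change hoists the save of r3/r4 (the syscall result) to a single point at the top of the user-mode return path and restores them once just before dropping back to user state, instead of wrapping each of do_syscall_trace_leave, schedule and do_signal in its own save/restore pair. A rough sketch of the resulting flow (illustrative only; labels and surrounding code abbreviated, not the literal entry.S text):

	/* ret_from_trap, returning to user mode: save the syscall result once */
	swi	r3, r1, PTO + PT_R3
	swi	r4, r1, PTO + PT_R4

	/* ... syscall tracing, rescheduling and signal delivery may run here
	 * and are free to clobber r3/r4 ... */

1:	/* finally, restore the syscall result and return to user state */
	lwi	r3, r1, PTO + PT_R3
	lwi	r4, r1, PTO + PT_R4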
Diffstat (limited to 'arch')
-rw-r--r--  arch/microblaze/kernel/entry.S  78
1 file changed, 30 insertions, 48 deletions
diff --git a/arch/microblaze/kernel/entry.S b/arch/microblaze/kernel/entry.S
index 1a6729dde49e..772fe7415f82 100644
--- a/arch/microblaze/kernel/entry.S
+++ b/arch/microblaze/kernel/entry.S
@@ -305,7 +305,7 @@ C_ENTRY(_user_exception):
 	swi r11, r1, PTO+PT_R1; /* Store user SP. */
 	addi r11, r0, 1;
 	swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode. */
-2:	lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
+2:	lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
 	/* Save away the syscall number. */
 	swi r12, r1, PTO+PT_R0;
 	tovirt(r1,r1)
@@ -322,8 +322,7 @@ C_ENTRY(_user_exception):
 	rtid r11, 0
 	nop
 3:
-	add r11, r0, CURRENT_TASK /* Get current task ptr into r11 */
-	lwi r11, r11, TS_THREAD_INFO /* get thread info */
+	lwi r11, CURRENT_TASK, TS_THREAD_INFO /* get thread info */
 	lwi r11, r11, TI_FLAGS /* get flags in thread info */
 	andi r11, r11, _TIF_WORK_SYSCALL_MASK
 	beqi r11, 4f
@@ -382,58 +381,50 @@ C_ENTRY(ret_from_trap):
 	/* See if returning to kernel mode, if so, skip resched &c. */
 	bnei r11, 2f;

+	swi r3, r1, PTO + PT_R3
+	swi r4, r1, PTO + PT_R4
+
 	/* We're returning to user mode, so check for various conditions that
 	 * trigger rescheduling. */
-	# FIXME: Restructure all these flag checks.
-	add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
-	lwi r11, r11, TS_THREAD_INFO; /* get thread info */
+	/* FIXME: Restructure all these flag checks. */
+	lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
 	lwi r11, r11, TI_FLAGS; /* get flags in thread info */
 	andi r11, r11, _TIF_WORK_SYSCALL_MASK
 	beqi r11, 1f

-	swi r3, r1, PTO + PT_R3
-	swi r4, r1, PTO + PT_R4
 	brlid r15, do_syscall_trace_leave
 	addik r5, r1, PTO + PT_R0
-	lwi r3, r1, PTO + PT_R3
-	lwi r4, r1, PTO + PT_R4
 1:
-
 	/* We're returning to user mode, so check for various conditions that
 	 * trigger rescheduling. */
-	/* Get current task ptr into r11 */
-	add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
-	lwi r11, r11, TS_THREAD_INFO; /* get thread info */
+	/* get thread info from current task */
+	lwi r11, CURRENT_TASK, TS_THREAD_INFO;
 	lwi r11, r11, TI_FLAGS; /* get flags in thread info */
 	andi r11, r11, _TIF_NEED_RESCHED;
 	beqi r11, 5f;

-	swi r3, r1, PTO + PT_R3; /* store syscall result */
-	swi r4, r1, PTO + PT_R4;
 	bralid r15, schedule; /* Call scheduler */
 	nop; /* delay slot */
-	lwi r3, r1, PTO + PT_R3; /* restore syscall result */
-	lwi r4, r1, PTO + PT_R4;

 	/* Maybe handle a signal */
-5:	add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
-	lwi r11, r11, TS_THREAD_INFO; /* get thread info */
+5:	/* get thread info from current task*/
+	lwi r11, CURRENT_TASK, TS_THREAD_INFO;
 	lwi r11, r11, TI_FLAGS; /* get flags in thread info */
 	andi r11, r11, _TIF_SIGPENDING;
 	beqi r11, 1f; /* Signals to handle, handle them */

-	swi r3, r1, PTO + PT_R3; /* store syscall result */
-	swi r4, r1, PTO + PT_R4;
 	la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
 	add r6, r0, r0; /* Arg 2: sigset_t *oldset */
 	addi r7, r0, 1; /* Arg 3: int in_syscall */
 	bralid r15, do_signal; /* Handle any signals */
 	nop;
+
+/* Finally, return to user state. */
+1:
 	lwi r3, r1, PTO + PT_R3; /* restore syscall result */
 	lwi r4, r1, PTO + PT_R4;

-/* Finally, return to user state. */
-1:	swi r0, r0, PER_CPU(KM); /* Now officially in user state. */
+	swi r0, r0, PER_CPU(KM); /* Now officially in user state. */
 	add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
 	swi r11, r0, PER_CPU(CURRENT_SAVE); /* save current */
 	VM_OFF;
@@ -565,7 +556,7 @@ C_ENTRY(sys_rt_sigreturn_wrapper):
 	swi r11, r1, PTO+PT_R1; /* Store user SP. */ \
 	addi r11, r0, 1; \
 	swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode.*/\
-2:	lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\
+2:	lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); \
 	/* Save away the syscall number. */ \
 	swi r0, r1, PTO+PT_R0; \
 	tovirt(r1,r1)
@@ -673,9 +664,7 @@ C_ENTRY(ret_from_exc):

 	/* We're returning to user mode, so check for various conditions that
 	   trigger rescheduling. */
-	/* Get current task ptr into r11 */
-	add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
-	lwi r11, r11, TS_THREAD_INFO; /* get thread info */
+	lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
 	lwi r11, r11, TI_FLAGS; /* get flags in thread info */
 	andi r11, r11, _TIF_NEED_RESCHED;
 	beqi r11, 5f;
@@ -685,8 +674,7 @@ C_ENTRY(ret_from_exc):
 	nop; /* delay slot */

 	/* Maybe handle a signal */
-5:	add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
-	lwi r11, r11, TS_THREAD_INFO; /* get thread info */
+5:	lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
 	lwi r11, r11, TI_FLAGS; /* get flags in thread info */
 	andi r11, r11, _TIF_SIGPENDING;
 	beqi r11, 1f; /* Signals to handle, handle them */
@@ -802,7 +790,7 @@ C_ENTRY(_interrupt):
 	swi r11, r0, TOPHYS(PER_CPU(KM));

 2:
-	lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
+	lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
 	swi r0, r1, PTO + PT_R0;
 	tovirt(r1,r1)
 	la r5, r1, PTO;
@@ -817,8 +805,7 @@ ret_from_irq:
 	lwi r11, r1, PTO + PT_MODE;
 	bnei r11, 2f;

-	add r11, r0, CURRENT_TASK;
-	lwi r11, r11, TS_THREAD_INFO;
+	lwi r11, CURRENT_TASK, TS_THREAD_INFO;
 	lwi r11, r11, TI_FLAGS; /* MS: get flags from thread info */
 	andi r11, r11, _TIF_NEED_RESCHED;
 	beqi r11, 5f
@@ -826,8 +813,7 @@ ret_from_irq:
 	nop; /* delay slot */

 	/* Maybe handle a signal */
-5:	add r11, r0, CURRENT_TASK;
-	lwi r11, r11, TS_THREAD_INFO; /* MS: get thread info */
+5:	lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* MS: get thread info */
 	lwi r11, r11, TI_FLAGS; /* get flags in thread info */
 	andi r11, r11, _TIF_SIGPENDING;
 	beqid r11, no_intr_resched
@@ -855,8 +841,7 @@ no_intr_resched:
 	/* MS: Return to kernel state. */
 2:
 #ifdef CONFIG_PREEMPT
-	add r11, r0, CURRENT_TASK;
-	lwi r11, r11, TS_THREAD_INFO;
+	lwi r11, CURRENT_TASK, TS_THREAD_INFO;
 	/* MS: get preempt_count from thread info */
 	lwi r5, r11, TI_PREEMPT_COUNT;
 	bgti r5, restore;
@@ -869,8 +854,7 @@ preempt:
 	/* interrupts are off that's why I am calling preempt_chedule_irq */
 	bralid r15, preempt_schedule_irq
 	nop
-	add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
-	lwi r11, r11, TS_THREAD_INFO; /* get thread info */
+	lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
 	lwi r5, r11, TI_FLAGS; /* get flags in thread info */
 	andi r5, r5, _TIF_NEED_RESCHED;
 	bnei r5, preempt /* if non zero jump to resched */
@@ -938,7 +922,7 @@ C_ENTRY(_debug_exception):
 	swi r11, r1, PTO+PT_R1; /* Store user SP. */
 	addi r11, r0, 1;
 	swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode. */
-2:	lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
+2:	lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
 	/* Save away the syscall number. */
 	swi r0, r1, PTO+PT_R0;
 	tovirt(r1,r1)
@@ -958,8 +942,7 @@ dbtrap_call: rtbd r11, 0;
 	bnei r11, 2f;

 	/* Get current task ptr into r11 */
-	add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
-	lwi r11, r11, TS_THREAD_INFO; /* get thread info */
+	lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
 	lwi r11, r11, TI_FLAGS; /* get flags in thread info */
 	andi r11, r11, _TIF_NEED_RESCHED;
 	beqi r11, 5f;
@@ -972,8 +955,7 @@ dbtrap_call: rtbd r11, 0;
 	/* XXX m68knommu also checks TASK_STATE & TASK_COUNTER here. */

 	/* Maybe handle a signal */
-5:	add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
-	lwi r11, r11, TS_THREAD_INFO; /* get thread info */
+5:	lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
 	lwi r11, r11, TI_FLAGS; /* get flags in thread info */
 	andi r11, r11, _TIF_SIGPENDING;
 	beqi r11, 1f; /* Signals to handle, handle them */
@@ -1030,7 +1012,7 @@ DBTRAP_return: /* Make global symbol for debugging */

 ENTRY(_switch_to)
 	/* prepare return value */
-	addk r3, r0, r31
+	addk r3, r0, CURRENT_TASK

 	/* save registers in cpu_context */
 	/* use r11 and r12, volatile registers, as temp register */
@@ -1074,10 +1056,10 @@ ENTRY(_switch_to)
 	nop
 	swi r12, r11, CC_FSR

-	/* update r31, the current */
-	lwi r31, r6, TI_TASK/* give me pointer to task which will be next */
+	/* update r31, the current-give me pointer to task which will be next */
+	lwi CURRENT_TASK, r6, TI_TASK
 	/* stored it to current_save too */
-	swi r31, r0, PER_CPU(CURRENT_SAVE)
+	swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE)
 	/* get new process' cpu context and restore */
 	/* give me start where start context of next task */