diff options
Diffstat (limited to 'arch/mips/kernel/entry.S')
-rw-r--r-- | arch/mips/kernel/entry.S | 54 |
1 file changed, 23 insertions, 31 deletions
diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S index 5eb429137e06..83c87fe4ee4f 100644 --- a/arch/mips/kernel/entry.S +++ b/arch/mips/kernel/entry.S | |||
@@ -19,11 +19,11 @@ | |||
19 | #include <asm/war.h> | 19 | #include <asm/war.h> |
20 | 20 | ||
21 | #ifdef CONFIG_PREEMPT | 21 | #ifdef CONFIG_PREEMPT |
22 | .macro preempt_stop reg=t0 | 22 | .macro preempt_stop |
23 | .endm | 23 | .endm |
24 | #else | 24 | #else |
25 | .macro preempt_stop reg=t0 | 25 | .macro preempt_stop |
26 | local_irq_disable \reg | 26 | local_irq_disable |
27 | .endm | 27 | .endm |
28 | #define resume_kernel restore_all | 28 | #define resume_kernel restore_all |
29 | #endif | 29 | #endif |
@@ -37,17 +37,18 @@ FEXPORT(ret_from_irq) | |||
37 | andi t0, t0, KU_USER | 37 | andi t0, t0, KU_USER |
38 | beqz t0, resume_kernel | 38 | beqz t0, resume_kernel |
39 | 39 | ||
40 | FEXPORT(resume_userspace) | 40 | resume_userspace: |
41 | local_irq_disable t0 # make sure we dont miss an | 41 | local_irq_disable # make sure we dont miss an |
42 | # interrupt setting need_resched | 42 | # interrupt setting need_resched |
43 | # between sampling and return | 43 | # between sampling and return |
44 | LONG_L a2, TI_FLAGS($28) # current->work | 44 | LONG_L a2, TI_FLAGS($28) # current->work |
45 | andi a2, _TIF_WORK_MASK # (ignoring syscall_trace) | 45 | andi t0, a2, _TIF_WORK_MASK # (ignoring syscall_trace) |
46 | bnez a2, work_pending | 46 | bnez t0, work_pending |
47 | j restore_all | 47 | j restore_all |
48 | 48 | ||
49 | #ifdef CONFIG_PREEMPT | 49 | #ifdef CONFIG_PREEMPT |
50 | ENTRY(resume_kernel) | 50 | resume_kernel: |
51 | local_irq_disable | ||
51 | lw t0, TI_PRE_COUNT($28) | 52 | lw t0, TI_PRE_COUNT($28) |
52 | bnez t0, restore_all | 53 | bnez t0, restore_all |
53 | need_resched: | 54 | need_resched: |
@@ -57,12 +58,7 @@ need_resched: | |||
57 | LONG_L t0, PT_STATUS(sp) # Interrupts off? | 58 | LONG_L t0, PT_STATUS(sp) # Interrupts off? |
58 | andi t0, 1 | 59 | andi t0, 1 |
59 | beqz t0, restore_all | 60 | beqz t0, restore_all |
60 | li t0, PREEMPT_ACTIVE | 61 | jal preempt_schedule_irq |
61 | sw t0, TI_PRE_COUNT($28) | ||
62 | local_irq_enable t0 | ||
63 | jal schedule | ||
64 | sw zero, TI_PRE_COUNT($28) | ||
65 | local_irq_disable t0 | ||
66 | b need_resched | 62 | b need_resched |
67 | #endif | 63 | #endif |
68 | 64 | ||
@@ -88,13 +84,13 @@ FEXPORT(restore_partial) # restore partial frame | |||
88 | RESTORE_SP_AND_RET | 84 | RESTORE_SP_AND_RET |
89 | .set at | 85 | .set at |
90 | 86 | ||
91 | FEXPORT(work_pending) | 87 | work_pending: |
92 | andi t0, a2, _TIF_NEED_RESCHED | 88 | andi t0, a2, _TIF_NEED_RESCHED # a2 is preloaded with TI_FLAGS |
93 | beqz t0, work_notifysig | 89 | beqz t0, work_notifysig |
94 | work_resched: | 90 | work_resched: |
95 | jal schedule | 91 | jal schedule |
96 | 92 | ||
97 | local_irq_disable t0 # make sure need_resched and | 93 | local_irq_disable # make sure need_resched and |
98 | # signals dont change between | 94 | # signals dont change between |
99 | # sampling and return | 95 | # sampling and return |
100 | LONG_L a2, TI_FLAGS($28) | 96 | LONG_L a2, TI_FLAGS($28) |
@@ -109,15 +105,14 @@ work_notifysig: # deal with pending signals and | |||
109 | move a0, sp | 105 | move a0, sp |
110 | li a1, 0 | 106 | li a1, 0 |
111 | jal do_notify_resume # a2 already loaded | 107 | jal do_notify_resume # a2 already loaded |
112 | j restore_all | 108 | j resume_userspace |
113 | 109 | ||
114 | FEXPORT(syscall_exit_work_partial) | 110 | FEXPORT(syscall_exit_work_partial) |
115 | SAVE_STATIC | 111 | SAVE_STATIC |
116 | FEXPORT(syscall_exit_work) | 112 | syscall_exit_work: |
117 | LONG_L t0, TI_FLAGS($28) | 113 | li t0, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT |
118 | li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | 114 | and t0, a2 # a2 is preloaded with TI_FLAGS |
119 | and t0, t1 | 115 | beqz t0, work_pending # trace bit set? |
120 | beqz t0, work_pending # trace bit is set | ||
121 | local_irq_enable # could let do_syscall_trace() | 116 | local_irq_enable # could let do_syscall_trace() |
122 | # call schedule() instead | 117 | # call schedule() instead |
123 | move a0, sp | 118 | move a0, sp |
@@ -128,28 +123,25 @@ FEXPORT(syscall_exit_work) | |||
128 | /* | 123 | /* |
129 | * Common spurious interrupt handler. | 124 | * Common spurious interrupt handler. |
130 | */ | 125 | */ |
131 | .text | ||
132 | .align 5 | ||
133 | LEAF(spurious_interrupt) | 126 | LEAF(spurious_interrupt) |
134 | /* | 127 | /* |
135 | * Someone tried to fool us by sending an interrupt but we | 128 | * Someone tried to fool us by sending an interrupt but we |
136 | * couldn't find a cause for it. | 129 | * couldn't find a cause for it. |
137 | */ | 130 | */ |
131 | PTR_LA t1, irq_err_count | ||
138 | #ifdef CONFIG_SMP | 132 | #ifdef CONFIG_SMP |
139 | lui t1, %hi(irq_err_count) | 133 | 1: ll t0, (t1) |
140 | 1: ll t0, %lo(irq_err_count)(t1) | ||
141 | addiu t0, 1 | 134 | addiu t0, 1 |
142 | sc t0, %lo(irq_err_count)(t1) | 135 | sc t0, (t1) |
143 | #if R10000_LLSC_WAR | 136 | #if R10000_LLSC_WAR |
144 | beqzl t0, 1b | 137 | beqzl t0, 1b |
145 | #else | 138 | #else |
146 | beqz t0, 1b | 139 | beqz t0, 1b |
147 | #endif | 140 | #endif |
148 | #else | 141 | #else |
149 | lui t1, %hi(irq_err_count) | 142 | lw t0, (t1) |
150 | lw t0, %lo(irq_err_count)(t1) | ||
151 | addiu t0, 1 | 143 | addiu t0, 1 |
152 | sw t0, %lo(irq_err_count)(t1) | 144 | sw t0, (t1) |
153 | #endif | 145 | #endif |
154 | j ret_from_irq | 146 | j ret_from_irq |
155 | END(spurious_interrupt) | 147 | END(spurious_interrupt) |