author     Hirokazu Takata <takata@linux-m32r.org>    2006-04-19 01:21:38 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>      2006-04-19 12:13:51 -0400
commit     4127272c38619c56f0c1aa01d01c7bd757db70a1
tree       d12ca9b4ba542a93957dc50c009928299ea66991
parent     dd1c1e3e9ed04d33a698925238e527b7051f64b9
[PATCH] m32r: update switch_to macro for tuning
- Remove unnecessary push/pops from the switch_to() macro
  for performance tuning.
- Cosmetic updates: change __inline__ to inline, etc.
Signed-off-by: Hirokazu Takata <takata@linux-m32r.org>
Cc: NIIBE Yutaka <gniibe@fsij.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--  arch/m32r/kernel/entry.S  |  6
-rw-r--r--  include/asm-m32r/system.h | 67
2 files changed, 26 insertions(+), 47 deletions(-)
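
A note on the constraint trick the reworked switch_to() below relies on: instead of binding prev, next and the thread-field addresses to explicit register variables (r0-r6), the new macro passes them as asm operands and uses a "0" matching constraint so that `prev' goes in and `last' comes back out of the same operand slot. The following is only a minimal, hypothetical sketch of that GCC extended-asm feature, not code from this patch: the function name pass_through is invented, and the asm body is left empty so it builds and runs on any GCC/Clang target.

#include <stdio.h>

/* Sketch: a "0" matching constraint ties input `in' to output operand %0,
 * so the compiler allocates one register for both.  In the new switch_to()
 * this is how the same slot carries `prev' into the asm and `last' out of it
 * after the stack switch.  No real instructions are emitted here. */
static inline unsigned long pass_through(unsigned long in)
{
	unsigned long out;

	__asm__ __volatile__ (
		""			/* empty body; constraints only */
		: "=r" (out)		/* %0: output register */
		: "0" (in)		/* input shares the register of %0 */
		: "memory");
	return out;
}

int main(void)
{
	printf("%lu\n", pass_through(42UL));	/* prints 42 */
	return 0;
}
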
diff --git a/arch/m32r/kernel/entry.S b/arch/m32r/kernel/entry.S
index 5e4a0c8a5d3c..920bb742b7a2 100644
--- a/arch/m32r/kernel/entry.S
+++ b/arch/m32r/kernel/entry.S
@@ -132,7 +132,7 @@ VM_MASK = 0x00020000
 #endif
 
 ENTRY(ret_from_fork)
-	ld	r0, @sp+
+	pop	r0
 	bl	schedule_tail
 	GET_THREAD_INFO(r8)
 	bra	syscall_exit
@@ -310,7 +310,7 @@ ENTRY(ei_handler)
 	; GET_ICU_STATUS;
 	seth	r0, #shigh(M32R_ICU_ISTS_ADDR)
 	ld	r0, @(low(M32R_ICU_ISTS_ADDR),r0)
-	st	r0, @-sp
+	push	r0
 #if defined(CONFIG_SMP)
 	/*
 	 * If IRQ == 0 --> Nothing to do, Not write IMASK
@@ -547,7 +547,7 @@ check_end:
 #endif /* CONFIG_PLAT_M32104UT */
 	bl	do_IRQ
 #endif /* CONFIG_SMP */
-	ld	r14, @sp+
+	pop	r14
 	seth	r0, #shigh(M32R_ICU_IMASK_ADDR)
 	st	r14, @(low(M32R_ICU_IMASK_ADDR),r0)
 #else
diff --git a/include/asm-m32r/system.h b/include/asm-m32r/system.h
index c5ab5da56d21..e55013f378e5 100644
--- a/include/asm-m32r/system.h
+++ b/include/asm-m32r/system.h
@@ -6,8 +6,8 @@
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 2001 by Hiroyuki Kondo, Hirokazu Takata, and Hitoshi Yamamoto
- * Copyright (C) 2004 Hirokazu Takata <takata at linux-m32r.org>
+ * Copyright (C) 2001 Hiroyuki Kondo, Hirokazu Takata, and Hitoshi Yamamoto
+ * Copyright (C) 2004, 2006 Hirokazu Takata <takata at linux-m32r.org>
  */
 
 #include <linux/config.h>
@@ -19,49 +19,28 @@
  * switch_to(prev, next) should switch from task `prev' to `next'
  * `prev' will never be the same as `next'.
  *
- * `next' and `prev' should be struct task_struct, but it isn't always defined
+ * `next' and `prev' should be task_t, but it isn't always defined
  */
 
 #define switch_to(prev, next, last)  do { \
-	register unsigned long arg0 __asm__ ("r0") = (unsigned long)prev; \
-	register unsigned long arg1 __asm__ ("r1") = (unsigned long)next; \
-	register unsigned long *oldsp __asm__ ("r2") = &(prev->thread.sp); \
-	register unsigned long *newsp __asm__ ("r3") = &(next->thread.sp); \
-	register unsigned long *oldlr __asm__ ("r4") = &(prev->thread.lr); \
-	register unsigned long *newlr __asm__ ("r5") = &(next->thread.lr); \
-	register struct task_struct *__last __asm__ ("r6"); \
 	__asm__ __volatile__ ( \
-		"st	r8, @-r15				\n\t" \
-		"st	r9, @-r15				\n\t" \
-		"st	r10, @-r15				\n\t" \
-		"st	r11, @-r15				\n\t" \
-		"st	r12, @-r15				\n\t" \
-		"st	r13, @-r15				\n\t" \
-		"st	r14, @-r15				\n\t" \
-		"seth	r14, #high(1f)				\n\t" \
-		"or3	r14, r14, #low(1f)			\n\t" \
-		"st	r14, @r4 ; store old LR			\n\t" \
-		"st	r15, @r2 ; store old SP			\n\t" \
-		"ld	r15, @r3 ; load new SP			\n\t" \
-		"st	r0, @-r15 ; store 'prev' onto new stack	\n\t" \
-		"ld	r14, @r5 ; load new LR			\n\t" \
-		"jmp	r14					\n\t" \
-		".fillinsn					\n " \
-		"1:						\n\t" \
-		"ld	r6, @r15+ ; load 'prev' from new stack	\n\t" \
-		"ld	r14, @r15+				\n\t" \
-		"ld	r13, @r15+				\n\t" \
-		"ld	r12, @r15+				\n\t" \
-		"ld	r11, @r15+				\n\t" \
-		"ld	r10, @r15+				\n\t" \
-		"ld	r9, @r15+				\n\t" \
-		"ld	r8, @r15+				\n\t" \
-		: "=&r" (__last) \
-		: "r" (arg0), "r" (arg1), "r" (oldsp), "r" (newsp), \
-		  "r" (oldlr), "r" (newlr) \
-		: "memory" \
+		"	seth	lr, #high(1f)				\n" \
+		"	or3	lr, lr, #low(1f)			\n" \
+		"	st	lr, @%4  ; store old LR			\n" \
+		"	ld	lr, @%5  ; load new LR			\n" \
+		"	st	sp, @%2  ; store old SP			\n" \
+		"	ld	sp, @%3  ; load new SP			\n" \
+		"	push	%1  ; store `prev' on new stack		\n" \
+		"	jmp	lr					\n" \
+		"	.fillinsn					\n" \
+		"1:							\n" \
+		"	pop	%0  ; restore `__last' from new stack	\n" \
+		: "=r" (last) \
+		: "0" (prev), \
+		  "r" (&(prev->thread.sp)), "r" (&(next->thread.sp)), \
+		  "r" (&(prev->thread.lr)), "r" (&(next->thread.lr)) \
+		: "memory", "lr" \
 	); \
-	last = __last; \
 } while(0)
 
 /*
@@ -167,8 +146,8 @@ extern void __xchg_called_with_bad_pointer(void);
 #define DCACHE_CLEAR(reg0, reg1, addr)
 #endif /* CONFIG_CHIP_M32700_TS1 */
 
-static __inline__ unsigned long __xchg(unsigned long x, volatile void * ptr,
-	int size)
+static inline unsigned long
+__xchg(unsigned long x, volatile void * ptr, int size)
 {
 	unsigned long flags;
 	unsigned long tmp = 0;
@@ -220,7 +199,7 @@ static __inline__ unsigned long __xchg(unsigned long x, volatile void * ptr,
 
 #define __HAVE_ARCH_CMPXCHG	1
 
-static __inline__ unsigned long
+static inline unsigned long
 __cmpxchg_u32(volatile unsigned int *p, unsigned int old, unsigned int new)
 {
 	unsigned long flags;
@@ -254,7 +233,7 @@ __cmpxchg_u32(volatile unsigned int *p, unsigned int old, unsigned int new)
    if something tries to do an invalid cmpxchg().  */
 extern void __cmpxchg_called_with_bad_pointer(void);
 
-static __inline__ unsigned long
+static inline unsigned long
 __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
 {
 	switch (size) {