author     Jeff Garzik <jeff@garzik.org>   2006-04-20 18:36:05 -0400
committer  Jeff Garzik <jeff@garzik.org>   2006-04-20 18:36:05 -0400
commit     857c68f733eea07f11a061caea43a38fed61adb7 (patch)
tree       4567a9e91f717b22143ac1ccb0fb5323ac15afd3 /include
parent     4741c336d27dec3ea68a35659abb8dc82b142388 (diff)
parent     402a26f0c040077ed6f941eefac5a6971f0d5f40 (diff)
Merge branch 'master'
Diffstat (limited to 'include')
62 files changed, 1391 insertions, 231 deletions
diff --git a/include/asm-i386/atomic.h b/include/asm-i386/atomic.h
index 22d80ece95cb..4ddce5296a78 100644
--- a/include/asm-i386/atomic.h
+++ b/include/asm-i386/atomic.h
@@ -183,6 +183,7 @@ static __inline__ int atomic_add_return(int i, atomic_t *v) | |||
183 | { | 183 | { |
184 | int __i; | 184 | int __i; |
185 | #ifdef CONFIG_M386 | 185 | #ifdef CONFIG_M386 |
186 | unsigned long flags; | ||
186 | if(unlikely(boot_cpu_data.x86==3)) | 187 | if(unlikely(boot_cpu_data.x86==3)) |
187 | goto no_xadd; | 188 | goto no_xadd; |
188 | #endif | 189 | #endif |
@@ -196,10 +197,10 @@ static __inline__ int atomic_add_return(int i, atomic_t *v) | |||
196 | 197 | ||
197 | #ifdef CONFIG_M386 | 198 | #ifdef CONFIG_M386 |
198 | no_xadd: /* Legacy 386 processor */ | 199 | no_xadd: /* Legacy 386 processor */ |
199 | local_irq_disable(); | 200 | local_irq_save(flags); |
200 | __i = atomic_read(v); | 201 | __i = atomic_read(v); |
201 | atomic_set(v, i + __i); | 202 | atomic_set(v, i + __i); |
202 | local_irq_enable(); | 203 | local_irq_restore(flags); |
203 | return i + __i; | 204 | return i + __i; |
204 | #endif | 205 | #endif |
205 | } | 206 | } |
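The point of the hunk above is that atomic_add_return() can legitimately be called with interrupts already disabled; on a real 386 the old fallback unconditionally re-enabled them on exit. A minimal sketch of the kind of caller that the local_irq_save()/local_irq_restore() pair protects (lock, counter and function names are hypothetical):

    static DEFINE_SPINLOCK(my_lock);             /* hypothetical */
    static atomic_t my_counter = ATOMIC_INIT(0);

    static void my_update(void)
    {
            unsigned long flags;

            spin_lock_irqsave(&my_lock, flags);     /* IRQs now off */
            atomic_add_return(1, &my_counter);      /* old 386 path ended in local_irq_enable() */
            /* ...rest of the critical section would have run with IRQs back on... */
            spin_unlock_irqrestore(&my_lock, flags);
    }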
diff --git a/include/asm-i386/cpufeature.h b/include/asm-i386/cpufeature.h
index 5c0b5876b931..b44bfc6239cb 100644
--- a/include/asm-i386/cpufeature.h
+++ b/include/asm-i386/cpufeature.h
@@ -71,6 +71,7 @@ | |||
71 | #define X86_FEATURE_P4 (3*32+ 7) /* P4 */ | 71 | #define X86_FEATURE_P4 (3*32+ 7) /* P4 */ |
72 | #define X86_FEATURE_CONSTANT_TSC (3*32+ 8) /* TSC ticks at a constant rate */ | 72 | #define X86_FEATURE_CONSTANT_TSC (3*32+ 8) /* TSC ticks at a constant rate */ |
73 | #define X86_FEATURE_UP (3*32+ 9) /* smp kernel running on up */ | 73 | #define X86_FEATURE_UP (3*32+ 9) /* smp kernel running on up */ |
74 | #define X86_FEATURE_FXSAVE_LEAK (3*32+10) /* FXSAVE leaks FOP/FIP/FOP */ | ||
74 | 75 | ||
75 | /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ | 76 | /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ |
76 | #define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */ | 77 | #define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */ |
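X86_FEATURE_FXSAVE_LEAK sits in word 3, the Linux-defined feature word, so it is not reported by CPUID but set by the kernel's own CPU setup code. A hedged sketch of how such a synthetic bit is typically set during identification (the exact predicate for the affected AMD parts is an assumption here):

    /* Sketch only: c is the struct cpuinfo_x86 being filled in at boot;
     * the real condition for K7/K8 parts may be narrower. */
    if (c->x86_vendor == X86_VENDOR_AMD)
            set_bit(X86_FEATURE_FXSAVE_LEAK, c->x86_capability);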
diff --git a/include/asm-i386/i387.h b/include/asm-i386/i387.h
index 152d0baa576a..7b1f01191e70 100644
--- a/include/asm-i386/i387.h
+++ b/include/asm-i386/i387.h
@@ -13,6 +13,7 @@ | |||
13 | 13 | ||
14 | #include <linux/sched.h> | 14 | #include <linux/sched.h> |
15 | #include <linux/init.h> | 15 | #include <linux/init.h> |
16 | #include <linux/kernel_stat.h> | ||
16 | #include <asm/processor.h> | 17 | #include <asm/processor.h> |
17 | #include <asm/sigcontext.h> | 18 | #include <asm/sigcontext.h> |
18 | #include <asm/user.h> | 19 | #include <asm/user.h> |
@@ -38,17 +39,38 @@ extern void init_fpu(struct task_struct *); | |||
38 | extern void kernel_fpu_begin(void); | 39 | extern void kernel_fpu_begin(void); |
39 | #define kernel_fpu_end() do { stts(); preempt_enable(); } while(0) | 40 | #define kernel_fpu_end() do { stts(); preempt_enable(); } while(0) |
40 | 41 | ||
42 | /* We need a safe address that is cheap to find and that is already | ||
43 | in L1 during context switch. The best choices are unfortunately | ||
44 | different for UP and SMP */ | ||
45 | #ifdef CONFIG_SMP | ||
46 | #define safe_address (__per_cpu_offset[0]) | ||
47 | #else | ||
48 | #define safe_address (kstat_cpu(0).cpustat.user) | ||
49 | #endif | ||
50 | |||
41 | /* | 51 | /* |
42 | * These must be called with preempt disabled | 52 | * These must be called with preempt disabled |
43 | */ | 53 | */ |
44 | static inline void __save_init_fpu( struct task_struct *tsk ) | 54 | static inline void __save_init_fpu( struct task_struct *tsk ) |
45 | { | 55 | { |
56 | /* Use more nops than strictly needed in case the compiler | ||
57 | varies code */ | ||
46 | alternative_input( | 58 | alternative_input( |
47 | "fnsave %1 ; fwait ;" GENERIC_NOP2, | 59 | "fnsave %[fx] ;fwait;" GENERIC_NOP8 GENERIC_NOP4, |
48 | "fxsave %1 ; fnclex", | 60 | "fxsave %[fx]\n" |
61 | "bt $7,%[fsw] ; jc 1f ; fnclex\n1:", | ||
49 | X86_FEATURE_FXSR, | 62 | X86_FEATURE_FXSR, |
50 | "m" (tsk->thread.i387.fxsave) | 63 | [fx] "m" (tsk->thread.i387.fxsave), |
51 | :"memory"); | 64 | [fsw] "m" (tsk->thread.i387.fxsave.swd) : "memory"); |
65 | /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception | ||
66 | is pending. Clear the x87 state here by setting it to fixed | ||
67 | values. __per_cpu_offset[0] is a random variable that should be in L1 */ | ||
68 | alternative_input( | ||
69 | GENERIC_NOP8 GENERIC_NOP2, | ||
70 | "emms\n\t" /* clear stack tags */ | ||
71 | "fildl %[addr]", /* set F?P to defined value */ | ||
72 | X86_FEATURE_FXSAVE_LEAK, | ||
73 | [addr] "m" (safe_address)); | ||
52 | task_thread_info(tsk)->status &= ~TS_USEDFPU; | 74 | task_thread_info(tsk)->status &= ~TS_USEDFPU; |
53 | } | 75 | } |
54 | 76 | ||
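A rough C equivalent of what the second alternative_input() patches in once X86_FEATURE_FXSAVE_LEAK is set, shown purely as a sketch (the real code stays in the asm alternative so unaffected CPUs only execute NOPs):

    if (boot_cpu_has(X86_FEATURE_FXSAVE_LEAK)) {
            __asm__ __volatile__("emms");                  /* clear the x87 tag word       */
            __asm__ __volatile__("fildl %0"
                                 : : "m" (safe_address));  /* force FIP/FDP/FOP to a known,
                                                              harmless value               */
    }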
diff --git a/include/asm-m32r/assembler.h b/include/asm-m32r/assembler.h
index b7f4d8aaeb46..1a1aa17edd33 100644
--- a/include/asm-m32r/assembler.h
+++ b/include/asm-m32r/assembler.h
@@ -109,6 +109,9 @@ | |||
109 | push r13 | 109 | push r13 |
110 | mvfachi r13 | 110 | mvfachi r13 |
111 | push r13 | 111 | push r13 |
112 | ldi r13, #0 | ||
113 | push r13 ; dummy push acc1h | ||
114 | push r13 ; dummy push acc1l | ||
112 | #else | 115 | #else |
113 | #error unknown isa configuration | 116 | #error unknown isa configuration |
114 | #endif | 117 | #endif |
@@ -156,6 +159,8 @@ | |||
156 | pop r13 | 159 | pop r13 |
157 | mvtaclo r13, a1 | 160 | mvtaclo r13, a1 |
158 | #elif defined(CONFIG_ISA_M32R2) || defined(CONFIG_ISA_M32R) | 161 | #elif defined(CONFIG_ISA_M32R2) || defined(CONFIG_ISA_M32R) |
162 | pop r13 ; dummy pop acc1h | ||
163 | pop r13 ; dummy pop acc1l | ||
159 | pop r13 | 164 | pop r13 |
160 | mvtachi r13 | 165 | mvtachi r13 |
161 | pop r13 | 166 | pop r13 |
diff --git a/include/asm-m32r/mappi3/mappi3_pld.h b/include/asm-m32r/mappi3/mappi3_pld.h
index 1d3c25d61bcb..031369a7afc8 100644
--- a/include/asm-m32r/mappi3/mappi3_pld.h
+++ b/include/asm-m32r/mappi3/mappi3_pld.h
@@ -53,16 +53,14 @@ | |||
53 | /* Power Control of MMC and CF */ | 53 | /* Power Control of MMC and CF */ |
54 | #define PLD_CPCR __reg16(PLD_BASE + 0x14000) | 54 | #define PLD_CPCR __reg16(PLD_BASE + 0x14000) |
55 | 55 | ||
56 | 56 | /* ICU */ | |
57 | /*==== ICU ====*/ | 57 | #define M32R_IRQ_PC104 (5) /* INT4(PC/104) */ |
58 | #define M32R_IRQ_PC104 (5) /* INT4(PC/104) */ | 58 | #define M32R_IRQ_I2C (28) /* I2C-BUS */ |
59 | #define M32R_IRQ_I2C (28) /* I2C-BUS */ | 59 | #define PLD_IRQ_CFIREQ (6) /* INT5 CFC Card Interrupt */ |
60 | #define PLD_IRQ_CFIREQ (6) /* INT5 CFC Card Interrupt */ | 60 | #define PLD_IRQ_CFC_INSERT (7) /* INT6 CFC Card Insert & Eject */ |
61 | #define PLD_IRQ_CFC_INSERT (7) /* INT6 CFC Card Insert */ | 61 | #define PLD_IRQ_IDEIREQ (8) /* INT7 IDE Interrupt */ |
62 | #define PLD_IRQ_IDEIREQ (8) /* INT7 IDE Interrupt */ | 62 | #define PLD_IRQ_MMCCARD (43) /* MMC Card Insert */ |
63 | #define PLD_IRQ_MMCCARD (43) /* MMC Card Insert */ | 63 | #define PLD_IRQ_MMCIRQ (44) /* MMC Transfer Done */ |
64 | #define PLD_IRQ_MMCIRQ (44) /* MMC Transfer Done */ | ||
65 | |||
66 | 64 | ||
67 | #if 0 | 65 | #if 0 |
68 | /* LED Control | 66 | /* LED Control |
@@ -97,7 +95,6 @@ | |||
97 | #define PLD_CRC16ADATA __reg16(PLD_BASE + 0x18008) | 95 | #define PLD_CRC16ADATA __reg16(PLD_BASE + 0x18008) |
98 | #define PLD_CRC16AINDATA __reg16(PLD_BASE + 0x1800a) | 96 | #define PLD_CRC16AINDATA __reg16(PLD_BASE + 0x1800a) |
99 | 97 | ||
100 | |||
101 | #if 0 | 98 | #if 0 |
102 | /* RTC */ | 99 | /* RTC */ |
103 | #define PLD_RTCCR __reg16(PLD_BASE + 0x1c000) | 100 | #define PLD_RTCCR __reg16(PLD_BASE + 0x1c000) |
@@ -140,4 +137,7 @@ | |||
140 | 137 | ||
141 | #endif | 138 | #endif |
142 | 139 | ||
140 | /* Reset Control */ | ||
141 | #define PLD_REBOOT __reg16(PLD_BASE + 0x38000) | ||
142 | |||
143 | #endif /* _MAPPI3_PLD.H */ | 143 | #endif /* _MAPPI3_PLD.H */ |
diff --git a/include/asm-m32r/ptrace.h b/include/asm-m32r/ptrace.h
index 0d058b2d844e..53c792452dfc 100644
--- a/include/asm-m32r/ptrace.h
+++ b/include/asm-m32r/ptrace.h
@@ -43,6 +43,14 @@ | |||
43 | #define PT_ACC1L 18 | 43 | #define PT_ACC1L 18 |
44 | #define PT_ACCH PT_ACC0H | 44 | #define PT_ACCH PT_ACC0H |
45 | #define PT_ACCL PT_ACC0L | 45 | #define PT_ACCL PT_ACC0L |
46 | #elif defined(CONFIG_ISA_M32R2) || defined(CONFIG_ISA_M32R) | ||
47 | #define PT_ACCH 15 | ||
48 | #define PT_ACCL 16 | ||
49 | #define PT_DUMMY_ACC1H 17 | ||
50 | #define PT_DUMMY_ACC1L 18 | ||
51 | #else | ||
52 | #error unknown isa configuration | ||
53 | #endif | ||
46 | #define PT_PSW 19 | 54 | #define PT_PSW 19 |
47 | #define PT_BPC 20 | 55 | #define PT_BPC 20 |
48 | #define PT_BBPSW 21 | 56 | #define PT_BBPSW 21 |
@@ -52,21 +60,6 @@ | |||
52 | #define PT_LR 25 | 60 | #define PT_LR 25 |
53 | #define PT_SPI 26 | 61 | #define PT_SPI 26 |
54 | #define PT_ORIGR0 27 | 62 | #define PT_ORIGR0 27 |
55 | #elif defined(CONFIG_ISA_M32R2) || defined(CONFIG_ISA_M32R) | ||
56 | #define PT_ACCH 15 | ||
57 | #define PT_ACCL 16 | ||
58 | #define PT_PSW 17 | ||
59 | #define PT_BPC 18 | ||
60 | #define PT_BBPSW 19 | ||
61 | #define PT_BBPC 20 | ||
62 | #define PT_SPU 21 | ||
63 | #define PT_FP 22 | ||
64 | #define PT_LR 23 | ||
65 | #define PT_SPI 24 | ||
66 | #define PT_ORIGR0 25 | ||
67 | #else | ||
68 | #error unknown isa conifiguration | ||
69 | #endif | ||
70 | 63 | ||
71 | /* virtual pt_reg entry for gdb */ | 64 | /* virtual pt_reg entry for gdb */ |
72 | #define PT_PC 30 | 65 | #define PT_PC 30 |
@@ -121,6 +114,8 @@ struct pt_regs { | |||
121 | #elif defined(CONFIG_ISA_M32R2) || defined(CONFIG_ISA_M32R) | 114 | #elif defined(CONFIG_ISA_M32R2) || defined(CONFIG_ISA_M32R) |
122 | unsigned long acch; | 115 | unsigned long acch; |
123 | unsigned long accl; | 116 | unsigned long accl; |
117 | unsigned long dummy_acc1h; | ||
118 | unsigned long dummy_acc1l; | ||
124 | #else | 119 | #else |
125 | #error unknown isa configuration | 120 | #error unknown isa configuration |
126 | #endif | 121 | #endif |
diff --git a/include/asm-m32r/semaphore.h b/include/asm-m32r/semaphore.h
index bf447c52a0a1..81750edc8916 100644
--- a/include/asm-m32r/semaphore.h
+++ b/include/asm-m32r/semaphore.h
@@ -9,7 +9,7 @@ | |||
9 | * SMP- and interrupt-safe semaphores.. | 9 | * SMP- and interrupt-safe semaphores.. |
10 | * | 10 | * |
11 | * Copyright (C) 1996 Linus Torvalds | 11 | * Copyright (C) 1996 Linus Torvalds |
12 | * Copyright (C) 2004 Hirokazu Takata <takata at linux-m32r.org> | 12 | * Copyright (C) 2004, 2006 Hirokazu Takata <takata at linux-m32r.org> |
13 | */ | 13 | */ |
14 | 14 | ||
15 | #include <linux/config.h> | 15 | #include <linux/config.h> |
@@ -77,27 +77,8 @@ asmlinkage void __up(struct semaphore * sem); | |||
77 | */ | 77 | */ |
78 | static inline void down(struct semaphore * sem) | 78 | static inline void down(struct semaphore * sem) |
79 | { | 79 | { |
80 | unsigned long flags; | ||
81 | long count; | ||
82 | |||
83 | might_sleep(); | 80 | might_sleep(); |
84 | local_irq_save(flags); | 81 | if (unlikely(atomic_dec_return(&sem->count) < 0)) |
85 | __asm__ __volatile__ ( | ||
86 | "# down \n\t" | ||
87 | DCACHE_CLEAR("%0", "r4", "%1") | ||
88 | M32R_LOCK" %0, @%1; \n\t" | ||
89 | "addi %0, #-1; \n\t" | ||
90 | M32R_UNLOCK" %0, @%1; \n\t" | ||
91 | : "=&r" (count) | ||
92 | : "r" (&sem->count) | ||
93 | : "memory" | ||
94 | #ifdef CONFIG_CHIP_M32700_TS1 | ||
95 | , "r4" | ||
96 | #endif /* CONFIG_CHIP_M32700_TS1 */ | ||
97 | ); | ||
98 | local_irq_restore(flags); | ||
99 | |||
100 | if (unlikely(count < 0)) | ||
101 | __down(sem); | 82 | __down(sem); |
102 | } | 83 | } |
103 | 84 | ||
@@ -107,28 +88,10 @@ static inline void down(struct semaphore * sem) | |||
107 | */ | 88 | */ |
108 | static inline int down_interruptible(struct semaphore * sem) | 89 | static inline int down_interruptible(struct semaphore * sem) |
109 | { | 90 | { |
110 | unsigned long flags; | ||
111 | long count; | ||
112 | int result = 0; | 91 | int result = 0; |
113 | 92 | ||
114 | might_sleep(); | 93 | might_sleep(); |
115 | local_irq_save(flags); | 94 | if (unlikely(atomic_dec_return(&sem->count) < 0)) |
116 | __asm__ __volatile__ ( | ||
117 | "# down_interruptible \n\t" | ||
118 | DCACHE_CLEAR("%0", "r4", "%1") | ||
119 | M32R_LOCK" %0, @%1; \n\t" | ||
120 | "addi %0, #-1; \n\t" | ||
121 | M32R_UNLOCK" %0, @%1; \n\t" | ||
122 | : "=&r" (count) | ||
123 | : "r" (&sem->count) | ||
124 | : "memory" | ||
125 | #ifdef CONFIG_CHIP_M32700_TS1 | ||
126 | , "r4" | ||
127 | #endif /* CONFIG_CHIP_M32700_TS1 */ | ||
128 | ); | ||
129 | local_irq_restore(flags); | ||
130 | |||
131 | if (unlikely(count < 0)) | ||
132 | result = __down_interruptible(sem); | 95 | result = __down_interruptible(sem); |
133 | 96 | ||
134 | return result; | 97 | return result; |
@@ -174,26 +137,7 @@ static inline int down_trylock(struct semaphore * sem) | |||
174 | */ | 137 | */ |
175 | static inline void up(struct semaphore * sem) | 138 | static inline void up(struct semaphore * sem) |
176 | { | 139 | { |
177 | unsigned long flags; | 140 | if (unlikely(atomic_inc_return(&sem->count) <= 0)) |
178 | long count; | ||
179 | |||
180 | local_irq_save(flags); | ||
181 | __asm__ __volatile__ ( | ||
182 | "# up \n\t" | ||
183 | DCACHE_CLEAR("%0", "r4", "%1") | ||
184 | M32R_LOCK" %0, @%1; \n\t" | ||
185 | "addi %0, #1; \n\t" | ||
186 | M32R_UNLOCK" %0, @%1; \n\t" | ||
187 | : "=&r" (count) | ||
188 | : "r" (&sem->count) | ||
189 | : "memory" | ||
190 | #ifdef CONFIG_CHIP_M32700_TS1 | ||
191 | , "r4" | ||
192 | #endif /* CONFIG_CHIP_M32700_TS1 */ | ||
193 | ); | ||
194 | local_irq_restore(flags); | ||
195 | |||
196 | if (unlikely(count <= 0)) | ||
197 | __up(sem); | 141 | __up(sem); |
198 | } | 142 | } |
199 | 143 | ||
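Callers are unaffected by the switch from hand-rolled LOCK/UNLOCK sequences to atomic_dec_return()/atomic_inc_return(); an illustrative driver-style user (semaphore and function names are hypothetical):

    static DECLARE_MUTEX(my_sem);               /* counting semaphore initialised to 1 */

    static int my_op(void)
    {
            if (down_interruptible(&my_sem))
                    return -ERESTARTSYS;        /* woken by a signal before acquiring */
            /* ...serialised work... */
            up(&my_sem);
            return 0;
    }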
diff --git a/include/asm-m32r/sigcontext.h b/include/asm-m32r/sigcontext.h
index c233e2def2a3..942b8a30937d 100644
--- a/include/asm-m32r/sigcontext.h
+++ b/include/asm-m32r/sigcontext.h
@@ -32,6 +32,8 @@ struct sigcontext { | |||
32 | #elif defined(CONFIG_ISA_M32R2) || defined(CONFIG_ISA_M32R) | 32 | #elif defined(CONFIG_ISA_M32R2) || defined(CONFIG_ISA_M32R) |
33 | unsigned long sc_acch; | 33 | unsigned long sc_acch; |
34 | unsigned long sc_accl; | 34 | unsigned long sc_accl; |
35 | unsigned long sc_dummy_acc1h; | ||
36 | unsigned long sc_dummy_acc1l; | ||
35 | #else | 37 | #else |
36 | #error unknown isa configuration | 38 | #error unknown isa configuration |
37 | #endif | 39 | #endif |
diff --git a/include/asm-m32r/system.h b/include/asm-m32r/system.h
index c5ab5da56d21..e55013f378e5 100644
--- a/include/asm-m32r/system.h
+++ b/include/asm-m32r/system.h
@@ -6,8 +6,8 @@ | |||
6 | * License. See the file "COPYING" in the main directory of this archive | 6 | * License. See the file "COPYING" in the main directory of this archive |
7 | * for more details. | 7 | * for more details. |
8 | * | 8 | * |
9 | * Copyright (C) 2001 by Hiroyuki Kondo, Hirokazu Takata, and Hitoshi Yamamoto | 9 | * Copyright (C) 2001 Hiroyuki Kondo, Hirokazu Takata, and Hitoshi Yamamoto |
10 | * Copyright (C) 2004 Hirokazu Takata <takata at linux-m32r.org> | 10 | * Copyright (C) 2004, 2006 Hirokazu Takata <takata at linux-m32r.org> |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include <linux/config.h> | 13 | #include <linux/config.h> |
@@ -19,49 +19,28 @@ | |||
19 | * switch_to(prev, next) should switch from task `prev' to `next' | 19 | * switch_to(prev, next) should switch from task `prev' to `next' |
20 | * `prev' will never be the same as `next'. | 20 | * `prev' will never be the same as `next'. |
21 | * | 21 | * |
22 | * `next' and `prev' should be struct task_struct, but it isn't always defined | 22 | * `next' and `prev' should be task_t, but it isn't always defined |
23 | */ | 23 | */ |
24 | 24 | ||
25 | #define switch_to(prev, next, last) do { \ | 25 | #define switch_to(prev, next, last) do { \ |
26 | register unsigned long arg0 __asm__ ("r0") = (unsigned long)prev; \ | ||
27 | register unsigned long arg1 __asm__ ("r1") = (unsigned long)next; \ | ||
28 | register unsigned long *oldsp __asm__ ("r2") = &(prev->thread.sp); \ | ||
29 | register unsigned long *newsp __asm__ ("r3") = &(next->thread.sp); \ | ||
30 | register unsigned long *oldlr __asm__ ("r4") = &(prev->thread.lr); \ | ||
31 | register unsigned long *newlr __asm__ ("r5") = &(next->thread.lr); \ | ||
32 | register struct task_struct *__last __asm__ ("r6"); \ | ||
33 | __asm__ __volatile__ ( \ | 26 | __asm__ __volatile__ ( \ |
34 | "st r8, @-r15 \n\t" \ | 27 | " seth lr, #high(1f) \n" \ |
35 | "st r9, @-r15 \n\t" \ | 28 | " or3 lr, lr, #low(1f) \n" \ |
36 | "st r10, @-r15 \n\t" \ | 29 | " st lr, @%4 ; store old LR \n" \ |
37 | "st r11, @-r15 \n\t" \ | 30 | " ld lr, @%5 ; load new LR \n" \ |
38 | "st r12, @-r15 \n\t" \ | 31 | " st sp, @%2 ; store old SP \n" \ |
39 | "st r13, @-r15 \n\t" \ | 32 | " ld sp, @%3 ; load new SP \n" \ |
40 | "st r14, @-r15 \n\t" \ | 33 | " push %1 ; store `prev' on new stack \n" \ |
41 | "seth r14, #high(1f) \n\t" \ | 34 | " jmp lr \n" \ |
42 | "or3 r14, r14, #low(1f) \n\t" \ | 35 | " .fillinsn \n" \ |
43 | "st r14, @r4 ; store old LR \n\t" \ | 36 | "1: \n" \ |
44 | "st r15, @r2 ; store old SP \n\t" \ | 37 | " pop %0 ; restore `__last' from new stack \n" \ |
45 | "ld r15, @r3 ; load new SP \n\t" \ | 38 | : "=r" (last) \ |
46 | "st r0, @-r15 ; store 'prev' onto new stack \n\t" \ | 39 | : "0" (prev), \ |
47 | "ld r14, @r5 ; load new LR \n\t" \ | 40 | "r" (&(prev->thread.sp)), "r" (&(next->thread.sp)), \ |
48 | "jmp r14 \n\t" \ | 41 | "r" (&(prev->thread.lr)), "r" (&(next->thread.lr)) \ |
49 | ".fillinsn \n " \ | 42 | : "memory", "lr" \ |
50 | "1: \n\t" \ | ||
51 | "ld r6, @r15+ ; load 'prev' from new stack \n\t" \ | ||
52 | "ld r14, @r15+ \n\t" \ | ||
53 | "ld r13, @r15+ \n\t" \ | ||
54 | "ld r12, @r15+ \n\t" \ | ||
55 | "ld r11, @r15+ \n\t" \ | ||
56 | "ld r10, @r15+ \n\t" \ | ||
57 | "ld r9, @r15+ \n\t" \ | ||
58 | "ld r8, @r15+ \n\t" \ | ||
59 | : "=&r" (__last) \ | ||
60 | : "r" (arg0), "r" (arg1), "r" (oldsp), "r" (newsp), \ | ||
61 | "r" (oldlr), "r" (newlr) \ | ||
62 | : "memory" \ | ||
63 | ); \ | 43 | ); \ |
64 | last = __last; \ | ||
65 | } while(0) | 44 | } while(0) |
66 | 45 | ||
67 | /* | 46 | /* |
@@ -167,8 +146,8 @@ extern void __xchg_called_with_bad_pointer(void); | |||
167 | #define DCACHE_CLEAR(reg0, reg1, addr) | 146 | #define DCACHE_CLEAR(reg0, reg1, addr) |
168 | #endif /* CONFIG_CHIP_M32700_TS1 */ | 147 | #endif /* CONFIG_CHIP_M32700_TS1 */ |
169 | 148 | ||
170 | static __inline__ unsigned long __xchg(unsigned long x, volatile void * ptr, | 149 | static inline unsigned long |
171 | int size) | 150 | __xchg(unsigned long x, volatile void * ptr, int size) |
172 | { | 151 | { |
173 | unsigned long flags; | 152 | unsigned long flags; |
174 | unsigned long tmp = 0; | 153 | unsigned long tmp = 0; |
@@ -220,7 +199,7 @@ static __inline__ unsigned long __xchg(unsigned long x, volatile void * ptr, | |||
220 | 199 | ||
221 | #define __HAVE_ARCH_CMPXCHG 1 | 200 | #define __HAVE_ARCH_CMPXCHG 1 |
222 | 201 | ||
223 | static __inline__ unsigned long | 202 | static inline unsigned long |
224 | __cmpxchg_u32(volatile unsigned int *p, unsigned int old, unsigned int new) | 203 | __cmpxchg_u32(volatile unsigned int *p, unsigned int old, unsigned int new) |
225 | { | 204 | { |
226 | unsigned long flags; | 205 | unsigned long flags; |
@@ -254,7 +233,7 @@ __cmpxchg_u32(volatile unsigned int *p, unsigned int old, unsigned int new) | |||
254 | if something tries to do an invalid cmpxchg(). */ | 233 | if something tries to do an invalid cmpxchg(). */ |
255 | extern void __cmpxchg_called_with_bad_pointer(void); | 234 | extern void __cmpxchg_called_with_bad_pointer(void); |
256 | 235 | ||
257 | static __inline__ unsigned long | 236 | static inline unsigned long |
258 | __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) | 237 | __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) |
259 | { | 238 | { |
260 | switch (size) { | 239 | switch (size) { |
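For reference, a typical lock-free retry loop built on the __cmpxchg()/__cmpxchg_u32() shown above, assuming the usual cmpxchg() wrapper macro in this header (variable names are hypothetical):

    static unsigned int my_count;

    static void my_count_inc(void)
    {
            unsigned int old;

            do {
                    old = my_count;
            } while (cmpxchg(&my_count, old, old + 1) != old);
    }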
diff --git a/include/asm-mips/asmmacro.h b/include/asm-mips/asmmacro.h
index 30b18ea6cb11..f54aa147ec19 100644
--- a/include/asm-mips/asmmacro.h
+++ b/include/asm-mips/asmmacro.h
@@ -17,7 +17,26 @@ | |||
17 | #ifdef CONFIG_64BIT | 17 | #ifdef CONFIG_64BIT |
18 | #include <asm/asmmacro-64.h> | 18 | #include <asm/asmmacro-64.h> |
19 | #endif | 19 | #endif |
20 | #ifdef CONFIG_MIPS_MT_SMTC | ||
21 | #include <asm/mipsmtregs.h> | ||
22 | #endif | ||
20 | 23 | ||
24 | #ifdef CONFIG_MIPS_MT_SMTC | ||
25 | .macro local_irq_enable reg=t0 | ||
26 | mfc0 \reg, CP0_TCSTATUS | ||
27 | ori \reg, \reg, TCSTATUS_IXMT | ||
28 | xori \reg, \reg, TCSTATUS_IXMT | ||
29 | mtc0 \reg, CP0_TCSTATUS | ||
30 | ehb | ||
31 | .endm | ||
32 | |||
33 | .macro local_irq_disable reg=t0 | ||
34 | mfc0 \reg, CP0_TCSTATUS | ||
35 | ori \reg, \reg, TCSTATUS_IXMT | ||
36 | mtc0 \reg, CP0_TCSTATUS | ||
37 | ehb | ||
38 | .endm | ||
39 | #else | ||
21 | .macro local_irq_enable reg=t0 | 40 | .macro local_irq_enable reg=t0 |
22 | mfc0 \reg, CP0_STATUS | 41 | mfc0 \reg, CP0_STATUS |
23 | ori \reg, \reg, 1 | 42 | ori \reg, \reg, 1 |
@@ -32,6 +51,7 @@ | |||
32 | mtc0 \reg, CP0_STATUS | 51 | mtc0 \reg, CP0_STATUS |
33 | irq_disable_hazard | 52 | irq_disable_hazard |
34 | .endm | 53 | .endm |
54 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
35 | 55 | ||
36 | #ifdef CONFIG_CPU_SB1 | 56 | #ifdef CONFIG_CPU_SB1 |
37 | .macro fpu_enable_hazard | 57 | .macro fpu_enable_hazard |
@@ -48,4 +68,31 @@ | |||
48 | .endm | 68 | .endm |
49 | #endif | 69 | #endif |
50 | 70 | ||
71 | /* | ||
72 | * Temporary until all gas have MT ASE support | ||
73 | */ | ||
74 | .macro DMT reg=0 | ||
75 | .word (0x41600bc1 | (\reg << 16)) | ||
76 | .endm | ||
77 | |||
78 | .macro EMT reg=0 | ||
79 | .word (0x41600be1 | (\reg << 16)) | ||
80 | .endm | ||
81 | |||
82 | .macro DVPE reg=0 | ||
83 | .word (0x41600001 | (\reg << 16)) | ||
84 | .endm | ||
85 | |||
86 | .macro EVPE reg=0 | ||
87 | .word (0x41600021 | (\reg << 16)) | ||
88 | .endm | ||
89 | |||
90 | .macro MFTR rt=0, rd=0, u=0, sel=0 | ||
91 | .word (0x41000000 | (\rt << 16) | (\rd << 11) | (\u << 5) | (\sel)) | ||
92 | .endm | ||
93 | |||
94 | .macro MTTR rt=0, rd=0, u=0, sel=0 | ||
95 | .word (0x41800000 | (\rt << 16) | (\rd << 11) | (\u << 5) | (\sel)) | ||
96 | .endm | ||
97 | |||
51 | #endif /* _ASM_ASMMACRO_H */ | 98 | #endif /* _ASM_ASMMACRO_H */ |
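The hard-coded .word values above are the MT ASE opcodes with the register number spliced into the rt field (bits 20:16), which is why 0x41600bc1 with reg=1 matches the 0x41610BC1 literal used later in this patch for __dmt(). A small stand-alone check of that arithmetic (plain user-space C, for illustration only):

    #include <stdio.h>

    int main(void)
    {
            unsigned int dmt_base = 0x41600bc1;     /* DMT with rt = $0     */
            unsigned int rt = 1;                    /* encode rt = $1 ($at) */

            printf("dmt $1 = 0x%08x\n", dmt_base | (rt << 16)); /* 0x41610bc1 */
            return 0;
    }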
diff --git a/include/asm-mips/cacheflush.h b/include/asm-mips/cacheflush.h
index aeae9fabf4a9..47bc8f6c20d2 100644
--- a/include/asm-mips/cacheflush.h
+++ b/include/asm-mips/cacheflush.h
@@ -74,6 +74,7 @@ static inline void copy_from_user_page(struct vm_area_struct *vma, | |||
74 | 74 | ||
75 | extern void (*flush_cache_sigtramp)(unsigned long addr); | 75 | extern void (*flush_cache_sigtramp)(unsigned long addr); |
76 | extern void (*flush_icache_all)(void); | 76 | extern void (*flush_icache_all)(void); |
77 | extern void (*local_flush_data_cache_page)(void * addr); | ||
77 | extern void (*flush_data_cache_page)(unsigned long addr); | 78 | extern void (*flush_data_cache_page)(unsigned long addr); |
78 | 79 | ||
79 | /* | 80 | /* |
diff --git a/include/asm-mips/cpu-features.h b/include/asm-mips/cpu-features.h
index 3f2b6d9ac45e..254e11ed247b 100644
--- a/include/asm-mips/cpu-features.h
+++ b/include/asm-mips/cpu-features.h
@@ -40,7 +40,7 @@ | |||
40 | #define cpu_has_sb1_cache (cpu_data[0].options & MIPS_CPU_SB1_CACHE) | 40 | #define cpu_has_sb1_cache (cpu_data[0].options & MIPS_CPU_SB1_CACHE) |
41 | #endif | 41 | #endif |
42 | #ifndef cpu_has_fpu | 42 | #ifndef cpu_has_fpu |
43 | #define cpu_has_fpu (cpu_data[0].options & MIPS_CPU_FPU) | 43 | #define cpu_has_fpu (current_cpu_data.options & MIPS_CPU_FPU) |
44 | #endif | 44 | #endif |
45 | #ifndef cpu_has_32fpr | 45 | #ifndef cpu_has_32fpr |
46 | #define cpu_has_32fpr (cpu_data[0].options & MIPS_CPU_32FPR) | 46 | #define cpu_has_32fpr (cpu_data[0].options & MIPS_CPU_32FPR) |
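The change matters because current_cpu_data indexes the descriptor of the CPU actually executing, which is what an MT kernel needs when FPU availability can differ per TC/VPE; cpu_data[0] always answered for the boot CPU. For reference, the definition taken from cpu-info.h:

    #define current_cpu_data cpu_data[smp_processor_id()]

    /* old: cpu_data[0].options      - boot CPU's feature flags, always */
    /* new: current_cpu_data.options - feature flags of the running CPU */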
diff --git a/include/asm-mips/cpu-info.h b/include/asm-mips/cpu-info.h
index 140be1c67da7..6572ac703662 100644
--- a/include/asm-mips/cpu-info.h
+++ b/include/asm-mips/cpu-info.h
@@ -73,6 +73,16 @@ struct cpuinfo_mips { | |||
73 | struct cache_desc dcache; /* Primary D or combined I/D cache */ | 73 | struct cache_desc dcache; /* Primary D or combined I/D cache */ |
74 | struct cache_desc scache; /* Secondary cache */ | 74 | struct cache_desc scache; /* Secondary cache */ |
75 | struct cache_desc tcache; /* Tertiary/split secondary cache */ | 75 | struct cache_desc tcache; /* Tertiary/split secondary cache */ |
76 | #if defined(CONFIG_MIPS_MT_SMTC) | ||
77 | /* | ||
78 | * In the MIPS MT "SMTC" model, each TC is considered | ||
79 | * to be a "CPU" for the purposes of scheduling, but | ||
80 | * exception resources, ASID spaces, etc, are common | ||
81 | * to all TCs within the same VPE. | ||
82 | */ | ||
83 | int vpe_id; /* Virtual Processor number */ | ||
84 | int tc_id; /* Thread Context number */ | ||
85 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
76 | void *data; /* Additional data */ | 86 | void *data; /* Additional data */ |
77 | } __attribute__((aligned(SMP_CACHE_BYTES))); | 87 | } __attribute__((aligned(SMP_CACHE_BYTES))); |
78 | 88 | ||
diff --git a/include/asm-mips/ds1742.h b/include/asm-mips/ds1742.h
new file mode 100644
index 000000000000..c2f2c32da637
--- /dev/null
+++ b/include/asm-mips/ds1742.h
@@ -0,0 +1,13 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2006 by Ralf Baechle (ralf@linux-mips.org) | ||
7 | */ | ||
8 | #ifndef _ASM_DS1742_H | ||
9 | #define _ASM_DS1742_H | ||
10 | |||
11 | #include <ds1742.h> | ||
12 | |||
13 | #endif /* _ASM_DS1742_H */ | ||
diff --git a/include/asm-mips/elf.h b/include/asm-mips/elf.h
index 851f013adad3..bdc9de2df1ef 100644
--- a/include/asm-mips/elf.h
+++ b/include/asm-mips/elf.h
@@ -119,8 +119,49 @@ | |||
119 | #define SHT_MIPS_CONFLICT 0x70000002 | 119 | #define SHT_MIPS_CONFLICT 0x70000002 |
120 | #define SHT_MIPS_GPTAB 0x70000003 | 120 | #define SHT_MIPS_GPTAB 0x70000003 |
121 | #define SHT_MIPS_UCODE 0x70000004 | 121 | #define SHT_MIPS_UCODE 0x70000004 |
122 | 122 | #define SHT_MIPS_DEBUG 0x70000005 | |
123 | #define SHF_MIPS_GPREL 0x10000000 | 123 | #define SHT_MIPS_REGINFO 0x70000006 |
124 | #define SHT_MIPS_PACKAGE 0x70000007 | ||
125 | #define SHT_MIPS_PACKSYM 0x70000008 | ||
126 | #define SHT_MIPS_RELD 0x70000009 | ||
127 | #define SHT_MIPS_IFACE 0x7000000b | ||
128 | #define SHT_MIPS_CONTENT 0x7000000c | ||
129 | #define SHT_MIPS_OPTIONS 0x7000000d | ||
130 | #define SHT_MIPS_SHDR 0x70000010 | ||
131 | #define SHT_MIPS_FDESC 0x70000011 | ||
132 | #define SHT_MIPS_EXTSYM 0x70000012 | ||
133 | #define SHT_MIPS_DENSE 0x70000013 | ||
134 | #define SHT_MIPS_PDESC 0x70000014 | ||
135 | #define SHT_MIPS_LOCSYM 0x70000015 | ||
136 | #define SHT_MIPS_AUXSYM 0x70000016 | ||
137 | #define SHT_MIPS_OPTSYM 0x70000017 | ||
138 | #define SHT_MIPS_LOCSTR 0x70000018 | ||
139 | #define SHT_MIPS_LINE 0x70000019 | ||
140 | #define SHT_MIPS_RFDESC 0x7000001a | ||
141 | #define SHT_MIPS_DELTASYM 0x7000001b | ||
142 | #define SHT_MIPS_DELTAINST 0x7000001c | ||
143 | #define SHT_MIPS_DELTACLASS 0x7000001d | ||
144 | #define SHT_MIPS_DWARF 0x7000001e | ||
145 | #define SHT_MIPS_DELTADECL 0x7000001f | ||
146 | #define SHT_MIPS_SYMBOL_LIB 0x70000020 | ||
147 | #define SHT_MIPS_EVENTS 0x70000021 | ||
148 | #define SHT_MIPS_TRANSLATE 0x70000022 | ||
149 | #define SHT_MIPS_PIXIE 0x70000023 | ||
150 | #define SHT_MIPS_XLATE 0x70000024 | ||
151 | #define SHT_MIPS_XLATE_DEBUG 0x70000025 | ||
152 | #define SHT_MIPS_WHIRL 0x70000026 | ||
153 | #define SHT_MIPS_EH_REGION 0x70000027 | ||
154 | #define SHT_MIPS_XLATE_OLD 0x70000028 | ||
155 | #define SHT_MIPS_PDR_EXCEPTION 0x70000029 | ||
156 | |||
157 | #define SHF_MIPS_GPREL 0x10000000 | ||
158 | #define SHF_MIPS_MERGE 0x20000000 | ||
159 | #define SHF_MIPS_ADDR 0x40000000 | ||
160 | #define SHF_MIPS_STRING 0x80000000 | ||
161 | #define SHF_MIPS_NOSTRIP 0x08000000 | ||
162 | #define SHF_MIPS_LOCAL 0x04000000 | ||
163 | #define SHF_MIPS_NAMES 0x02000000 | ||
164 | #define SHF_MIPS_NODUPES 0x01000000 | ||
124 | 165 | ||
125 | #ifndef ELF_ARCH | 166 | #ifndef ELF_ARCH |
126 | /* ELF register definitions */ | 167 | /* ELF register definitions */ |
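An illustrative consumer of the newly added SHT_MIPS_* constants when walking ELF section headers, for example in a loader along the lines of the MIPS AP/SP code (the helper and both parameters are hypothetical):

    static void scan_sections(Elf32_Shdr *shdr, unsigned int num)
    {
            unsigned int i;

            for (i = 0; i < num; i++)
                    if (shdr[i].sh_type == SHT_MIPS_DWARF)
                            note_debug_section(&shdr[i]);   /* hypothetical helper */
    }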
diff --git a/include/asm-mips/fpu.h b/include/asm-mips/fpu.h
index 9c828b1f8218..b0f50015e252 100644
--- a/include/asm-mips/fpu.h
+++ b/include/asm-mips/fpu.h
@@ -21,6 +21,10 @@ | |||
21 | #include <asm/processor.h> | 21 | #include <asm/processor.h> |
22 | #include <asm/current.h> | 22 | #include <asm/current.h> |
23 | 23 | ||
24 | #ifdef CONFIG_MIPS_MT_FPAFF | ||
25 | #include <asm/mips_mt.h> | ||
26 | #endif | ||
27 | |||
24 | struct sigcontext; | 28 | struct sigcontext; |
25 | struct sigcontext32; | 29 | struct sigcontext32; |
26 | 30 | ||
diff --git a/include/asm-mips/hazards.h b/include/asm-mips/hazards.h
index feb29a793888..dadc05188db7 100644
--- a/include/asm-mips/hazards.h
+++ b/include/asm-mips/hazards.h
@@ -284,6 +284,8 @@ do { \ | |||
284 | #define instruction_hazard() do { } while (0) | 284 | #define instruction_hazard() do { } while (0) |
285 | #endif | 285 | #endif |
286 | 286 | ||
287 | extern void mips_ihb(void); | ||
288 | |||
287 | #endif /* __ASSEMBLY__ */ | 289 | #endif /* __ASSEMBLY__ */ |
288 | 290 | ||
289 | #endif /* _ASM_HAZARDS_H */ | 291 | #endif /* _ASM_HAZARDS_H */ |
diff --git a/include/asm-mips/interrupt.h b/include/asm-mips/interrupt.h
index 774348734fa0..4bb9c06f4410 100644
--- a/include/asm-mips/interrupt.h
+++ b/include/asm-mips/interrupt.h
@@ -19,7 +19,12 @@ __asm__ ( | |||
19 | " .set push \n" | 19 | " .set push \n" |
20 | " .set reorder \n" | 20 | " .set reorder \n" |
21 | " .set noat \n" | 21 | " .set noat \n" |
22 | #ifdef CONFIG_CPU_MIPSR2 | 22 | #ifdef CONFIG_MIPS_MT_SMTC |
23 | " mfc0 $1, $2, 1 # SMTC - clear TCStatus.IXMT \n" | ||
24 | " ori $1, 0x400 \n" | ||
25 | " xori $1, 0x400 \n" | ||
26 | " mtc0 $1, $2, 1 \n" | ||
27 | #elif defined(CONFIG_CPU_MIPSR2) | ||
23 | " ei \n" | 28 | " ei \n" |
24 | #else | 29 | #else |
25 | " mfc0 $1,$12 \n" | 30 | " mfc0 $1,$12 \n" |
@@ -62,7 +67,12 @@ __asm__ ( | |||
62 | " .macro local_irq_disable\n" | 67 | " .macro local_irq_disable\n" |
63 | " .set push \n" | 68 | " .set push \n" |
64 | " .set noat \n" | 69 | " .set noat \n" |
65 | #ifdef CONFIG_CPU_MIPSR2 | 70 | #ifdef CONFIG_MIPS_MT_SMTC |
71 | " mfc0 $1, $2, 1 \n" | ||
72 | " ori $1, 0x400 \n" | ||
73 | " .set noreorder \n" | ||
74 | " mtc0 $1, $2, 1 \n" | ||
75 | #elif defined(CONFIG_CPU_MIPSR2) | ||
66 | " di \n" | 76 | " di \n" |
67 | #else | 77 | #else |
68 | " mfc0 $1,$12 \n" | 78 | " mfc0 $1,$12 \n" |
@@ -88,7 +98,11 @@ __asm__ ( | |||
88 | " .macro local_save_flags flags \n" | 98 | " .macro local_save_flags flags \n" |
89 | " .set push \n" | 99 | " .set push \n" |
90 | " .set reorder \n" | 100 | " .set reorder \n" |
101 | #ifdef CONFIG_MIPS_MT_SMTC | ||
102 | " mfc0 \\flags, $2, 1 \n" | ||
103 | #else | ||
91 | " mfc0 \\flags, $12 \n" | 104 | " mfc0 \\flags, $12 \n" |
105 | #endif | ||
92 | " .set pop \n" | 106 | " .set pop \n" |
93 | " .endm \n"); | 107 | " .endm \n"); |
94 | 108 | ||
@@ -102,7 +116,13 @@ __asm__ ( | |||
102 | " .set push \n" | 116 | " .set push \n" |
103 | " .set reorder \n" | 117 | " .set reorder \n" |
104 | " .set noat \n" | 118 | " .set noat \n" |
105 | #ifdef CONFIG_CPU_MIPSR2 | 119 | #ifdef CONFIG_MIPS_MT_SMTC |
120 | " mfc0 \\result, $2, 1 \n" | ||
121 | " ori $1, \\result, 0x400 \n" | ||
122 | " .set noreorder \n" | ||
123 | " mtc0 $1, $2, 1 \n" | ||
124 | " andi \\result, \\result, 0x400 \n" | ||
125 | #elif defined(CONFIG_CPU_MIPSR2) | ||
106 | " di \\result \n" | 126 | " di \\result \n" |
107 | " andi \\result, 1 \n" | 127 | " andi \\result, 1 \n" |
108 | #else | 128 | #else |
@@ -128,7 +148,14 @@ __asm__ ( | |||
128 | " .set push \n" | 148 | " .set push \n" |
129 | " .set noreorder \n" | 149 | " .set noreorder \n" |
130 | " .set noat \n" | 150 | " .set noat \n" |
131 | #if defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU) | 151 | #ifdef CONFIG_MIPS_MT_SMTC |
152 | "mfc0 $1, $2, 1 \n" | ||
153 | "andi \\flags, 0x400 \n" | ||
154 | "ori $1, 0x400 \n" | ||
155 | "xori $1, 0x400 \n" | ||
156 | "or \\flags, $1 \n" | ||
157 | "mtc0 \\flags, $2, 1 \n" | ||
158 | #elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU) | ||
132 | /* | 159 | /* |
133 | * Slow, but doesn't suffer from a relativly unlikely race | 160 | * Slow, but doesn't suffer from a relativly unlikely race |
134 | * condition we're having since days 1. | 161 | * condition we're having since days 1. |
@@ -167,11 +194,29 @@ do { \ | |||
167 | : "memory"); \ | 194 | : "memory"); \ |
168 | } while(0) | 195 | } while(0) |
169 | 196 | ||
170 | #define irqs_disabled() \ | 197 | static inline int irqs_disabled(void) |
171 | ({ \ | 198 | { |
172 | unsigned long flags; \ | 199 | #ifdef CONFIG_MIPS_MT_SMTC |
173 | local_save_flags(flags); \ | 200 | /* |
174 | !(flags & 1); \ | 201 | * SMTC model uses TCStatus.IXMT to disable interrupts for a thread/CPU |
175 | }) | 202 | */ |
203 | unsigned long __result; | ||
204 | |||
205 | __asm__ __volatile__( | ||
206 | " .set noreorder \n" | ||
207 | " mfc0 %0, $2, 1 \n" | ||
208 | " andi %0, 0x400 \n" | ||
209 | " slt %0, $0, %0 \n" | ||
210 | " .set reorder \n" | ||
211 | : "=r" (__result)); | ||
212 | |||
213 | return __result; | ||
214 | #else | ||
215 | unsigned long flags; | ||
216 | local_save_flags(flags); | ||
217 | |||
218 | return !(flags & 1); | ||
219 | #endif | ||
220 | } | ||
176 | 221 | ||
177 | #endif /* _ASM_INTERRUPT_H */ | 222 | #endif /* _ASM_INTERRUPT_H */ |
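What the SMTC branch of irqs_disabled() computes, written out as a sketch: TCStatus.IXMT is bit 10, the 0x400 mask used throughout this file, and a set bit means interrupts are masked for this thread context:

    static inline int tc_irqs_disabled(unsigned int tcstatus)
    {
            return (tcstatus & 0x400) != 0;     /* IXMT set => IRQs off for this TC */
    }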
diff --git a/include/asm-mips/irq.h b/include/asm-mips/irq.h
index 8a342ccb34a8..dde677f02bc0 100644
--- a/include/asm-mips/irq.h
+++ b/include/asm-mips/irq.h
@@ -11,6 +11,9 @@ | |||
11 | 11 | ||
12 | #include <linux/config.h> | 12 | #include <linux/config.h> |
13 | #include <linux/linkage.h> | 13 | #include <linux/linkage.h> |
14 | |||
15 | #include <asm/mipsmtregs.h> | ||
16 | |||
14 | #include <irq.h> | 17 | #include <irq.h> |
15 | 18 | ||
16 | #ifdef CONFIG_I8259 | 19 | #ifdef CONFIG_I8259 |
@@ -26,6 +29,23 @@ struct pt_regs; | |||
26 | 29 | ||
27 | extern asmlinkage unsigned int do_IRQ(unsigned int irq, struct pt_regs *regs); | 30 | extern asmlinkage unsigned int do_IRQ(unsigned int irq, struct pt_regs *regs); |
28 | 31 | ||
32 | #ifdef CONFIG_MIPS_MT_SMTC | ||
33 | /* | ||
34 | * Clear interrupt mask handling "backstop" if irq_hwmask | ||
35 | * entry so indicates. This implies that the ack() or end() | ||
36 | * functions will take over re-enabling the low-level mask. | ||
37 | * Otherwise it will be done on return from exception. | ||
38 | */ | ||
39 | #define __DO_IRQ_SMTC_HOOK() \ | ||
40 | do { \ | ||
41 | if (irq_hwmask[irq] & 0x0000ff00) \ | ||
42 | write_c0_tccontext(read_c0_tccontext() & \ | ||
43 | ~(irq_hwmask[irq] & 0x0000ff00)); \ | ||
44 | } while (0) | ||
45 | #else | ||
46 | #define __DO_IRQ_SMTC_HOOK() do { } while (0) | ||
47 | #endif | ||
48 | |||
29 | #ifdef CONFIG_PREEMPT | 49 | #ifdef CONFIG_PREEMPT |
30 | 50 | ||
31 | /* | 51 | /* |
@@ -39,6 +59,7 @@ extern asmlinkage unsigned int do_IRQ(unsigned int irq, struct pt_regs *regs); | |||
39 | #define do_IRQ(irq, regs) \ | 59 | #define do_IRQ(irq, regs) \ |
40 | do { \ | 60 | do { \ |
41 | irq_enter(); \ | 61 | irq_enter(); \ |
62 | __DO_IRQ_SMTC_HOOK(); \ | ||
42 | __do_IRQ((irq), (regs)); \ | 63 | __do_IRQ((irq), (regs)); \ |
43 | irq_exit(); \ | 64 | irq_exit(); \ |
44 | } while (0) | 65 | } while (0) |
@@ -46,5 +67,14 @@ do { \ | |||
46 | #endif | 67 | #endif |
47 | 68 | ||
48 | extern void arch_init_irq(void); | 69 | extern void arch_init_irq(void); |
70 | extern void spurious_interrupt(struct pt_regs *regs); | ||
71 | |||
72 | #ifdef CONFIG_MIPS_MT_SMTC | ||
73 | struct irqaction; | ||
74 | |||
75 | extern unsigned long irq_hwmask[]; | ||
76 | extern int setup_irq_smtc(unsigned int irq, struct irqaction * new, | ||
77 | unsigned long hwmask); | ||
78 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
49 | 79 | ||
50 | #endif /* _ASM_IRQ_H */ | 80 | #endif /* _ASM_IRQ_H */ |
diff --git a/include/asm-mips/kspd.h b/include/asm-mips/kspd.h
new file mode 100644
index 000000000000..4e9e724c8935
--- /dev/null
+++ b/include/asm-mips/kspd.h
@@ -0,0 +1,36 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved. | ||
3 | * | ||
4 | * This program is free software; you can distribute it and/or modify it | ||
5 | * under the terms of the GNU General Public License (Version 2) as | ||
6 | * published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | ||
11 | * for more details. | ||
12 | * | ||
13 | * You should have received a copy of the GNU General Public License along | ||
14 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
15 | * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. | ||
16 | * | ||
17 | */ | ||
18 | |||
19 | #ifndef _ASM_KSPD_H | ||
20 | #define _ASM_KSPD_H | ||
21 | |||
22 | struct kspd_notifications { | ||
23 | void (*kspd_sp_exit)(int sp_id); | ||
24 | |||
25 | struct list_head list; | ||
26 | }; | ||
27 | |||
28 | #ifdef CONFIG_MIPS_APSP_KSPD | ||
29 | extern void kspd_notify(struct kspd_notifications *notify); | ||
30 | #else | ||
31 | static inline void kspd_notify(struct kspd_notifications *notify) | ||
32 | { | ||
33 | } | ||
34 | #endif | ||
35 | |||
36 | #endif | ||
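An illustrative client of the notifier interface declared above; the callback, variable, and init function names are hypothetical, and kspd_notify() collapses to a no-op unless CONFIG_MIPS_APSP_KSPD is enabled:

    static void my_sp_exit(int sp_id)
    {
            printk(KERN_INFO "SP program %d exited\n", sp_id);
    }

    static struct kspd_notifications my_notify = {
            .kspd_sp_exit = my_sp_exit,
    };

    static int __init my_module_init(void)
    {
            kspd_notify(&my_notify);
            return 0;
    }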
diff --git a/include/asm-mips/mach-generic/ide.h b/include/asm-mips/mach-generic/ide.h
index 550979a9ea9d..e3315359500a 100644
--- a/include/asm-mips/mach-generic/ide.h
+++ b/include/asm-mips/mach-generic/ide.h
@@ -104,65 +104,107 @@ static __inline__ unsigned long ide_default_io_base(int index) | |||
104 | #endif | 104 | #endif |
105 | 105 | ||
106 | /* MIPS port and memory-mapped I/O string operations. */ | 106 | /* MIPS port and memory-mapped I/O string operations. */ |
107 | static inline void __ide_flush_prologue(void) | ||
108 | { | ||
109 | #ifdef CONFIG_SMP | ||
110 | if (cpu_has_dc_aliases) | ||
111 | preempt_disable(); | ||
112 | #endif | ||
113 | } | ||
114 | |||
115 | static inline void __ide_flush_epilogue(void) | ||
116 | { | ||
117 | #ifdef CONFIG_SMP | ||
118 | if (cpu_has_dc_aliases) | ||
119 | preempt_enable(); | ||
120 | #endif | ||
121 | } | ||
107 | 122 | ||
108 | static inline void __ide_flush_dcache_range(unsigned long addr, unsigned long size) | 123 | static inline void __ide_flush_dcache_range(unsigned long addr, unsigned long size) |
109 | { | 124 | { |
110 | if (cpu_has_dc_aliases) { | 125 | if (cpu_has_dc_aliases) { |
111 | unsigned long end = addr + size; | 126 | unsigned long end = addr + size; |
112 | for (; addr < end; addr += PAGE_SIZE) | 127 | |
113 | flush_dcache_page(virt_to_page(addr)); | 128 | while (addr < end) { |
129 | local_flush_data_cache_page((void *)addr); | ||
130 | addr += PAGE_SIZE; | ||
131 | } | ||
114 | } | 132 | } |
115 | } | 133 | } |
116 | 134 | ||
135 | /* | ||
136 | * insw() and gang might be called with interrupts disabled, so we can't | ||
137 | * send IPIs for flushing due to the potential of deadlocks, see the comment | ||
138 | * above smp_call_function() in arch/mips/kernel/smp.c. We work around the | ||
139 | * problem by disabling preemption so we know we actually perform the flush | ||
140 | * on the processor that actually has the lines to be flushed which hopefully | ||
141 | * is even better for performance anyway. | ||
142 | */ | ||
117 | static inline void __ide_insw(unsigned long port, void *addr, | 143 | static inline void __ide_insw(unsigned long port, void *addr, |
118 | unsigned int count) | 144 | unsigned int count) |
119 | { | 145 | { |
146 | __ide_flush_prologue(); | ||
120 | insw(port, addr, count); | 147 | insw(port, addr, count); |
121 | __ide_flush_dcache_range((unsigned long)addr, count * 2); | 148 | __ide_flush_dcache_range((unsigned long)addr, count * 2); |
149 | __ide_flush_epilogue(); | ||
122 | } | 150 | } |
123 | 151 | ||
124 | static inline void __ide_insl(unsigned long port, void *addr, unsigned int count) | 152 | static inline void __ide_insl(unsigned long port, void *addr, unsigned int count) |
125 | { | 153 | { |
154 | __ide_flush_prologue(); | ||
126 | insl(port, addr, count); | 155 | insl(port, addr, count); |
127 | __ide_flush_dcache_range((unsigned long)addr, count * 4); | 156 | __ide_flush_dcache_range((unsigned long)addr, count * 4); |
157 | __ide_flush_epilogue(); | ||
128 | } | 158 | } |
129 | 159 | ||
130 | static inline void __ide_outsw(unsigned long port, const void *addr, | 160 | static inline void __ide_outsw(unsigned long port, const void *addr, |
131 | unsigned long count) | 161 | unsigned long count) |
132 | { | 162 | { |
163 | __ide_flush_prologue(); | ||
133 | outsw(port, addr, count); | 164 | outsw(port, addr, count); |
134 | __ide_flush_dcache_range((unsigned long)addr, count * 2); | 165 | __ide_flush_dcache_range((unsigned long)addr, count * 2); |
166 | __ide_flush_epilogue(); | ||
135 | } | 167 | } |
136 | 168 | ||
137 | static inline void __ide_outsl(unsigned long port, const void *addr, | 169 | static inline void __ide_outsl(unsigned long port, const void *addr, |
138 | unsigned long count) | 170 | unsigned long count) |
139 | { | 171 | { |
172 | __ide_flush_prologue(); | ||
140 | outsl(port, addr, count); | 173 | outsl(port, addr, count); |
141 | __ide_flush_dcache_range((unsigned long)addr, count * 4); | 174 | __ide_flush_dcache_range((unsigned long)addr, count * 4); |
175 | __ide_flush_epilogue(); | ||
142 | } | 176 | } |
143 | 177 | ||
144 | static inline void __ide_mm_insw(void __iomem *port, void *addr, u32 count) | 178 | static inline void __ide_mm_insw(void __iomem *port, void *addr, u32 count) |
145 | { | 179 | { |
180 | __ide_flush_prologue(); | ||
146 | readsw(port, addr, count); | 181 | readsw(port, addr, count); |
147 | __ide_flush_dcache_range((unsigned long)addr, count * 2); | 182 | __ide_flush_dcache_range((unsigned long)addr, count * 2); |
183 | __ide_flush_epilogue(); | ||
148 | } | 184 | } |
149 | 185 | ||
150 | static inline void __ide_mm_insl(void __iomem *port, void *addr, u32 count) | 186 | static inline void __ide_mm_insl(void __iomem *port, void *addr, u32 count) |
151 | { | 187 | { |
188 | __ide_flush_prologue(); | ||
152 | readsl(port, addr, count); | 189 | readsl(port, addr, count); |
153 | __ide_flush_dcache_range((unsigned long)addr, count * 4); | 190 | __ide_flush_dcache_range((unsigned long)addr, count * 4); |
191 | __ide_flush_epilogue(); | ||
154 | } | 192 | } |
155 | 193 | ||
156 | static inline void __ide_mm_outsw(void __iomem *port, void *addr, u32 count) | 194 | static inline void __ide_mm_outsw(void __iomem *port, void *addr, u32 count) |
157 | { | 195 | { |
196 | __ide_flush_prologue(); | ||
158 | writesw(port, addr, count); | 197 | writesw(port, addr, count); |
159 | __ide_flush_dcache_range((unsigned long)addr, count * 2); | 198 | __ide_flush_dcache_range((unsigned long)addr, count * 2); |
199 | __ide_flush_epilogue(); | ||
160 | } | 200 | } |
161 | 201 | ||
162 | static inline void __ide_mm_outsl(void __iomem * port, void *addr, u32 count) | 202 | static inline void __ide_mm_outsl(void __iomem * port, void *addr, u32 count) |
163 | { | 203 | { |
204 | __ide_flush_prologue(); | ||
164 | writesl(port, addr, count); | 205 | writesl(port, addr, count); |
165 | __ide_flush_dcache_range((unsigned long)addr, count * 4); | 206 | __ide_flush_dcache_range((unsigned long)addr, count * 4); |
207 | __ide_flush_epilogue(); | ||
166 | } | 208 | } |
167 | 209 | ||
168 | /* ide_insw calls insw, not __ide_insw. Why? */ | 210 | /* ide_insw calls insw, not __ide_insw. Why? */ |
diff --git a/include/asm-mips/mach-jmr3927/ds1742.h b/include/asm-mips/mach-jmr3927/ds1742.h
index cff6192d4bdb..8a8fef6d07fa 100644
--- a/include/asm-mips/mach-jmr3927/ds1742.h
+++ b/include/asm-mips/mach-jmr3927/ds1742.h
@@ -3,14 +3,14 @@ | |||
3 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
4 | * for more details. | 4 | * for more details. |
5 | * | 5 | * |
6 | * Copyright (C) 2003 by Ralf Baechle | 6 | * Copyright (C) 2003, 06 by Ralf Baechle |
7 | */ | 7 | */ |
8 | #ifndef __ASM_MACH_JMR3927_DS1742_H | 8 | #ifndef __ASM_MACH_JMR3927_DS1742_H |
9 | #define __ASM_MACH_JMR3927_DS1742_H | 9 | #define __ASM_MACH_JMR3927_DS1742_H |
10 | 10 | ||
11 | #include <asm/jmr3927/jmr3927.h> | 11 | #include <asm/jmr3927/jmr3927.h> |
12 | 12 | ||
13 | #define rtc_read(reg) (jmr3927_nvram_in(addr)) | 13 | #define rtc_read(reg) (jmr3927_nvram_in(reg)) |
14 | #define rtc_write(data, reg) (jmr3927_nvram_out((data),(reg))) | 14 | #define rtc_write(data, reg) (jmr3927_nvram_out((data),(reg))) |
15 | 15 | ||
16 | #endif /* __ASM_MACH_JMR3927_DS1742_H */ | 16 | #endif /* __ASM_MACH_JMR3927_DS1742_H */ |
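The one-character fix above is easiest to see as a macro expansion; before it, rtc_read() referenced whatever 'addr' happened to mean at the call site instead of its own argument (RTC_SECONDS below is just an example register offset):

    /* old: rtc_read(RTC_SECONDS)  ==>  jmr3927_nvram_in(addr)         */
    /* new: rtc_read(RTC_SECONDS)  ==>  jmr3927_nvram_in(RTC_SECONDS)  */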
diff --git a/include/asm-mips/mach-mips/param.h b/include/asm-mips/mach-mips/param.h
new file mode 100644
index 000000000000..805ef6d27d3c
--- /dev/null
+++ b/include/asm-mips/mach-mips/param.h
@@ -0,0 +1,13 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2003 by Ralf Baechle | ||
7 | */ | ||
8 | #ifndef __ASM_MACH_MIPS_PARAM_H | ||
9 | #define __ASM_MACH_MIPS_PARAM_H | ||
10 | |||
11 | #define HZ 100 /* Internal kernel timer frequency */ | ||
12 | |||
13 | #endif /* __ASM_MACH_MIPS_PARAM_H */ | ||
diff --git a/include/asm-mips/marvell.h b/include/asm-mips/marvell.h
index 9225b3397a4f..6bb2125bb053 100644
--- a/include/asm-mips/marvell.h
+++ b/include/asm-mips/marvell.h
@@ -53,4 +53,6 @@ struct mv_pci_controller { | |||
53 | unsigned long config_vreg; | 53 | unsigned long config_vreg; |
54 | }; | 54 | }; |
55 | 55 | ||
56 | extern void ll_mv64340_irq(struct pt_regs *regs); | ||
57 | |||
56 | #endif /* __ASM_MIPS_MARVELL_H */ | 58 | #endif /* __ASM_MIPS_MARVELL_H */ |
diff --git a/include/asm-mips/mips-boards/atlas.h b/include/asm-mips/mips-boards/atlas.h
index 0998151fb3a1..a8ae12d120ee 100644
--- a/include/asm-mips/mips-boards/atlas.h
+++ b/include/asm-mips/mips-boards/atlas.h
@@ -33,13 +33,29 @@ | |||
33 | #define ATLAS_RTC_ADR_REG 0x1f000800 | 33 | #define ATLAS_RTC_ADR_REG 0x1f000800 |
34 | #define ATLAS_RTC_DAT_REG 0x1f000808 | 34 | #define ATLAS_RTC_DAT_REG 0x1f000808 |
35 | 35 | ||
36 | |||
37 | /* | 36 | /* |
38 | * Atlas interrupt controller register base. | 37 | * Atlas interrupt controller register base. |
39 | */ | 38 | */ |
40 | #define ATLAS_ICTRL_REGS_BASE 0x1f000000 | 39 | #define ATLAS_ICTRL_REGS_BASE 0x1f000000 |
41 | 40 | ||
42 | /* | 41 | /* |
42 | * Atlas registers are memory mapped on 64-bit aligned boundaries and | ||
43 | * only word access are allowed. | ||
44 | */ | ||
45 | struct atlas_ictrl_regs { | ||
46 | volatile unsigned int intraw; | ||
47 | int dummy1; | ||
48 | volatile unsigned int intseten; | ||
49 | int dummy2; | ||
50 | volatile unsigned int intrsten; | ||
51 | int dummy3; | ||
52 | volatile unsigned int intenable; | ||
53 | int dummy4; | ||
54 | volatile unsigned int intstatus; | ||
55 | int dummy5; | ||
56 | }; | ||
57 | |||
58 | /* | ||
43 | * Atlas UART register base. | 59 | * Atlas UART register base. |
44 | */ | 60 | */ |
45 | #define ATLAS_UART_REGS_BASE 0x1f000900 | 61 | #define ATLAS_UART_REGS_BASE 0x1f000900 |
diff --git a/include/asm-mips/mips-boards/atlasint.h b/include/asm-mips/mips-boards/atlasint.h
index bba35c183d08..fd7ebc54fa90 100644
--- a/include/asm-mips/mips-boards/atlasint.h
+++ b/include/asm-mips/mips-boards/atlasint.h
@@ -62,23 +62,4 @@ | |||
62 | #define ATLASINT_RES31 (ATLASINT_BASE+31) | 62 | #define ATLASINT_RES31 (ATLASINT_BASE+31) |
63 | #define ATLASINT_END (ATLASINT_BASE+31) | 63 | #define ATLASINT_END (ATLASINT_BASE+31) |
64 | 64 | ||
65 | /* | ||
66 | * Atlas registers are memory mapped on 64-bit aligned boundaries and | ||
67 | * only word access are allowed. | ||
68 | */ | ||
69 | struct atlas_ictrl_regs { | ||
70 | volatile unsigned int intraw; | ||
71 | int dummy1; | ||
72 | volatile unsigned int intseten; | ||
73 | int dummy2; | ||
74 | volatile unsigned int intrsten; | ||
75 | int dummy3; | ||
76 | volatile unsigned int intenable; | ||
77 | int dummy4; | ||
78 | volatile unsigned int intstatus; | ||
79 | int dummy5; | ||
80 | }; | ||
81 | |||
82 | extern void atlasint_init(void); | ||
83 | |||
84 | #endif /* !(_MIPS_ATLASINT_H) */ | 65 | #endif /* !(_MIPS_ATLASINT_H) */ |
diff --git a/include/asm-mips/mips_mt.h b/include/asm-mips/mips_mt.h
new file mode 100644
index 000000000000..c31a312b9783
--- /dev/null
+++ b/include/asm-mips/mips_mt.h
@@ -0,0 +1,15 @@ | |||
1 | /* | ||
2 | * Definitions and declarations for MIPS MT support | ||
3 | * that are common between SMTC, VSMP, and/or AP/SP | ||
4 | * kernel models. | ||
5 | */ | ||
6 | #ifndef __ASM_MIPS_MT_H | ||
7 | #define __ASM_MIPS_MT_H | ||
8 | |||
9 | extern cpumask_t mt_fpu_cpumask; | ||
10 | extern unsigned long mt_fpemul_threshold; | ||
11 | |||
12 | extern void mips_mt_regdump(unsigned long previous_mvpcontrol_value); | ||
13 | extern void mips_mt_set_cpuoptions(void); | ||
14 | |||
15 | #endif /* __ASM_MIPS_MT_H */ | ||
diff --git a/include/asm-mips/mipsmtregs.h b/include/asm-mips/mipsmtregs.h
index a669c0702c66..f637ce70758f 100644
--- a/include/asm-mips/mipsmtregs.h
+++ b/include/asm-mips/mipsmtregs.h
@@ -165,7 +165,7 @@ | |||
165 | 165 | ||
166 | #ifndef __ASSEMBLY__ | 166 | #ifndef __ASSEMBLY__ |
167 | 167 | ||
168 | extern void mips_mt_regdump(void); | 168 | extern void mips_mt_regdump(unsigned long previous_mvpcontrol_value); |
169 | 169 | ||
170 | static inline unsigned int dvpe(void) | 170 | static inline unsigned int dvpe(void) |
171 | { | 171 | { |
@@ -234,7 +234,7 @@ static inline void __raw_emt(void) | |||
234 | __asm__ __volatile__( | 234 | __asm__ __volatile__( |
235 | " .set noreorder \n" | 235 | " .set noreorder \n" |
236 | " .set mips32r2 \n" | 236 | " .set mips32r2 \n" |
237 | " emt \n" | 237 | " .word 0x41600be1 # emt \n" |
238 | " ehb \n" | 238 | " ehb \n" |
239 | " .set mips0 \n" | 239 | " .set mips0 \n" |
240 | " .set reorder"); | 240 | " .set reorder"); |
@@ -282,8 +282,11 @@ static inline void ehb(void) | |||
282 | \ | 282 | \ |
283 | __asm__ __volatile__( \ | 283 | __asm__ __volatile__( \ |
284 | " .set push \n" \ | 284 | " .set push \n" \ |
285 | " .set noat \n" \ | ||
285 | " .set mips32r2 \n" \ | 286 | " .set mips32r2 \n" \ |
286 | " mftgpr %0," #rt " \n" \ | 287 | " # mftgpr $1," #rt " \n" \ |
288 | " .word 0x41000820 | (" #rt " << 16) \n" \ | ||
289 | " move %0, $1 \n" \ | ||
287 | " .set pop \n" \ | 290 | " .set pop \n" \ |
288 | : "=r" (__res)); \ | 291 | : "=r" (__res)); \ |
289 | \ | 292 | \ |
@@ -295,9 +298,7 @@ static inline void ehb(void) | |||
295 | unsigned long __res; \ | 298 | unsigned long __res; \ |
296 | \ | 299 | \ |
297 | __asm__ __volatile__( \ | 300 | __asm__ __volatile__( \ |
298 | ".set noat\n\t" \ | 301 | " mftr %0, " #rt ", " #u ", " #sel " \n" \ |
299 | "mftr\t%0, " #rt ", " #u ", " #sel "\n\t" \ | ||
300 | ".set at\n\t" \ | ||
301 | : "=r" (__res)); \ | 302 | : "=r" (__res)); \ |
302 | \ | 303 | \ |
303 | __res; \ | 304 | __res; \ |
@@ -364,6 +365,9 @@ do { \ | |||
364 | #define read_vpe_c0_ebase() mftc0(15,1) | 365 | #define read_vpe_c0_ebase() mftc0(15,1) |
365 | #define write_vpe_c0_ebase(val) mttc0(15, 1, val) | 366 | #define write_vpe_c0_ebase(val) mttc0(15, 1, val) |
366 | #define write_vpe_c0_compare(val) mttc0(11, 0, val) | 367 | #define write_vpe_c0_compare(val) mttc0(11, 0, val) |
368 | #define read_vpe_c0_badvaddr() mftc0(8, 0) | ||
369 | #define read_vpe_c0_epc() mftc0(14, 0) | ||
370 | #define write_vpe_c0_epc(val) mttc0(14, 0, val) | ||
367 | 371 | ||
368 | 372 | ||
369 | /* TC */ | 373 | /* TC */ |
diff --git a/include/asm-mips/mipsregs.h b/include/asm-mips/mipsregs.h
index 035ba0a9b0df..a2ef579f6b1a 100644
--- a/include/asm-mips/mipsregs.h
+++ b/include/asm-mips/mipsregs.h
@@ -836,6 +836,9 @@ do { \ | |||
836 | #define read_c0_cache() __read_32bit_c0_register($7, 0) /* TX39xx */ | 836 | #define read_c0_cache() __read_32bit_c0_register($7, 0) /* TX39xx */ |
837 | #define write_c0_cache(val) __write_32bit_c0_register($7, 0, val) | 837 | #define write_c0_cache(val) __write_32bit_c0_register($7, 0, val) |
838 | 838 | ||
839 | #define read_c0_badvaddr() __read_ulong_c0_register($8, 0) | ||
840 | #define write_c0_badvaddr(val) __write_ulong_c0_register($8, 0, val) | ||
841 | |||
839 | #define read_c0_count() __read_32bit_c0_register($9, 0) | 842 | #define read_c0_count() __read_32bit_c0_register($9, 0) |
840 | #define write_c0_count(val) __write_32bit_c0_register($9, 0, val) | 843 | #define write_c0_count(val) __write_32bit_c0_register($9, 0, val) |
841 | 844 | ||
@@ -858,7 +861,19 @@ do { \ | |||
858 | #define write_c0_compare3(val) __write_32bit_c0_register($11, 7, val) | 861 | #define write_c0_compare3(val) __write_32bit_c0_register($11, 7, val) |
859 | 862 | ||
860 | #define read_c0_status() __read_32bit_c0_register($12, 0) | 863 | #define read_c0_status() __read_32bit_c0_register($12, 0) |
864 | #ifdef CONFIG_MIPS_MT_SMTC | ||
865 | #define write_c0_status(val) \ | ||
866 | do { \ | ||
867 | __write_32bit_c0_register($12, 0, val); \ | ||
868 | __ehb(); \ | ||
869 | } while (0) | ||
870 | #else | ||
871 | /* | ||
872 | * Legacy non-SMTC code, which may be hazardous | ||
873 | * but which might not support EHB | ||
874 | */ | ||
861 | #define write_c0_status(val) __write_32bit_c0_register($12, 0, val) | 875 | #define write_c0_status(val) __write_32bit_c0_register($12, 0, val) |
876 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
862 | 877 | ||
863 | #define read_c0_cause() __read_32bit_c0_register($13, 0) | 878 | #define read_c0_cause() __read_32bit_c0_register($13, 0) |
864 | #define write_c0_cause(val) __write_32bit_c0_register($13, 0, val) | 879 | #define write_c0_cause(val) __write_32bit_c0_register($13, 0, val) |
@@ -1001,6 +1016,9 @@ do { \ | |||
1001 | #define read_c0_taglo() __read_32bit_c0_register($28, 0) | 1016 | #define read_c0_taglo() __read_32bit_c0_register($28, 0) |
1002 | #define write_c0_taglo(val) __write_32bit_c0_register($28, 0, val) | 1017 | #define write_c0_taglo(val) __write_32bit_c0_register($28, 0, val) |
1003 | 1018 | ||
1019 | #define read_c0_dtaglo() __read_32bit_c0_register($28, 2) | ||
1020 | #define write_c0_dtaglo(val) __write_32bit_c0_register($28, 2, val) | ||
1021 | |||
1004 | #define read_c0_taghi() __read_32bit_c0_register($29, 0) | 1022 | #define read_c0_taghi() __read_32bit_c0_register($29, 0) |
1005 | #define write_c0_taghi(val) __write_32bit_c0_register($29, 0, val) | 1023 | #define write_c0_taghi(val) __write_32bit_c0_register($29, 0, val) |
1006 | 1024 | ||
@@ -1354,15 +1372,119 @@ static inline void tlb_write_random(void) | |||
1354 | /* | 1372 | /* |
1355 | * Manipulate bits in a c0 register. | 1373 | * Manipulate bits in a c0 register. |
1356 | */ | 1374 | */ |
1375 | #ifndef CONFIG_MIPS_MT_SMTC | ||
1376 | /* | ||
1377 | * SMTC Linux requires shutting-down microthread scheduling | ||
1378 | * during CP0 register read-modify-write sequences. | ||
1379 | */ | ||
1380 | #define __BUILD_SET_C0(name) \ | ||
1381 | static inline unsigned int \ | ||
1382 | set_c0_##name(unsigned int set) \ | ||
1383 | { \ | ||
1384 | unsigned int res; \ | ||
1385 | \ | ||
1386 | res = read_c0_##name(); \ | ||
1387 | res |= set; \ | ||
1388 | write_c0_##name(res); \ | ||
1389 | \ | ||
1390 | return res; \ | ||
1391 | } \ | ||
1392 | \ | ||
1393 | static inline unsigned int \ | ||
1394 | clear_c0_##name(unsigned int clear) \ | ||
1395 | { \ | ||
1396 | unsigned int res; \ | ||
1397 | \ | ||
1398 | res = read_c0_##name(); \ | ||
1399 | res &= ~clear; \ | ||
1400 | write_c0_##name(res); \ | ||
1401 | \ | ||
1402 | return res; \ | ||
1403 | } \ | ||
1404 | \ | ||
1405 | static inline unsigned int \ | ||
1406 | change_c0_##name(unsigned int change, unsigned int new) \ | ||
1407 | { \ | ||
1408 | unsigned int res; \ | ||
1409 | \ | ||
1410 | res = read_c0_##name(); \ | ||
1411 | res &= ~change; \ | ||
1412 | res |= (new & change); \ | ||
1413 | write_c0_##name(res); \ | ||
1414 | \ | ||
1415 | return res; \ | ||
1416 | } | ||
1417 | |||
1418 | #else /* SMTC versions that manage MT scheduling */ | ||
1419 | |||
1420 | #include <asm/interrupt.h> | ||
1421 | |||
1422 | /* | ||
1423 | * This is a duplicate of dmt() in mipsmtregs.h to avoid problems with | ||
1424 | * header file recursion. | ||
1425 | */ | ||
1426 | static inline unsigned int __dmt(void) | ||
1427 | { | ||
1428 | int res; | ||
1429 | |||
1430 | __asm__ __volatile__( | ||
1431 | " .set push \n" | ||
1432 | " .set mips32r2 \n" | ||
1433 | " .set noat \n" | ||
1434 | " .word 0x41610BC1 # dmt $1 \n" | ||
1435 | " ehb \n" | ||
1436 | " move %0, $1 \n" | ||
1437 | " .set pop \n" | ||
1438 | : "=r" (res)); | ||
1439 | |||
1440 | instruction_hazard(); | ||
1441 | |||
1442 | return res; | ||
1443 | } | ||
1444 | |||
1445 | #define __VPECONTROL_TE_SHIFT 15 | ||
1446 | #define __VPECONTROL_TE (1UL << __VPECONTROL_TE_SHIFT) | ||
1447 | |||
1448 | #define __EMT_ENABLE __VPECONTROL_TE | ||
1449 | |||
1450 | static inline void __emt(unsigned int previous) | ||
1451 | { | ||
1452 | if ((previous & __EMT_ENABLE)) | ||
1453 | __asm__ __volatile__( | ||
1454 | " .set noreorder \n" | ||
1455 | " .set mips32r2 \n" | ||
1456 | " .word 0x41600be1 # emt \n" | ||
1457 | " ehb \n" | ||
1458 | " .set mips0 \n" | ||
1459 | " .set reorder \n"); | ||
1460 | } | ||
1461 | |||
1462 | static inline void __ehb(void) | ||
1463 | { | ||
1464 | __asm__ __volatile__( | ||
1465 | " ehb \n"); | ||
1466 | } | ||
1467 | |||
1468 | /* | ||
1469 | * Note that local_irq_save/restore affect TC-specific IXMT state, | ||
1470 | * not Status.IE as in a non-SMTC kernel. | ||
1471 | */ | ||
1472 | |||
1357 | #define __BUILD_SET_C0(name) \ | 1473 | #define __BUILD_SET_C0(name) \ |
1358 | static inline unsigned int \ | 1474 | static inline unsigned int \ |
1359 | set_c0_##name(unsigned int set) \ | 1475 | set_c0_##name(unsigned int set) \ |
1360 | { \ | 1476 | { \ |
1361 | unsigned int res; \ | 1477 | unsigned int res; \ |
1478 | unsigned int omt; \ | ||
1479 | unsigned int flags; \ | ||
1362 | \ | 1480 | \ |
1481 | local_irq_save(flags); \ | ||
1482 | omt = __dmt(); \ | ||
1363 | res = read_c0_##name(); \ | 1483 | res = read_c0_##name(); \ |
1364 | res |= set; \ | 1484 | res |= set; \ |
1365 | write_c0_##name(res); \ | 1485 | write_c0_##name(res); \ |
1486 | __emt(omt); \ | ||
1487 | local_irq_restore(flags); \ | ||
1366 | \ | 1488 | \ |
1367 | return res; \ | 1489 | return res; \ |
1368 | } \ | 1490 | } \ |
@@ -1371,10 +1493,16 @@ static inline unsigned int \ | |||
1371 | clear_c0_##name(unsigned int clear) \ | 1493 | clear_c0_##name(unsigned int clear) \ |
1372 | { \ | 1494 | { \ |
1373 | unsigned int res; \ | 1495 | unsigned int res; \ |
1496 | unsigned int omt; \ | ||
1497 | unsigned int flags; \ | ||
1374 | \ | 1498 | \ |
1499 | local_irq_save(flags); \ | ||
1500 | omt = __dmt(); \ | ||
1375 | res = read_c0_##name(); \ | 1501 | res = read_c0_##name(); \ |
1376 | res &= ~clear; \ | 1502 | res &= ~clear; \ |
1377 | write_c0_##name(res); \ | 1503 | write_c0_##name(res); \ |
1504 | __emt(omt); \ | ||
1505 | local_irq_restore(flags); \ | ||
1378 | \ | 1506 | \ |
1379 | return res; \ | 1507 | return res; \ |
1380 | } \ | 1508 | } \ |
@@ -1383,14 +1511,22 @@ static inline unsigned int \ | |||
1383 | change_c0_##name(unsigned int change, unsigned int new) \ | 1511 | change_c0_##name(unsigned int change, unsigned int new) \ |
1384 | { \ | 1512 | { \ |
1385 | unsigned int res; \ | 1513 | unsigned int res; \ |
1514 | unsigned int omt; \ | ||
1515 | unsigned int flags; \ | ||
1386 | \ | 1516 | \ |
1517 | local_irq_save(flags); \ | ||
1518 | \ | ||
1519 | omt = __dmt(); \ | ||
1387 | res = read_c0_##name(); \ | 1520 | res = read_c0_##name(); \ |
1388 | res &= ~change; \ | 1521 | res &= ~change; \ |
1389 | res |= (new & change); \ | 1522 | res |= (new & change); \ |
1390 | write_c0_##name(res); \ | 1523 | write_c0_##name(res); \ |
1524 | __emt(omt); \ | ||
1525 | local_irq_restore(flags); \ | ||
1391 | \ | 1526 | \ |
1392 | return res; \ | 1527 | return res; \ |
1393 | } | 1528 | } |
1529 | #endif | ||
1394 | 1530 | ||
1395 | __BUILD_SET_C0(status) | 1531 | __BUILD_SET_C0(status) |
1396 | __BUILD_SET_C0(cause) | 1532 | __BUILD_SET_C0(cause) |
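For reference, here is roughly what the SMTC flavour of the macro above produces for the Status instance built at the end of the file. The helpers (__dmt, __emt, read_c0_status, write_c0_status) are the ones defined earlier in this header; the expansion is written out by hand as an illustration, not a literal preprocessor dump.

    /* Illustrative expansion of set_c0_status() under CONFIG_MIPS_MT_SMTC */
    static inline unsigned int set_c0_status(unsigned int set)
    {
        unsigned int res;
        unsigned int omt;
        unsigned int flags;

        local_irq_save(flags);    /* mask interrupts for this TC */
        omt = __dmt();            /* stop the other hardware threads */
        res = read_c0_status();
        res |= set;
        write_c0_status(res);     /* the SMTC variant also issues an ehb */
        __emt(omt);               /* restart threads only if they were running */
        local_irq_restore(flags);

        return res;
    }

The non-SMTC build compiles the same read-modify-write without the DMT/EMT bracketing, which is why the two variants are kept textually separate above.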
diff --git a/include/asm-mips/mmu_context.h b/include/asm-mips/mmu_context.h index 61cf22588137..6e09f4c87211 100644 --- a/include/asm-mips/mmu_context.h +++ b/include/asm-mips/mmu_context.h | |||
@@ -17,6 +17,10 @@ | |||
17 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
18 | #include <asm/cacheflush.h> | 18 | #include <asm/cacheflush.h> |
19 | #include <asm/tlbflush.h> | 19 | #include <asm/tlbflush.h> |
20 | #ifdef CONFIG_MIPS_MT_SMTC | ||
21 | #include <asm/mipsmtregs.h> | ||
22 | #include <asm/smtc.h> | ||
23 | #endif /* SMTC */ | ||
20 | 24 | ||
21 | /* | 25 | /* |
22 | * For the fast tlb miss handlers, we keep a per cpu array of pointers | 26 | * For the fast tlb miss handlers, we keep a per cpu array of pointers |
@@ -54,6 +58,14 @@ extern unsigned long pgd_current[]; | |||
54 | #define ASID_INC 0x1 | 58 | #define ASID_INC 0x1 |
55 | #define ASID_MASK 0xfff | 59 | #define ASID_MASK 0xfff |
56 | 60 | ||
61 | /* SMTC/34K debug hack - but maybe we'll keep it */ | ||
62 | #elif defined(CONFIG_MIPS_MT_SMTC) | ||
63 | |||
64 | #define ASID_INC 0x1 | ||
65 | extern unsigned long smtc_asid_mask; | ||
66 | #define ASID_MASK (smtc_asid_mask) | ||
67 | #define HW_ASID_MASK 0xff | ||
68 | /* End SMTC/34K debug hack */ | ||
57 | #else /* FIXME: not correct for R6000 */ | 69 | #else /* FIXME: not correct for R6000 */ |
58 | 70 | ||
59 | #define ASID_INC 0x1 | 71 | #define ASID_INC 0x1 |
@@ -76,6 +88,8 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) | |||
76 | #define ASID_VERSION_MASK ((unsigned long)~(ASID_MASK|(ASID_MASK-1))) | 88 | #define ASID_VERSION_MASK ((unsigned long)~(ASID_MASK|(ASID_MASK-1))) |
77 | #define ASID_FIRST_VERSION ((unsigned long)(~ASID_VERSION_MASK) + 1) | 89 | #define ASID_FIRST_VERSION ((unsigned long)(~ASID_VERSION_MASK) + 1) |
78 | 90 | ||
91 | #ifndef CONFIG_MIPS_MT_SMTC | ||
92 | /* Normal, classic MIPS get_new_mmu_context */ | ||
79 | static inline void | 93 | static inline void |
80 | get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) | 94 | get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) |
81 | { | 95 | { |
@@ -91,6 +105,12 @@ get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) | |||
91 | cpu_context(cpu, mm) = asid_cache(cpu) = asid; | 105 | cpu_context(cpu, mm) = asid_cache(cpu) = asid; |
92 | } | 106 | } |
93 | 107 | ||
108 | #else /* CONFIG_MIPS_MT_SMTC */ | ||
109 | |||
110 | #define get_new_mmu_context(mm,cpu) smtc_get_new_mmu_context((mm),(cpu)) | ||
111 | |||
112 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
113 | |||
94 | /* | 114 | /* |
95 | * Initialize the context related info for a new mm_struct | 115 | * Initialize the context related info for a new mm_struct |
96 | * instance. | 116 | * instance. |
@@ -111,14 +131,46 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, | |||
111 | { | 131 | { |
112 | unsigned int cpu = smp_processor_id(); | 132 | unsigned int cpu = smp_processor_id(); |
113 | unsigned long flags; | 133 | unsigned long flags; |
114 | 134 | #ifdef CONFIG_MIPS_MT_SMTC | |
135 | unsigned long oldasid; | ||
136 | unsigned long mtflags; | ||
137 | int mytlb = (smtc_status & SMTC_TLB_SHARED) ? 0 : cpu_data[cpu].vpe_id; | ||
115 | local_irq_save(flags); | 138 | local_irq_save(flags); |
139 | mtflags = dvpe(); | ||
140 | #else /* Not SMTC */ | ||
141 | local_irq_save(flags); | ||
142 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
116 | 143 | ||
117 | /* Check if our ASID is of an older version and thus invalid */ | 144 | /* Check if our ASID is of an older version and thus invalid */ |
118 | if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK) | 145 | if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK) |
119 | get_new_mmu_context(next, cpu); | 146 | get_new_mmu_context(next, cpu); |
120 | 147 | #ifdef CONFIG_MIPS_MT_SMTC | |
148 | /* | ||
149 | * If the EntryHi ASID being replaced happens to be | ||
150 | * the value flagged at ASID recycling time as having | ||
151 | * an extended life, clear the bit showing it being | ||
152 | * in use by this "CPU", and if that's the last bit, | ||
153 | * free up the ASID value for use and flush any old | ||
154 | * instances of it from the TLB. | ||
155 | */ | ||
156 | oldasid = (read_c0_entryhi() & ASID_MASK); | ||
157 | if(smtc_live_asid[mytlb][oldasid]) { | ||
158 | smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu); | ||
159 | if(smtc_live_asid[mytlb][oldasid] == 0) | ||
160 | smtc_flush_tlb_asid(oldasid); | ||
161 | } | ||
162 | /* | ||
163 | * Tread softly on EntryHi, and so long as we support | ||
164 | * having ASID_MASK smaller than the hardware maximum, | ||
165 | * make sure no "soft" bits become "hard"... | ||
166 | */ | ||
167 | write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) | ||
168 | | (cpu_context(cpu, next) & ASID_MASK)); | ||
169 | ehb(); /* Make sure it propagates to TCStatus */ | ||
170 | evpe(mtflags); | ||
171 | #else | ||
121 | write_c0_entryhi(cpu_context(cpu, next)); | 172 | write_c0_entryhi(cpu_context(cpu, next)); |
173 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
122 | TLBMISS_HANDLER_SETUP_PGD(next->pgd); | 174 | TLBMISS_HANDLER_SETUP_PGD(next->pgd); |
123 | 175 | ||
124 | /* | 176 | /* |
@@ -151,12 +203,34 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next) | |||
151 | unsigned long flags; | 203 | unsigned long flags; |
152 | unsigned int cpu = smp_processor_id(); | 204 | unsigned int cpu = smp_processor_id(); |
153 | 205 | ||
206 | #ifdef CONFIG_MIPS_MT_SMTC | ||
207 | unsigned long oldasid; | ||
208 | unsigned long mtflags; | ||
209 | int mytlb = (smtc_status & SMTC_TLB_SHARED) ? 0 : cpu_data[cpu].vpe_id; | ||
210 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
211 | |||
154 | local_irq_save(flags); | 212 | local_irq_save(flags); |
155 | 213 | ||
156 | /* Unconditionally get a new ASID. */ | 214 | /* Unconditionally get a new ASID. */ |
157 | get_new_mmu_context(next, cpu); | 215 | get_new_mmu_context(next, cpu); |
158 | 216 | ||
217 | #ifdef CONFIG_MIPS_MT_SMTC | ||
218 | /* See comments for similar code above */ | ||
219 | mtflags = dvpe(); | ||
220 | oldasid = read_c0_entryhi() & ASID_MASK; | ||
221 | if(smtc_live_asid[mytlb][oldasid]) { | ||
222 | smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu); | ||
223 | if(smtc_live_asid[mytlb][oldasid] == 0) | ||
224 | smtc_flush_tlb_asid(oldasid); | ||
225 | } | ||
226 | /* See comments for similar code above */ | ||
227 | write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) | | ||
228 | (cpu_context(cpu, next) & ASID_MASK)); | ||
229 | ehb(); /* Make sure it propagates to TCStatus */ | ||
230 | evpe(mtflags); | ||
231 | #else | ||
159 | write_c0_entryhi(cpu_context(cpu, next)); | 232 | write_c0_entryhi(cpu_context(cpu, next)); |
233 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
160 | TLBMISS_HANDLER_SETUP_PGD(next->pgd); | 234 | TLBMISS_HANDLER_SETUP_PGD(next->pgd); |
161 | 235 | ||
162 | /* mark mmu ownership change */ | 236 | /* mark mmu ownership change */ |
@@ -174,17 +248,49 @@ static inline void | |||
174 | drop_mmu_context(struct mm_struct *mm, unsigned cpu) | 248 | drop_mmu_context(struct mm_struct *mm, unsigned cpu) |
175 | { | 249 | { |
176 | unsigned long flags; | 250 | unsigned long flags; |
251 | #ifdef CONFIG_MIPS_MT_SMTC | ||
252 | unsigned long oldasid; | ||
253 | /* Can't use spinlock because called from TLB flush within DVPE */ | ||
254 | unsigned int prevvpe; | ||
255 | int mytlb = (smtc_status & SMTC_TLB_SHARED) ? 0 : cpu_data[cpu].vpe_id; | ||
256 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
177 | 257 | ||
178 | local_irq_save(flags); | 258 | local_irq_save(flags); |
179 | 259 | ||
180 | if (cpu_isset(cpu, mm->cpu_vm_mask)) { | 260 | if (cpu_isset(cpu, mm->cpu_vm_mask)) { |
181 | get_new_mmu_context(mm, cpu); | 261 | get_new_mmu_context(mm, cpu); |
262 | #ifdef CONFIG_MIPS_MT_SMTC | ||
263 | /* See comments for similar code above */ | ||
264 | prevvpe = dvpe(); | ||
265 | oldasid = (read_c0_entryhi() & ASID_MASK); | ||
266 | if(smtc_live_asid[mytlb][oldasid]) { | ||
267 | smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu); | ||
268 | if(smtc_live_asid[mytlb][oldasid] == 0) | ||
269 | smtc_flush_tlb_asid(oldasid); | ||
270 | } | ||
271 | /* See comments for similar code above */ | ||
272 | write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) | ||
273 | | cpu_asid(cpu, mm)); | ||
274 | ehb(); /* Make sure it propagates to TCStatus */ | ||
275 | evpe(prevvpe); | ||
276 | #else /* not CONFIG_MIPS_MT_SMTC */ | ||
182 | write_c0_entryhi(cpu_asid(cpu, mm)); | 277 | write_c0_entryhi(cpu_asid(cpu, mm)); |
278 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
183 | } else { | 279 | } else { |
184 | /* will get a new context next time */ | 280 | /* will get a new context next time */ |
281 | #ifndef CONFIG_MIPS_MT_SMTC | ||
185 | cpu_context(cpu, mm) = 0; | 282 | cpu_context(cpu, mm) = 0; |
283 | #else /* SMTC */ | ||
284 | int i; | ||
285 | |||
286 | /* SMTC shares the TLB (and ASIDs) across VPEs */ | ||
287 | for (i = 0; i < num_online_cpus(); i++) { | ||
288 | if((smtc_status & SMTC_TLB_SHARED) | ||
289 | || (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id)) | ||
290 | cpu_context(i, mm) = 0; | ||
291 | } | ||
292 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
186 | } | 293 | } |
187 | |||
188 | local_irq_restore(flags); | 294 | local_irq_restore(flags); |
189 | } | 295 | } |
190 | 296 | ||
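The SMTC-specific fragments above repeat one pattern: when the ASID in EntryHi is being replaced, clear this CPU's bit in the live-ASID table and, only once no CPU still holds that ASID, flush its stale TLB entries. A simplified standalone model of just that bookkeeping follows; live_asid, flush_tlb_asid() and retire_asid() are invented stand-ins for smtc_live_asid, smtc_flush_tlb_asid() and the logic inlined in switch_mm()/activate_mm()/drop_mmu_context().

    #include <stdio.h>

    #define MAX_TLBS   2        /* mirrors MAX_SMTC_TLBS */
    #define MAX_ASIDS  256      /* mirrors MAX_SMTC_ASIDS */

    /* one bit per CPU, as in the asiduse table declared in smtc.h */
    static unsigned long live_asid[MAX_TLBS][MAX_ASIDS];

    static void flush_tlb_asid(unsigned long asid)
    {
        /* stand-in for smtc_flush_tlb_asid() */
        printf("flushing stale TLB entries for ASID %lu\n", asid);
    }

    /* What the SMTC paths above do with the ASID being replaced in EntryHi */
    static void retire_asid(int tlb, int cpu, unsigned long oldasid)
    {
        if (live_asid[tlb][oldasid]) {
            live_asid[tlb][oldasid] &= ~(1UL << cpu);
            if (live_asid[tlb][oldasid] == 0)
                flush_tlb_asid(oldasid);
        }
    }

    int main(void)
    {
        /* ASID 5 is in use by CPUs 1 and 2 on the shared TLB */
        live_asid[0][5] = (1UL << 1) | (1UL << 2);

        retire_asid(0, 1, 5);   /* CPU 1 lets go: still live on CPU 2 */
        retire_asid(0, 2, 5);   /* last user gone: triggers the flush */
        return 0;
    }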
diff --git a/include/asm-mips/processor.h b/include/asm-mips/processor.h index 39d2bd50fece..0fb75f0762e0 100644 --- a/include/asm-mips/processor.h +++ b/include/asm-mips/processor.h | |||
@@ -12,6 +12,7 @@ | |||
12 | #define _ASM_PROCESSOR_H | 12 | #define _ASM_PROCESSOR_H |
13 | 13 | ||
14 | #include <linux/config.h> | 14 | #include <linux/config.h> |
15 | #include <linux/cpumask.h> | ||
15 | #include <linux/threads.h> | 16 | #include <linux/threads.h> |
16 | 17 | ||
17 | #include <asm/cachectl.h> | 18 | #include <asm/cachectl.h> |
@@ -107,6 +108,10 @@ struct mips_dsp_state { | |||
107 | 108 | ||
108 | #define INIT_DSP {{0,},} | 109 | #define INIT_DSP {{0,},} |
109 | 110 | ||
111 | #define INIT_CPUMASK { \ | ||
112 | {0,} \ | ||
113 | } | ||
114 | |||
110 | typedef struct { | 115 | typedef struct { |
111 | unsigned long seg; | 116 | unsigned long seg; |
112 | } mm_segment_t; | 117 | } mm_segment_t; |
@@ -129,6 +134,12 @@ struct thread_struct { | |||
129 | 134 | ||
130 | /* Saved fpu/fpu emulator stuff. */ | 135 | /* Saved fpu/fpu emulator stuff. */ |
131 | union mips_fpu_union fpu; | 136 | union mips_fpu_union fpu; |
137 | #ifdef CONFIG_MIPS_MT_FPAFF | ||
138 | /* Emulated instruction count */ | ||
139 | unsigned long emulated_fp; | ||
140 | /* Saved per-thread scheduler affinity mask */ | ||
141 | cpumask_t user_cpus_allowed; | ||
142 | #endif /* CONFIG_MIPS_MT_FPAFF */ | ||
132 | 143 | ||
133 | /* Saved state of the DSP ASE, if available. */ | 144 | /* Saved state of the DSP ASE, if available. */ |
134 | struct mips_dsp_state dsp; | 145 | struct mips_dsp_state dsp; |
@@ -142,6 +153,7 @@ struct thread_struct { | |||
142 | #define MF_LOGADE 2 /* Log address errors to syslog */ | 153 | #define MF_LOGADE 2 /* Log address errors to syslog */ |
143 | #define MF_32BIT_REGS 4 /* also implies 16/32 fprs */ | 154 | #define MF_32BIT_REGS 4 /* also implies 16/32 fprs */ |
144 | #define MF_32BIT_ADDR 8 /* 32-bit address space (o32/n32) */ | 155 | #define MF_32BIT_ADDR 8 /* 32-bit address space (o32/n32) */ |
156 | #define MF_FPUBOUND 0x10 /* thread bound to FPU-full CPU set */ | ||
145 | unsigned long mflags; | 157 | unsigned long mflags; |
146 | unsigned long irix_trampoline; /* Wheee... */ | 158 | unsigned long irix_trampoline; /* Wheee... */ |
147 | unsigned long irix_oldctx; | 159 | unsigned long irix_oldctx; |
@@ -153,6 +165,12 @@ struct thread_struct { | |||
153 | #define MF_N32 MF_32BIT_ADDR | 165 | #define MF_N32 MF_32BIT_ADDR |
154 | #define MF_N64 0 | 166 | #define MF_N64 0 |
155 | 167 | ||
168 | #ifdef CONFIG_MIPS_MT_FPAFF | ||
169 | #define FPAFF_INIT 0, INIT_CPUMASK, | ||
170 | #else | ||
171 | #define FPAFF_INIT | ||
172 | #endif /* CONFIG_MIPS_MT_FPAFF */ | ||
173 | |||
156 | #define INIT_THREAD { \ | 174 | #define INIT_THREAD { \ |
157 | /* \ | 175 | /* \ |
158 | * saved main processor registers \ | 176 | * saved main processor registers \ |
@@ -168,6 +186,10 @@ struct thread_struct { | |||
168 | */ \ | 186 | */ \ |
169 | INIT_FPU, \ | 187 | INIT_FPU, \ |
170 | /* \ | 188 | /* \ |
189 | * fpu affinity state (null if not FPAFF) \ | ||
190 | */ \ | ||
191 | FPAFF_INIT \ | ||
192 | /* \ | ||
171 | * saved dsp/dsp emulator stuff \ | 193 | * saved dsp/dsp emulator stuff \ |
172 | */ \ | 194 | */ \ |
173 | INIT_DSP, \ | 195 | INIT_DSP, \ |
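FPAFF_INIT is a small but easy-to-miss trick: the macro either supplies a complete pair of initializers, trailing comma included, or expands to nothing, so INIT_THREAD keeps a single textual shape whether or not the two CONFIG_MIPS_MT_FPAFF members exist. A minimal standalone illustration of the same pattern, using invented names rather than the kernel's:

    #include <stdio.h>

    #define HAVE_OPTIONAL_FIELDS 1          /* flip to 0 to drop the members */

    struct demo {
        int always_present;
    #if HAVE_OPTIONAL_FIELDS
        unsigned long counter;
        unsigned long mask;
    #endif
        int also_always_present;
    };

    #if HAVE_OPTIONAL_FIELDS
    #define OPTIONAL_INIT 0, 0,             /* initializers for the two members */
    #else
    #define OPTIONAL_INIT                   /* expands to nothing */
    #endif

    /* The initializer keeps the same shape in both configurations */
    #define DEMO_INIT { \
        1, \
        OPTIONAL_INIT \
        2, \
    }

    static struct demo d = DEMO_INIT;

    int main(void)
    {
        printf("%d %d\n", d.always_present, d.also_always_present);
        return 0;
    }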
diff --git a/include/asm-mips/ptrace.h b/include/asm-mips/ptrace.h index 95c5839ac465..fa9d8713c12a 100644 --- a/include/asm-mips/ptrace.h +++ b/include/asm-mips/ptrace.h | |||
@@ -45,6 +45,10 @@ struct pt_regs { | |||
45 | unsigned long cp0_badvaddr; | 45 | unsigned long cp0_badvaddr; |
46 | unsigned long cp0_cause; | 46 | unsigned long cp0_cause; |
47 | unsigned long cp0_epc; | 47 | unsigned long cp0_epc; |
48 | #ifdef CONFIG_MIPS_MT_SMTC | ||
49 | unsigned long cp0_tcstatus; | ||
50 | unsigned long smtc_pad; | ||
51 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
48 | }; | 52 | }; |
49 | 53 | ||
50 | /* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */ | 54 | /* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */ |
diff --git a/include/asm-mips/r4kcache.h b/include/asm-mips/r4kcache.h index 90c374700977..3c8e3c8d1a9a 100644 --- a/include/asm-mips/r4kcache.h +++ b/include/asm-mips/r4kcache.h | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <asm/asm.h> | 15 | #include <asm/asm.h> |
16 | #include <asm/cacheops.h> | 16 | #include <asm/cacheops.h> |
17 | #include <asm/cpu-features.h> | 17 | #include <asm/cpu-features.h> |
18 | #include <asm/mipsmtregs.h> | ||
18 | 19 | ||
19 | /* | 20 | /* |
20 | * This macro return a properly sign-extended address suitable as base address | 21 | * This macro return a properly sign-extended address suitable as base address |
@@ -37,16 +38,120 @@ | |||
37 | " cache %0, %1 \n" \ | 38 | " cache %0, %1 \n" \ |
38 | " .set pop \n" \ | 39 | " .set pop \n" \ |
39 | : \ | 40 | : \ |
40 | : "i" (op), "m" (*(unsigned char *)(addr))) | 41 | : "i" (op), "R" (*(unsigned char *)(addr))) |
42 | |||
43 | #ifdef CONFIG_MIPS_MT | ||
44 | /* | ||
45 | * Temporary hacks for SMTC debug. Optionally force single-threaded | ||
46 | * execution during I-cache flushes. | ||
47 | */ | ||
48 | |||
49 | #define PROTECT_CACHE_FLUSHES 1 | ||
50 | |||
51 | #ifdef PROTECT_CACHE_FLUSHES | ||
52 | |||
53 | extern int mt_protiflush; | ||
54 | extern int mt_protdflush; | ||
55 | extern void mt_cflush_lockdown(void); | ||
56 | extern void mt_cflush_release(void); | ||
57 | |||
58 | #define BEGIN_MT_IPROT \ | ||
59 | unsigned long flags = 0; \ | ||
60 | unsigned long mtflags = 0; \ | ||
61 | if(mt_protiflush) { \ | ||
62 | local_irq_save(flags); \ | ||
63 | ehb(); \ | ||
64 | mtflags = dvpe(); \ | ||
65 | mt_cflush_lockdown(); \ | ||
66 | } | ||
67 | |||
68 | #define END_MT_IPROT \ | ||
69 | if(mt_protiflush) { \ | ||
70 | mt_cflush_release(); \ | ||
71 | evpe(mtflags); \ | ||
72 | local_irq_restore(flags); \ | ||
73 | } | ||
74 | |||
75 | #define BEGIN_MT_DPROT \ | ||
76 | unsigned long flags = 0; \ | ||
77 | unsigned long mtflags = 0; \ | ||
78 | if(mt_protdflush) { \ | ||
79 | local_irq_save(flags); \ | ||
80 | ehb(); \ | ||
81 | mtflags = dvpe(); \ | ||
82 | mt_cflush_lockdown(); \ | ||
83 | } | ||
84 | |||
85 | #define END_MT_DPROT \ | ||
86 | if(mt_protdflush) { \ | ||
87 | mt_cflush_release(); \ | ||
88 | evpe(mtflags); \ | ||
89 | local_irq_restore(flags); \ | ||
90 | } | ||
91 | |||
92 | #else | ||
93 | |||
94 | #define BEGIN_MT_IPROT | ||
95 | #define BEGIN_MT_DPROT | ||
96 | #define END_MT_IPROT | ||
97 | #define END_MT_DPROT | ||
98 | |||
99 | #endif /* PROTECT_CACHE_FLUSHES */ | ||
100 | |||
101 | #define __iflush_prologue \ | ||
102 | unsigned long redundance; \ | ||
103 | extern int mt_n_iflushes; \ | ||
104 | BEGIN_MT_IPROT \ | ||
105 | for (redundance = 0; redundance < mt_n_iflushes; redundance++) { | ||
106 | |||
107 | #define __iflush_epilogue \ | ||
108 | END_MT_IPROT \ | ||
109 | } | ||
110 | |||
111 | #define __dflush_prologue \ | ||
112 | unsigned long redundance; \ | ||
113 | extern int mt_n_dflushes; \ | ||
114 | BEGIN_MT_DPROT \ | ||
115 | for (redundance = 0; redundance < mt_n_dflushes; redundance++) { | ||
116 | |||
117 | #define __dflush_epilogue \ | ||
118 | END_MT_DPROT \ | ||
119 | } | ||
120 | |||
121 | #define __inv_dflush_prologue __dflush_prologue | ||
122 | #define __inv_dflush_epilogue __dflush_epilogue | ||
123 | #define __sflush_prologue { | ||
124 | #define __sflush_epilogue } | ||
125 | #define __inv_sflush_prologue __sflush_prologue | ||
126 | #define __inv_sflush_epilogue __sflush_epilogue | ||
127 | |||
128 | #else /* CONFIG_MIPS_MT */ | ||
129 | |||
130 | #define __iflush_prologue { | ||
131 | #define __iflush_epilogue } | ||
132 | #define __dflush_prologue { | ||
133 | #define __dflush_epilogue } | ||
134 | #define __inv_dflush_prologue { | ||
135 | #define __inv_dflush_epilogue } | ||
136 | #define __sflush_prologue { | ||
137 | #define __sflush_epilogue } | ||
138 | #define __inv_sflush_prologue { | ||
139 | #define __inv_sflush_epilogue } | ||
140 | |||
141 | #endif /* CONFIG_MIPS_MT */ | ||
41 | 142 | ||
42 | static inline void flush_icache_line_indexed(unsigned long addr) | 143 | static inline void flush_icache_line_indexed(unsigned long addr) |
43 | { | 144 | { |
145 | __iflush_prologue | ||
44 | cache_op(Index_Invalidate_I, addr); | 146 | cache_op(Index_Invalidate_I, addr); |
147 | __iflush_epilogue | ||
45 | } | 148 | } |
46 | 149 | ||
47 | static inline void flush_dcache_line_indexed(unsigned long addr) | 150 | static inline void flush_dcache_line_indexed(unsigned long addr) |
48 | { | 151 | { |
152 | __dflush_prologue | ||
49 | cache_op(Index_Writeback_Inv_D, addr); | 153 | cache_op(Index_Writeback_Inv_D, addr); |
154 | __dflush_epilogue | ||
50 | } | 155 | } |
51 | 156 | ||
52 | static inline void flush_scache_line_indexed(unsigned long addr) | 157 | static inline void flush_scache_line_indexed(unsigned long addr) |
@@ -56,17 +161,23 @@ static inline void flush_scache_line_indexed(unsigned long addr) | |||
56 | 161 | ||
57 | static inline void flush_icache_line(unsigned long addr) | 162 | static inline void flush_icache_line(unsigned long addr) |
58 | { | 163 | { |
164 | __iflush_prologue | ||
59 | cache_op(Hit_Invalidate_I, addr); | 165 | cache_op(Hit_Invalidate_I, addr); |
166 | __iflush_epilogue | ||
60 | } | 167 | } |
61 | 168 | ||
62 | static inline void flush_dcache_line(unsigned long addr) | 169 | static inline void flush_dcache_line(unsigned long addr) |
63 | { | 170 | { |
171 | __dflush_prologue | ||
64 | cache_op(Hit_Writeback_Inv_D, addr); | 172 | cache_op(Hit_Writeback_Inv_D, addr); |
173 | __dflush_epilogue | ||
65 | } | 174 | } |
66 | 175 | ||
67 | static inline void invalidate_dcache_line(unsigned long addr) | 176 | static inline void invalidate_dcache_line(unsigned long addr) |
68 | { | 177 | { |
178 | __dflush_prologue | ||
69 | cache_op(Hit_Invalidate_D, addr); | 179 | cache_op(Hit_Invalidate_D, addr); |
180 | __dflush_epilogue | ||
70 | } | 181 | } |
71 | 182 | ||
72 | static inline void invalidate_scache_line(unsigned long addr) | 183 | static inline void invalidate_scache_line(unsigned long addr) |
@@ -239,9 +350,13 @@ static inline void blast_##pfx##cache##lsize(void) \ | |||
239 | current_cpu_data.desc.waybit; \ | 350 | current_cpu_data.desc.waybit; \ |
240 | unsigned long ws, addr; \ | 351 | unsigned long ws, addr; \ |
241 | \ | 352 | \ |
353 | __##pfx##flush_prologue \ | ||
354 | \ | ||
242 | for (ws = 0; ws < ws_end; ws += ws_inc) \ | 355 | for (ws = 0; ws < ws_end; ws += ws_inc) \ |
243 | for (addr = start; addr < end; addr += lsize * 32) \ | 356 | for (addr = start; addr < end; addr += lsize * 32) \ |
244 | cache##lsize##_unroll32(addr|ws,indexop); \ | 357 | cache##lsize##_unroll32(addr|ws,indexop); \ |
358 | \ | ||
359 | __##pfx##flush_epilogue \ | ||
245 | } \ | 360 | } \ |
246 | \ | 361 | \ |
247 | static inline void blast_##pfx##cache##lsize##_page(unsigned long page) \ | 362 | static inline void blast_##pfx##cache##lsize##_page(unsigned long page) \ |
@@ -249,10 +364,14 @@ static inline void blast_##pfx##cache##lsize##_page(unsigned long page) \ | |||
249 | unsigned long start = page; \ | 364 | unsigned long start = page; \ |
250 | unsigned long end = page + PAGE_SIZE; \ | 365 | unsigned long end = page + PAGE_SIZE; \ |
251 | \ | 366 | \ |
367 | __##pfx##flush_prologue \ | ||
368 | \ | ||
252 | do { \ | 369 | do { \ |
253 | cache##lsize##_unroll32(start,hitop); \ | 370 | cache##lsize##_unroll32(start,hitop); \ |
254 | start += lsize * 32; \ | 371 | start += lsize * 32; \ |
255 | } while (start < end); \ | 372 | } while (start < end); \ |
373 | \ | ||
374 | __##pfx##flush_epilogue \ | ||
256 | } \ | 375 | } \ |
257 | \ | 376 | \ |
258 | static inline void blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \ | 377 | static inline void blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \ |
@@ -265,9 +384,13 @@ static inline void blast_##pfx##cache##lsize##_page_indexed(unsigned long page) | |||
265 | current_cpu_data.desc.waybit; \ | 384 | current_cpu_data.desc.waybit; \ |
266 | unsigned long ws, addr; \ | 385 | unsigned long ws, addr; \ |
267 | \ | 386 | \ |
387 | __##pfx##flush_prologue \ | ||
388 | \ | ||
268 | for (ws = 0; ws < ws_end; ws += ws_inc) \ | 389 | for (ws = 0; ws < ws_end; ws += ws_inc) \ |
269 | for (addr = start; addr < end; addr += lsize * 32) \ | 390 | for (addr = start; addr < end; addr += lsize * 32) \ |
270 | cache##lsize##_unroll32(addr|ws,indexop); \ | 391 | cache##lsize##_unroll32(addr|ws,indexop); \ |
392 | \ | ||
393 | __##pfx##flush_epilogue \ | ||
271 | } | 394 | } |
272 | 395 | ||
273 | __BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16) | 396 | __BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16) |
@@ -288,12 +411,17 @@ static inline void prot##blast_##pfx##cache##_range(unsigned long start, \ | |||
288 | unsigned long lsize = cpu_##desc##_line_size(); \ | 411 | unsigned long lsize = cpu_##desc##_line_size(); \ |
289 | unsigned long addr = start & ~(lsize - 1); \ | 412 | unsigned long addr = start & ~(lsize - 1); \ |
290 | unsigned long aend = (end - 1) & ~(lsize - 1); \ | 413 | unsigned long aend = (end - 1) & ~(lsize - 1); \ |
414 | \ | ||
415 | __##pfx##flush_prologue \ | ||
416 | \ | ||
291 | while (1) { \ | 417 | while (1) { \ |
292 | prot##cache_op(hitop, addr); \ | 418 | prot##cache_op(hitop, addr); \ |
293 | if (addr == aend) \ | 419 | if (addr == aend) \ |
294 | break; \ | 420 | break; \ |
295 | addr += lsize; \ | 421 | addr += lsize; \ |
296 | } \ | 422 | } \ |
423 | \ | ||
424 | __##pfx##flush_epilogue \ | ||
297 | } | 425 | } |
298 | 426 | ||
299 | __BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_) | 427 | __BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_) |
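The prologue/epilogue macros above deliberately leave a brace (and, in the MT case, a for-loop header driven by mt_n_iflushes/mt_n_dflushes) open, so each cache helper still reads as a single statement while optionally being repeated and, with the PROTECT_CACHE_FLUSHES hack, serialized. A stripped-down standalone model of the pattern, with the lockdown half omitted, a local counter standing in for mt_n_dflushes and a printf standing in for cache_op():

    #include <stdio.h>

    #define MT_DEBUG 1                      /* models CONFIG_MIPS_MT */

    #if MT_DEBUG
    static unsigned long n_dflushes = 2;    /* stand-in for mt_n_dflushes */

    #define __dflush_prologue \
        unsigned long redundance; \
        for (redundance = 0; redundance < n_dflushes; redundance++) {

    #define __dflush_epilogue \
        }
    #else
    #define __dflush_prologue {
    #define __dflush_epilogue }
    #endif

    static void flush_dcache_line(unsigned long addr)
    {
        __dflush_prologue
        printf("cache op on line 0x%lx\n", addr);   /* stand-in for cache_op() */
        __dflush_epilogue
    }

    int main(void)
    {
        flush_dcache_line(0x80001000UL);    /* prints twice with MT_DEBUG=1 */
        return 0;
    }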
diff --git a/include/asm-mips/rtc.h b/include/asm-mips/rtc.h index a2abc4572b63..82ad401c7dca 100644 --- a/include/asm-mips/rtc.h +++ b/include/asm-mips/rtc.h | |||
@@ -32,7 +32,7 @@ static inline unsigned int get_rtc_time(struct rtc_time *time) | |||
32 | { | 32 | { |
33 | unsigned long nowtime; | 33 | unsigned long nowtime; |
34 | 34 | ||
35 | nowtime = rtc_get_time(); | 35 | nowtime = rtc_mips_get_time(); |
36 | to_tm(nowtime, time); | 36 | to_tm(nowtime, time); |
37 | time->tm_year -= 1900; | 37 | time->tm_year -= 1900; |
38 | 38 | ||
@@ -47,7 +47,7 @@ static inline int set_rtc_time(struct rtc_time *time) | |||
47 | nowtime = mktime(time->tm_year+1900, time->tm_mon+1, | 47 | nowtime = mktime(time->tm_year+1900, time->tm_mon+1, |
48 | time->tm_mday, time->tm_hour, time->tm_min, | 48 | time->tm_mday, time->tm_hour, time->tm_min, |
49 | time->tm_sec); | 49 | time->tm_sec); |
50 | ret = rtc_set_time(nowtime); | 50 | ret = rtc_mips_set_time(nowtime); |
51 | 51 | ||
52 | return ret; | 52 | return ret; |
53 | } | 53 | } |
diff --git a/include/asm-mips/rtlx.h b/include/asm-mips/rtlx.h index 1298c3fdf6c9..76cd51c6be39 100644 --- a/include/asm-mips/rtlx.h +++ b/include/asm-mips/rtlx.h | |||
@@ -3,32 +3,46 @@ | |||
3 | * | 3 | * |
4 | */ | 4 | */ |
5 | 5 | ||
6 | #ifndef _RTLX_H | 6 | #ifndef __ASM_RTLX_H |
7 | #define _RTLX_H_ | 7 | #define __ASM_RTLX_H |
8 | 8 | ||
9 | #define LX_NODE_BASE 10 | 9 | #define LX_NODE_BASE 10 |
10 | 10 | ||
11 | #define MIPSCPU_INT_BASE 16 | 11 | #define MIPSCPU_INT_BASE 16 |
12 | #define MIPS_CPU_RTLX_IRQ 0 | 12 | #define MIPS_CPU_RTLX_IRQ 0 |
13 | 13 | ||
14 | #define RTLX_VERSION 1 | 14 | #define RTLX_VERSION 2 |
15 | #define RTLX_xID 0x12345600 | 15 | #define RTLX_xID 0x12345600 |
16 | #define RTLX_ID (RTLX_xID | RTLX_VERSION) | 16 | #define RTLX_ID (RTLX_xID | RTLX_VERSION) |
17 | #define RTLX_CHANNELS 8 | 17 | #define RTLX_CHANNELS 8 |
18 | 18 | ||
19 | #define RTLX_BUFFER_SIZE 1024 | 19 | #define RTLX_CHANNEL_STDIO 0 |
20 | #define RTLX_CHANNEL_DBG 1 | ||
21 | #define RTLX_CHANNEL_SYSIO 2 | ||
20 | 22 | ||
21 | /* | 23 | extern int rtlx_open(int index, int can_sleep); |
22 | * lx_state bits | 24 | extern int rtlx_release(int index); |
23 | */ | 25 | extern ssize_t rtlx_read(int index, void *buff, size_t count, int user); |
24 | #define RTLX_STATE_OPENED 1UL | 26 | extern ssize_t rtlx_write(int index, void *buffer, size_t count, int user); |
27 | extern unsigned int rtlx_read_poll(int index, int can_sleep); | ||
28 | extern unsigned int rtlx_write_poll(int index); | ||
29 | |||
30 | enum rtlx_state { | ||
31 | RTLX_STATE_UNUSED, | ||
32 | RTLX_STATE_INITIALISED, | ||
33 | RTLX_STATE_REMOTE_READY, | ||
34 | RTLX_STATE_OPENED | ||
35 | }; | ||
36 | |||
37 | #define RTLX_BUFFER_SIZE 1024 | ||
25 | 38 | ||
26 | /* each channel supports read and write. | 39 | /* each channel supports read and write. |
27 | linux (vpe0) reads lx_buffer and writes rt_buffer | 40 | linux (vpe0) reads lx_buffer and writes rt_buffer |
28 | SP (vpe1) reads rt_buffer and writes lx_buffer | 41 | SP (vpe1) reads rt_buffer and writes lx_buffer |
29 | */ | 42 | */ |
30 | struct rtlx_channel { | 43 | struct rtlx_channel { |
31 | unsigned long lx_state; | 44 | enum rtlx_state rt_state; |
45 | enum rtlx_state lx_state; | ||
32 | 46 | ||
33 | int buffer_size; | 47 | int buffer_size; |
34 | 48 | ||
@@ -38,15 +52,13 @@ struct rtlx_channel { | |||
38 | 52 | ||
39 | int lx_write, lx_read; | 53 | int lx_write, lx_read; |
40 | char *lx_buffer; | 54 | char *lx_buffer; |
41 | |||
42 | void *queues; | ||
43 | |||
44 | }; | 55 | }; |
45 | 56 | ||
46 | struct rtlx_info { | 57 | struct rtlx_info { |
47 | unsigned long id; | 58 | unsigned long id; |
59 | enum rtlx_state state; | ||
48 | 60 | ||
49 | struct rtlx_channel channel[RTLX_CHANNELS]; | 61 | struct rtlx_channel channel[RTLX_CHANNELS]; |
50 | }; | 62 | }; |
51 | 63 | ||
52 | #endif /* _RTLX_H_ */ | 64 | #endif /* __ASM_RTLX_H */ |
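Each rtlx_channel is a pair of byte buffers with independent read and write indices, one direction per VPE as the comment above describes. The header does not spell out the index discipline, so the standalone sketch below assumes a conventional ring buffer (the writer advances wr, the reader advances rd, one slot is kept free to tell full from empty); it models a single direction, e.g. the SP side filling lx_buffer and Linux draining it, and all names in it are invented.

    #include <stdio.h>
    #include <string.h>

    #define BUF_SIZE 16                     /* stands in for RTLX_BUFFER_SIZE */

    /* One direction of a channel: the writer owns wr, the reader owns rd. */
    struct ring {
        int wr, rd;
        char buf[BUF_SIZE];
    };

    static int ring_put(struct ring *r, char c)
    {
        int next = (r->wr + 1) % BUF_SIZE;

        if (next == r->rd)                  /* full */
            return -1;
        r->buf[r->wr] = c;
        r->wr = next;
        return 0;
    }

    static int ring_get(struct ring *r, char *c)
    {
        if (r->rd == r->wr)                 /* empty */
            return -1;
        *c = r->buf[r->rd];
        r->rd = (r->rd + 1) % BUF_SIZE;
        return 0;
    }

    int main(void)
    {
        struct ring lx = { 0, 0, { 0 } };
        const char *msg = "hello";
        size_t i;
        char c;

        for (i = 0; i < strlen(msg); i++)   /* "SP" side writing lx_buffer */
            ring_put(&lx, msg[i]);
        while (ring_get(&lx, &c) == 0)      /* "Linux" side reading it */
            putchar(c);
        putchar('\n');
        return 0;
    }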
diff --git a/include/asm-mips/serial.h b/include/asm-mips/serial.h index 7b2366412203..7196ceb0e948 100644 --- a/include/asm-mips/serial.h +++ b/include/asm-mips/serial.h | |||
@@ -77,15 +77,15 @@ | |||
77 | #include <asm/it8712.h> | 77 | #include <asm/it8712.h> |
78 | #define ITE_SERIAL_PORT_DEFNS \ | 78 | #define ITE_SERIAL_PORT_DEFNS \ |
79 | { .baud_base = BASE_BAUD, .port = (IT8172_PCI_IO_BASE + IT_UART_BASE), \ | 79 | { .baud_base = BASE_BAUD, .port = (IT8172_PCI_IO_BASE + IT_UART_BASE), \ |
80 | .irq = IT8172_UART_IRQ, .flags = STD_COM_FLAGS, .type = 0x3 }, \ | 80 | .irq = IT8172_UART_IRQ, .flags = STD_COM_FLAGS, .port = PORT_16550 }, \ |
81 | { .baud_base = (24000000/(16*13)), .port = (IT8172_PCI_IO_BASE + IT8712_UART1_PORT), \ | 81 | { .baud_base = (24000000/(16*13)), .port = (IT8172_PCI_IO_BASE + IT8712_UART1_PORT), \ |
82 | .irq = IT8172_SERIRQ_4, .flags = STD_COM_FLAGS, .type = 0x3 }, \ | 82 | .irq = IT8172_SERIRQ_4, .flags = STD_COM_FLAGS, .port = PORT_16550 }, \ |
83 | /* Smart Card Reader 0 */ \ | 83 | /* Smart Card Reader 0 */ \ |
84 | { .baud_base = BASE_BAUD, .port = (IT8172_PCI_IO_BASE + IT_SCR0_BASE), \ | 84 | { .baud_base = BASE_BAUD, .port = (IT8172_PCI_IO_BASE + IT_SCR0_BASE), \ |
85 | .irq = IT8172_SCR0_IRQ, .flags = STD_COM_FLAGS, .type = 0x3 }, \ | 85 | .irq = IT8172_SCR0_IRQ, .flags = STD_COM_FLAGS, .port = PORT_16550 }, \ |
86 | /* Smart Card Reader 1 */ \ | 86 | /* Smart Card Reader 1 */ \ |
87 | { .baud_base = BASE_BAUD, .port = (IT8172_PCI_IO_BASE + IT_SCR1_BASE), \ | 87 | { .baud_base = BASE_BAUD, .port = (IT8172_PCI_IO_BASE + IT_SCR1_BASE), \ |
88 | .irq = IT8172_SCR1_IRQ, .flags = STD_COM_FLAGS, .type = 0x3 }, | 88 | .irq = IT8172_SCR1_IRQ, .flags = STD_COM_FLAGS, .port = PORT_16550 }, |
89 | #else | 89 | #else |
90 | #define ITE_SERIAL_PORT_DEFNS | 90 | #define ITE_SERIAL_PORT_DEFNS |
91 | #endif | 91 | #endif |
@@ -95,10 +95,10 @@ | |||
95 | #include <asm/it8172/it8172_int.h> | 95 | #include <asm/it8172/it8172_int.h> |
96 | #define IVR_SERIAL_PORT_DEFNS \ | 96 | #define IVR_SERIAL_PORT_DEFNS \ |
97 | { .baud_base = BASE_BAUD, .port = (IT8172_PCI_IO_BASE + IT_UART_BASE), \ | 97 | { .baud_base = BASE_BAUD, .port = (IT8172_PCI_IO_BASE + IT_UART_BASE), \ |
98 | .irq = IT8172_UART_IRQ, .flags = STD_COM_FLAGS, .type = 0x3 }, \ | 98 | .irq = IT8172_UART_IRQ, .flags = STD_COM_FLAGS, .port = PORT_16550 }, \ |
99 | /* Smart Card Reader 1 */ \ | 99 | /* Smart Card Reader 1 */ \ |
100 | { .baud_base = BASE_BAUD, .port = (IT8172_PCI_IO_BASE + IT_SCR1_BASE), \ | 100 | { .baud_base = BASE_BAUD, .port = (IT8172_PCI_IO_BASE + IT_SCR1_BASE), \ |
101 | .irq = IT8172_SCR1_IRQ, .flags = STD_COM_FLAGS, .type = 0x3 }, | 101 | .irq = IT8172_SCR1_IRQ, .flags = STD_COM_FLAGS, .port = PORT_16550 }, |
102 | #else | 102 | #else |
103 | #define IVR_SERIAL_PORT_DEFNS | 103 | #define IVR_SERIAL_PORT_DEFNS |
104 | #endif | 104 | #endif |
diff --git a/include/asm-mips/smtc.h b/include/asm-mips/smtc.h new file mode 100644 index 000000000000..e1941d1b8726 --- /dev/null +++ b/include/asm-mips/smtc.h | |||
@@ -0,0 +1,55 @@ | |||
1 | #ifndef _ASM_SMTC_MT_H | ||
2 | #define _ASM_SMTC_MT_H | ||
3 | |||
4 | /* | ||
5 | * Definitions for SMTC multitasking on MIPS MT cores | ||
6 | */ | ||
7 | |||
8 | #include <asm/mips_mt.h> | ||
9 | |||
10 | /* | ||
11 | * System-wide SMTC status information | ||
12 | */ | ||
13 | |||
14 | extern unsigned int smtc_status; | ||
15 | |||
16 | #define SMTC_TLB_SHARED 0x00000001 | ||
17 | #define SMTC_MTC_ACTIVE 0x00000002 | ||
18 | |||
19 | /* | ||
20 | * TLB/ASID Management information | ||
21 | */ | ||
22 | |||
23 | #define MAX_SMTC_TLBS 2 | ||
24 | #define MAX_SMTC_ASIDS 256 | ||
25 | #if NR_CPUS <= 8 | ||
26 | typedef char asiduse; | ||
27 | #else | ||
28 | #if NR_CPUS <= 16 | ||
29 | typedef short asiduse; | ||
30 | #else | ||
31 | typedef long asiduse; | ||
32 | #endif | ||
33 | #endif | ||
34 | |||
35 | extern asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS]; | ||
36 | |||
37 | void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu); | ||
38 | |||
39 | void smtc_flush_tlb_asid(unsigned long asid); | ||
40 | extern int mipsmt_build_cpu_map(int startslot); | ||
41 | extern void mipsmt_prepare_cpus(void); | ||
42 | extern void smtc_smp_finish(void); | ||
43 | extern void smtc_boot_secondary(int cpu, struct task_struct *t); | ||
44 | |||
45 | /* | ||
46 | * Sharing the TLB between multiple VPEs means that the | ||
47 | * "random" index selection function is not allowed to | ||
48 | * select the current value of the Index register. To | ||
49 | * avoid additional TLB pressure, the Index registers | ||
50 | * are "parked" with an non-Valid value. | ||
51 | */ | ||
52 | |||
53 | #define PARKED_INDEX ((unsigned int)0x80000000) | ||
54 | |||
55 | #endif /* _ASM_SMTC_MT_H */ | ||
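asiduse is just a per-ASID bitmask wide enough to carry one bit per possible CPU, and the mmu_context.h code above updates it with plain shift-and-mask operations. A minimal standalone illustration, assuming an NR_CPUS small enough for the char case:

    #include <stdio.h>

    #define NR_CPUS 8

    #if NR_CPUS <= 8
    typedef char asiduse;
    #elif NR_CPUS <= 16
    typedef short asiduse;
    #else
    typedef long asiduse;
    #endif

    int main(void)
    {
        asiduse live = 0;

        live |= 1 << 3;             /* CPU 3 starts using this ASID */
        live |= 1 << 5;             /* so does CPU 5 */
        live &= ~(0x1 << 3);        /* CPU 3 retires it, as in switch_mm() */

        printf("ASID still live? %s\n", live ? "yes" : "no");
        return 0;
    }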
diff --git a/include/asm-mips/smtc_ipi.h b/include/asm-mips/smtc_ipi.h new file mode 100644 index 000000000000..f22c3e2f993a --- /dev/null +++ b/include/asm-mips/smtc_ipi.h | |||
@@ -0,0 +1,118 @@ | |||
1 | /* | ||
2 | * Definitions used in MIPS MT SMTC "Interprocessor Interrupt" code. | ||
3 | */ | ||
4 | #ifndef __ASM_SMTC_IPI_H | ||
5 | #define __ASM_SMTC_IPI_H | ||
6 | |||
7 | //#define SMTC_IPI_DEBUG | ||
8 | |||
9 | #ifdef SMTC_IPI_DEBUG | ||
10 | #include <asm/mipsregs.h> | ||
11 | #include <asm/mipsmtregs.h> | ||
12 | #endif /* SMTC_IPI_DEBUG */ | ||
13 | |||
14 | /* | ||
15 | * An IPI "message" | ||
16 | */ | ||
17 | |||
18 | struct smtc_ipi { | ||
19 | struct smtc_ipi *flink; | ||
20 | int type; | ||
21 | void *arg; | ||
22 | int dest; | ||
23 | #ifdef SMTC_IPI_DEBUG | ||
24 | int sender; | ||
25 | long stamp; | ||
26 | #endif /* SMTC_IPI_DEBUG */ | ||
27 | }; | ||
28 | |||
29 | /* | ||
30 | * Defined IPI Types | ||
31 | */ | ||
32 | |||
33 | #define LINUX_SMP_IPI 1 | ||
34 | #define SMTC_CLOCK_TICK 2 | ||
35 | |||
36 | /* | ||
37 | * A queue of IPI messages | ||
38 | */ | ||
39 | |||
40 | struct smtc_ipi_q { | ||
41 | struct smtc_ipi *head; | ||
42 | spinlock_t lock; | ||
43 | struct smtc_ipi *tail; | ||
44 | int depth; | ||
45 | }; | ||
46 | |||
47 | extern struct smtc_ipi_q IPIQ[NR_CPUS]; | ||
48 | extern struct smtc_ipi_q freeIPIq; | ||
49 | |||
50 | static inline void smtc_ipi_nq(struct smtc_ipi_q *q, struct smtc_ipi *p) | ||
51 | { | ||
52 | long flags; | ||
53 | |||
54 | spin_lock_irqsave(&q->lock, flags); | ||
55 | if (q->head == NULL) | ||
56 | q->head = q->tail = p; | ||
57 | else | ||
58 | q->tail->flink = p; | ||
59 | p->flink = NULL; | ||
60 | q->tail = p; | ||
61 | q->depth++; | ||
62 | #ifdef SMTC_IPI_DEBUG | ||
63 | p->sender = read_c0_tcbind(); | ||
64 | p->stamp = read_c0_count(); | ||
65 | #endif /* SMTC_IPI_DEBUG */ | ||
66 | spin_unlock_irqrestore(&q->lock, flags); | ||
67 | } | ||
68 | |||
69 | static inline struct smtc_ipi *smtc_ipi_dq(struct smtc_ipi_q *q) | ||
70 | { | ||
71 | struct smtc_ipi *p; | ||
72 | long flags; | ||
73 | |||
74 | spin_lock_irqsave(&q->lock, flags); | ||
75 | if (q->head == NULL) | ||
76 | p = NULL; | ||
77 | else { | ||
78 | p = q->head; | ||
79 | q->head = q->head->flink; | ||
80 | q->depth--; | ||
81 | /* Arguably unnecessary, but leaves queue cleaner */ | ||
82 | if (q->head == NULL) | ||
83 | q->tail = NULL; | ||
84 | } | ||
85 | spin_unlock_irqrestore(&q->lock, flags); | ||
86 | return p; | ||
87 | } | ||
88 | |||
89 | static inline void smtc_ipi_req(struct smtc_ipi_q *q, struct smtc_ipi *p) | ||
90 | { | ||
91 | long flags; | ||
92 | |||
93 | spin_lock_irqsave(&q->lock, flags); | ||
94 | if (q->head == NULL) { | ||
95 | q->head = q->tail = p; | ||
96 | p->flink = NULL; | ||
97 | } else { | ||
98 | p->flink = q->head; | ||
99 | q->head = p; | ||
100 | } | ||
101 | q->depth++; | ||
102 | spin_unlock_irqrestore(&q->lock, flags); | ||
103 | } | ||
104 | |||
105 | static inline int smtc_ipi_qdepth(struct smtc_ipi_q *q) | ||
106 | { | ||
107 | long flags; | ||
108 | int retval; | ||
109 | |||
110 | spin_lock_irqsave(&q->lock, flags); | ||
111 | retval = q->depth; | ||
112 | spin_unlock_irqrestore(&q->lock, flags); | ||
113 | return retval; | ||
114 | } | ||
115 | |||
116 | extern void smtc_send_ipi(int cpu, int type, unsigned int action); | ||
117 | |||
118 | #endif /* __ASM_SMTC_IPI_H */ | ||
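The IPI queue is a plain singly linked FIFO guarded by a spinlock, with smtc_ipi_req() existing so a message can be pushed back onto the head. The standalone model below mirrors the enqueue/dequeue logic in userspace: the struct and function names are invented, a pthread mutex stands in for spin_lock_irqsave(), and the debug fields are dropped.

    #include <pthread.h>
    #include <stdio.h>

    struct ipi {
        struct ipi *flink;
        int type;
        void *arg;
        int dest;
    };

    struct ipi_q {
        struct ipi *head, *tail;
        pthread_mutex_t lock;
        int depth;
    };

    static void ipi_nq(struct ipi_q *q, struct ipi *p)      /* enqueue at tail */
    {
        pthread_mutex_lock(&q->lock);
        if (q->head == NULL)
            q->head = q->tail = p;
        else
            q->tail->flink = p;
        p->flink = NULL;
        q->tail = p;
        q->depth++;
        pthread_mutex_unlock(&q->lock);
    }

    static struct ipi *ipi_dq(struct ipi_q *q)              /* dequeue from head */
    {
        struct ipi *p;

        pthread_mutex_lock(&q->lock);
        p = q->head;
        if (p) {
            q->head = p->flink;
            q->depth--;
            if (q->head == NULL)
                q->tail = NULL;
        }
        pthread_mutex_unlock(&q->lock);
        return p;
    }

    int main(void)
    {
        struct ipi_q q = { NULL, NULL, PTHREAD_MUTEX_INITIALIZER, 0 };
        struct ipi msg = { NULL, 2 /* e.g. SMTC_CLOCK_TICK */, NULL, 1 };
        struct ipi *p;

        ipi_nq(&q, &msg);
        p = ipi_dq(&q);
        printf("dequeued type %d for cpu %d, depth now %d\n",
               p->type, p->dest, q.depth);
        return 0;
    }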
diff --git a/include/asm-mips/smtc_proc.h b/include/asm-mips/smtc_proc.h new file mode 100644 index 000000000000..25da651f1f5f --- /dev/null +++ b/include/asm-mips/smtc_proc.h | |||
@@ -0,0 +1,23 @@ | |||
1 | /* | ||
2 | * Definitions for SMTC /proc entries | ||
3 | * Copyright(C) 2005 MIPS Technologies Inc. | ||
4 | */ | ||
5 | #ifndef __ASM_SMTC_PROC_H | ||
6 | #define __ASM_SMTC_PROC_H | ||
7 | |||
8 | /* | ||
9 | * per-"CPU" statistics | ||
10 | */ | ||
11 | |||
12 | struct smtc_cpu_proc { | ||
13 | unsigned long timerints; | ||
14 | unsigned long selfipis; | ||
15 | }; | ||
16 | |||
17 | extern struct smtc_cpu_proc smtc_cpu_stats[NR_CPUS]; | ||
18 | |||
19 | /* Count of number of recoveries of "stolen" FPU access rights on 34K */ | ||
20 | |||
21 | extern atomic_t smtc_fpu_recoveries; | ||
22 | |||
23 | #endif /* __ASM_SMTC_PROC_H */ | ||
diff --git a/include/asm-mips/stackframe.h b/include/asm-mips/stackframe.h index 2acf3e844f00..c4856a874965 100644 --- a/include/asm-mips/stackframe.h +++ b/include/asm-mips/stackframe.h | |||
@@ -14,9 +14,14 @@ | |||
14 | #include <linux/threads.h> | 14 | #include <linux/threads.h> |
15 | 15 | ||
16 | #include <asm/asm.h> | 16 | #include <asm/asm.h> |
17 | #include <asm/asmmacro.h> | ||
17 | #include <asm/mipsregs.h> | 18 | #include <asm/mipsregs.h> |
18 | #include <asm/asm-offsets.h> | 19 | #include <asm/asm-offsets.h> |
19 | 20 | ||
21 | #ifdef CONFIG_MIPS_MT_SMTC | ||
22 | #include <asm/mipsmtregs.h> | ||
23 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
24 | |||
20 | .macro SAVE_AT | 25 | .macro SAVE_AT |
21 | .set push | 26 | .set push |
22 | .set noat | 27 | .set noat |
@@ -57,13 +62,30 @@ | |||
57 | #ifdef CONFIG_SMP | 62 | #ifdef CONFIG_SMP |
58 | .macro get_saved_sp /* SMP variation */ | 63 | .macro get_saved_sp /* SMP variation */ |
59 | #ifdef CONFIG_32BIT | 64 | #ifdef CONFIG_32BIT |
65 | #ifdef CONFIG_MIPS_MT_SMTC | ||
66 | .set mips32 | ||
67 | mfc0 k0, CP0_TCBIND; | ||
68 | .set mips0 | ||
69 | lui k1, %hi(kernelsp) | ||
70 | srl k0, k0, 19 | ||
71 | /* No need to shift down and up to clear bits 0-1 */ | ||
72 | #else | ||
60 | mfc0 k0, CP0_CONTEXT | 73 | mfc0 k0, CP0_CONTEXT |
61 | lui k1, %hi(kernelsp) | 74 | lui k1, %hi(kernelsp) |
62 | srl k0, k0, 23 | 75 | srl k0, k0, 23 |
76 | #endif | ||
63 | addu k1, k0 | 77 | addu k1, k0 |
64 | LONG_L k1, %lo(kernelsp)(k1) | 78 | LONG_L k1, %lo(kernelsp)(k1) |
65 | #endif | 79 | #endif |
66 | #ifdef CONFIG_64BIT | 80 | #ifdef CONFIG_64BIT |
81 | #ifdef CONFIG_MIPS_MT_SMTC | ||
82 | .set mips64 | ||
83 | mfc0 k0, CP0_TCBIND; | ||
84 | .set mips0 | ||
85 | lui k0, %highest(kernelsp) | ||
86 | dsrl k1, 19 | ||
87 | /* No need to shift down and up to clear bits 0-2 */ | ||
88 | #else | ||
67 | MFC0 k1, CP0_CONTEXT | 89 | MFC0 k1, CP0_CONTEXT |
68 | lui k0, %highest(kernelsp) | 90 | lui k0, %highest(kernelsp) |
69 | dsrl k1, 23 | 91 | dsrl k1, 23 |
@@ -71,20 +93,31 @@ | |||
71 | dsll k0, k0, 16 | 93 | dsll k0, k0, 16 |
72 | daddiu k0, %hi(kernelsp) | 94 | daddiu k0, %hi(kernelsp) |
73 | dsll k0, k0, 16 | 95 | dsll k0, k0, 16 |
96 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
74 | daddu k1, k1, k0 | 97 | daddu k1, k1, k0 |
75 | LONG_L k1, %lo(kernelsp)(k1) | 98 | LONG_L k1, %lo(kernelsp)(k1) |
76 | #endif | 99 | #endif /* CONFIG_64BIT */ |
77 | .endm | 100 | .endm |
78 | 101 | ||
79 | .macro set_saved_sp stackp temp temp2 | 102 | .macro set_saved_sp stackp temp temp2 |
80 | #ifdef CONFIG_32BIT | 103 | #ifdef CONFIG_32BIT |
104 | #ifdef CONFIG_MIPS_MT_SMTC | ||
105 | mfc0 \temp, CP0_TCBIND | ||
106 | srl \temp, 19 | ||
107 | #else | ||
81 | mfc0 \temp, CP0_CONTEXT | 108 | mfc0 \temp, CP0_CONTEXT |
82 | srl \temp, 23 | 109 | srl \temp, 23 |
83 | #endif | 110 | #endif |
111 | #endif | ||
84 | #ifdef CONFIG_64BIT | 112 | #ifdef CONFIG_64BIT |
113 | #ifdef CONFIG_MIPS_MT_SMTC | ||
114 | mfc0 \temp, CP0_TCBIND | ||
115 | dsrl \temp, 19 | ||
116 | #else | ||
85 | MFC0 \temp, CP0_CONTEXT | 117 | MFC0 \temp, CP0_CONTEXT |
86 | dsrl \temp, 23 | 118 | dsrl \temp, 23 |
87 | #endif | 119 | #endif |
120 | #endif | ||
88 | LONG_S \stackp, kernelsp(\temp) | 121 | LONG_S \stackp, kernelsp(\temp) |
89 | .endm | 122 | .endm |
90 | #else | 123 | #else |
@@ -122,10 +155,25 @@ | |||
122 | PTR_SUBU sp, k1, PT_SIZE | 155 | PTR_SUBU sp, k1, PT_SIZE |
123 | LONG_S k0, PT_R29(sp) | 156 | LONG_S k0, PT_R29(sp) |
124 | LONG_S $3, PT_R3(sp) | 157 | LONG_S $3, PT_R3(sp) |
158 | /* | ||
159 | * You might think that you don't need to save $0, | ||
160 | * but the FPU emulator and gdb remote debug stub | ||
161 | * need it to operate correctly | ||
162 | */ | ||
125 | LONG_S $0, PT_R0(sp) | 163 | LONG_S $0, PT_R0(sp) |
126 | mfc0 v1, CP0_STATUS | 164 | mfc0 v1, CP0_STATUS |
127 | LONG_S $2, PT_R2(sp) | 165 | LONG_S $2, PT_R2(sp) |
128 | LONG_S v1, PT_STATUS(sp) | 166 | LONG_S v1, PT_STATUS(sp) |
167 | #ifdef CONFIG_MIPS_MT_SMTC | ||
168 | /* | ||
169 | * Ideally, these instructions would be shuffled in | ||
170 | * to cover the pipeline delay. | ||
171 | */ | ||
172 | .set mips32 | ||
173 | mfc0 v1, CP0_TCSTATUS | ||
174 | .set mips0 | ||
175 | LONG_S v1, PT_TCSTATUS(sp) | ||
176 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
129 | LONG_S $4, PT_R4(sp) | 177 | LONG_S $4, PT_R4(sp) |
130 | mfc0 v1, CP0_CAUSE | 178 | mfc0 v1, CP0_CAUSE |
131 | LONG_S $5, PT_R5(sp) | 179 | LONG_S $5, PT_R5(sp) |
@@ -234,14 +282,36 @@ | |||
234 | .endm | 282 | .endm |
235 | 283 | ||
236 | #else | 284 | #else |
285 | /* | ||
286 | * For SMTC kernel, global IE should be left set, and interrupts | ||
287 | * controlled exclusively via IXMT. | ||
288 | */ | ||
237 | 289 | ||
290 | #ifdef CONFIG_MIPS_MT_SMTC | ||
291 | #define STATMASK 0x1e | ||
292 | #else | ||
293 | #define STATMASK 0x1f | ||
294 | #endif | ||
238 | .macro RESTORE_SOME | 295 | .macro RESTORE_SOME |
239 | .set push | 296 | .set push |
240 | .set reorder | 297 | .set reorder |
241 | .set noat | 298 | .set noat |
299 | #ifdef CONFIG_MIPS_MT_SMTC | ||
300 | .set mips32r2 | ||
301 | /* | ||
302 | * This may not really be necessary if ints are already | ||
303 | * inhibited here. | ||
304 | */ | ||
305 | mfc0 v0, CP0_TCSTATUS | ||
306 | ori v0, TCSTATUS_IXMT | ||
307 | mtc0 v0, CP0_TCSTATUS | ||
308 | ehb | ||
309 | DMT 5 # dmt a1 | ||
310 | jal mips_ihb | ||
311 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
242 | mfc0 a0, CP0_STATUS | 312 | mfc0 a0, CP0_STATUS |
243 | ori a0, 0x1f | 313 | ori a0, STATMASK |
244 | xori a0, 0x1f | 314 | xori a0, STATMASK |
245 | mtc0 a0, CP0_STATUS | 315 | mtc0 a0, CP0_STATUS |
246 | li v1, 0xff00 | 316 | li v1, 0xff00 |
247 | and a0, v1 | 317 | and a0, v1 |
@@ -250,6 +320,26 @@ | |||
250 | and v0, v1 | 320 | and v0, v1 |
251 | or v0, a0 | 321 | or v0, a0 |
252 | mtc0 v0, CP0_STATUS | 322 | mtc0 v0, CP0_STATUS |
323 | #ifdef CONFIG_MIPS_MT_SMTC | ||
324 | /* | ||
325 | * Only after EXL/ERL have been restored to status can we | ||
326 | * restore TCStatus.IXMT. | ||
327 | */ | ||
328 | LONG_L v1, PT_TCSTATUS(sp) | ||
329 | ehb | ||
330 | mfc0 v0, CP0_TCSTATUS | ||
331 | andi v1, TCSTATUS_IXMT | ||
332 | /* We know that TCStatus.IXMT should be set from above */ | ||
333 | xori v0, v0, TCSTATUS_IXMT | ||
334 | or v0, v0, v1 | ||
335 | mtc0 v0, CP0_TCSTATUS | ||
336 | ehb | ||
337 | andi a1, a1, VPECONTROL_TE | ||
338 | beqz a1, 1f | ||
339 | emt | ||
340 | 1: | ||
341 | .set mips0 | ||
342 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
253 | LONG_L v1, PT_EPC(sp) | 343 | LONG_L v1, PT_EPC(sp) |
254 | MTC0 v1, CP0_EPC | 344 | MTC0 v1, CP0_EPC |
255 | LONG_L $31, PT_R31(sp) | 345 | LONG_L $31, PT_R31(sp) |
@@ -302,11 +392,33 @@ | |||
302 | * Set cp0 enable bit as sign that we're running on the kernel stack | 392 | * Set cp0 enable bit as sign that we're running on the kernel stack |
303 | */ | 393 | */ |
304 | .macro CLI | 394 | .macro CLI |
395 | #if !defined(CONFIG_MIPS_MT_SMTC) | ||
305 | mfc0 t0, CP0_STATUS | 396 | mfc0 t0, CP0_STATUS |
306 | li t1, ST0_CU0 | 0x1f | 397 | li t1, ST0_CU0 | 0x1f |
307 | or t0, t1 | 398 | or t0, t1 |
308 | xori t0, 0x1f | 399 | xori t0, 0x1f |
309 | mtc0 t0, CP0_STATUS | 400 | mtc0 t0, CP0_STATUS |
401 | #else /* CONFIG_MIPS_MT_SMTC */ | ||
402 | /* | ||
403 | * For SMTC, we need to set privilege | ||
404 | * and disable interrupts only for the | ||
405 | * current TC, using the TCStatus register. | ||
406 | */ | ||
407 | mfc0 t0,CP0_TCSTATUS | ||
408 | /* Fortunately CU 0 is in the same place in both registers */ | ||
409 | /* Set TCU0, TMX, TKSU (for later inversion) and IXMT */ | ||
410 | li t1, ST0_CU0 | 0x08001c00 | ||
411 | or t0,t1 | ||
412 | /* Clear TKSU, leave IXMT */ | ||
413 | xori t0, 0x00001800 | ||
414 | mtc0 t0, CP0_TCSTATUS | ||
415 | ehb | ||
416 | /* We need to leave the global IE bit set, but clear EXL...*/ | ||
417 | mfc0 t0, CP0_STATUS | ||
418 | ori t0, ST0_EXL | ST0_ERL | ||
419 | xori t0, ST0_EXL | ST0_ERL | ||
420 | mtc0 t0, CP0_STATUS | ||
421 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
310 | irq_disable_hazard | 422 | irq_disable_hazard |
311 | .endm | 423 | .endm |
312 | 424 | ||
@@ -315,11 +427,35 @@ | |||
315 | * Set cp0 enable bit as sign that we're running on the kernel stack | 427 | * Set cp0 enable bit as sign that we're running on the kernel stack |
316 | */ | 428 | */ |
317 | .macro STI | 429 | .macro STI |
430 | #if !defined(CONFIG_MIPS_MT_SMTC) | ||
318 | mfc0 t0, CP0_STATUS | 431 | mfc0 t0, CP0_STATUS |
319 | li t1, ST0_CU0 | 0x1f | 432 | li t1, ST0_CU0 | 0x1f |
320 | or t0, t1 | 433 | or t0, t1 |
321 | xori t0, 0x1e | 434 | xori t0, 0x1e |
322 | mtc0 t0, CP0_STATUS | 435 | mtc0 t0, CP0_STATUS |
436 | #else /* CONFIG_MIPS_MT_SMTC */ | ||
437 | /* | ||
438 | * For SMTC, we need to set privilege | ||
439 | * and enable interrupts only for the | ||
440 | * current TC, using the TCStatus register. | ||
441 | */ | ||
442 | ehb | ||
443 | mfc0 t0,CP0_TCSTATUS | ||
444 | /* Fortunately CU 0 is in the same place in both registers */ | ||
445 | /* Set TCU0, TMX, TKSU (for later inversion) and IXMT */ | ||
446 | li t1, ST0_CU0 | 0x08001c00 | ||
447 | or t0,t1 | ||
448 | /* Clear TKSU *and* IXMT */ | ||
449 | xori t0, 0x00001c00 | ||
450 | mtc0 t0, CP0_TCSTATUS | ||
451 | ehb | ||
452 | /* We need to leave the global IE bit set, but clear EXL...*/ | ||
453 | mfc0 t0, CP0_STATUS | ||
454 | ori t0, ST0_EXL | ||
455 | xori t0, ST0_EXL | ||
456 | mtc0 t0, CP0_STATUS | ||
457 | /* irq_enable_hazard below should expand to EHB for 24K/34K cpus */ | ||
458 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
323 | irq_enable_hazard | 459 | irq_enable_hazard |
324 | .endm | 460 | .endm |
325 | 461 | ||
@@ -328,11 +464,56 @@ | |||
328 | * Set cp0 enable bit as sign that we're running on the kernel stack | 464 | * Set cp0 enable bit as sign that we're running on the kernel stack |
329 | */ | 465 | */ |
330 | .macro KMODE | 466 | .macro KMODE |
467 | #ifdef CONFIG_MIPS_MT_SMTC | ||
468 | /* | ||
469 | * This gets baroque in SMTC. We want to | ||
470 | * protect the non-atomic clearing of EXL | ||
471 | * with DMT/EMT, but we don't want to take | ||
472 | * an interrupt while DMT is still in effect. | ||
473 | */ | ||
474 | |||
475 | /* KMODE gets invoked from both reorder and noreorder code */ | ||
476 | .set push | ||
477 | .set mips32r2 | ||
478 | .set noreorder | ||
479 | mfc0 v0, CP0_TCSTATUS | ||
480 | andi v1, v0, TCSTATUS_IXMT | ||
481 | ori v0, TCSTATUS_IXMT | ||
482 | mtc0 v0, CP0_TCSTATUS | ||
483 | ehb | ||
484 | DMT 2 # dmt v0 | ||
485 | /* | ||
486 | * We don't know a priori if ra is "live" | ||
487 | */ | ||
488 | move t0, ra | ||
489 | jal mips_ihb | ||
490 | nop /* delay slot */ | ||
491 | move ra, t0 | ||
492 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
331 | mfc0 t0, CP0_STATUS | 493 | mfc0 t0, CP0_STATUS |
332 | li t1, ST0_CU0 | 0x1e | 494 | li t1, ST0_CU0 | 0x1e |
333 | or t0, t1 | 495 | or t0, t1 |
334 | xori t0, 0x1e | 496 | xori t0, 0x1e |
335 | mtc0 t0, CP0_STATUS | 497 | mtc0 t0, CP0_STATUS |
498 | #ifdef CONFIG_MIPS_MT_SMTC | ||
499 | ehb | ||
500 | andi v0, v0, VPECONTROL_TE | ||
501 | beqz v0, 2f | ||
502 | nop /* delay slot */ | ||
503 | emt | ||
504 | 2: | ||
505 | mfc0 v0, CP0_TCSTATUS | ||
506 | /* Clear IXMT, then OR in previous value */ | ||
507 | ori v0, TCSTATUS_IXMT | ||
508 | xori v0, TCSTATUS_IXMT | ||
509 | or v0, v1, v0 | ||
510 | mtc0 v0, CP0_TCSTATUS | ||
511 | /* | ||
512 | * irq_disable_hazard below should expand to EHB | ||
513 | * on 24K/34K CPUs | ||
514 | */ | ||
515 | .set pop | ||
516 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
336 | irq_disable_hazard | 517 | irq_disable_hazard |
337 | .endm | 518 | .endm |
338 | 519 | ||
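The SMTC CLI/STI sequences above drive a single TC's privilege and interrupt masking through TCStatus instead of Status, using an or-then-xor pair so one constant can first set and then selectively clear bits. The plain-C sketch below reproduces that arithmetic with the literal values from the hunks; the field names follow the in-line comments (TCU0, TMX, TKSU, IXMT) and the bit positions should be read as an illustration of the technique rather than as an architecture reference.

    #include <stdio.h>
    #include <stdint.h>

    #define ST0_CU0           0x10000000u   /* coprocessor 0 usable */
    #define TC_TMX_TKSU_IXMT  0x08001c00u   /* TMX | TKSU | IXMT, per the comments */
    #define TC_TKSU           0x00001800u
    #define TC_IXMT           0x00000400u

    /* CLI: set CU0, clear TKSU (kernel mode), leave IXMT set (ints masked) */
    static uint32_t tcstatus_cli(uint32_t tcstatus)
    {
        tcstatus |= ST0_CU0 | TC_TMX_TKSU_IXMT;
        tcstatus ^= TC_TKSU;                /* the "xori 0x00001800" step */
        return tcstatus;
    }

    /* STI: as above, but clear IXMT too so this TC can take interrupts */
    static uint32_t tcstatus_sti(uint32_t tcstatus)
    {
        tcstatus |= ST0_CU0 | TC_TMX_TKSU_IXMT;
        tcstatus ^= TC_TKSU | TC_IXMT;      /* the "xori 0x00001c00" step */
        return tcstatus;
    }

    int main(void)
    {
        printf("after CLI: %08x\n", tcstatus_cli(0));
        printf("after STI: %08x\n", tcstatus_sti(0));
        return 0;
    }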
diff --git a/include/asm-mips/system.h b/include/asm-mips/system.h index 4097fac5ac3c..261f71d16a07 100644 --- a/include/asm-mips/system.h +++ b/include/asm-mips/system.h | |||
@@ -155,6 +155,37 @@ extern asmlinkage void *resume(void *last, void *next, void *next_ti); | |||
155 | 155 | ||
156 | struct task_struct; | 156 | struct task_struct; |
157 | 157 | ||
158 | #ifdef CONFIG_MIPS_MT_FPAFF | ||
159 | |||
160 | /* | ||
161 | * Handle the scheduler resume end of FPU affinity management. We do this | ||
162 | * inline to try to keep the overhead down. If we have been forced to run on | ||
163 | * a "CPU" with an FPU because of a previous high level of FP computation, | ||
164 | * but did not actually use the FPU during the most recent time-slice (CU1 | ||
165 | * isn't set), we undo the restriction on cpus_allowed. | ||
166 | * | ||
167 | * We're not calling set_cpus_allowed() here, because we have no need to | ||
168 | * force prompt migration - we're already switching the current CPU to a | ||
169 | * different thread. | ||
170 | */ | ||
171 | |||
172 | #define switch_to(prev,next,last) \ | ||
173 | do { \ | ||
174 | if (cpu_has_fpu && \ | ||
175 | (prev->thread.mflags & MF_FPUBOUND) && \ | ||
176 | (!(KSTK_STATUS(prev) & ST0_CU1))) { \ | ||
177 | prev->thread.mflags &= ~MF_FPUBOUND; \ | ||
178 | prev->cpus_allowed = prev->thread.user_cpus_allowed; \ | ||
179 | } \ | ||
180 | if (cpu_has_dsp) \ | ||
181 | __save_dsp(prev); \ | ||
182 | next->thread.emulated_fp = 0; \ | ||
183 | (last) = resume(prev, next, next->thread_info); \ | ||
184 | if (cpu_has_dsp) \ | ||
185 | __restore_dsp(current); \ | ||
186 | } while(0) | ||
187 | |||
188 | #else | ||
158 | #define switch_to(prev,next,last) \ | 189 | #define switch_to(prev,next,last) \ |
159 | do { \ | 190 | do { \ |
160 | if (cpu_has_dsp) \ | 191 | if (cpu_has_dsp) \ |
@@ -163,6 +194,7 @@ do { \ | |||
163 | if (cpu_has_dsp) \ | 194 | if (cpu_has_dsp) \ |
164 | __restore_dsp(current); \ | 195 | __restore_dsp(current); \ |
165 | } while(0) | 196 | } while(0) |
197 | #endif | ||
166 | 198 | ||
167 | /* | 199 | /* |
168 | * On SMP systems, when the scheduler does migration-cost autodetection, | 200 | * On SMP systems, when the scheduler does migration-cost autodetection, |
@@ -440,8 +472,8 @@ static inline unsigned long __cmpxchg(volatile void * ptr, unsigned long old, | |||
440 | extern void set_handler (unsigned long offset, void *addr, unsigned long len); | 472 | extern void set_handler (unsigned long offset, void *addr, unsigned long len); |
441 | extern void set_uncached_handler (unsigned long offset, void *addr, unsigned long len); | 473 | extern void set_uncached_handler (unsigned long offset, void *addr, unsigned long len); |
442 | extern void *set_vi_handler (int n, void *addr); | 474 | extern void *set_vi_handler (int n, void *addr); |
443 | extern void *set_vi_srs_handler (int n, void *addr, int regset); | ||
444 | extern void *set_except_vector(int n, void *addr); | 475 | extern void *set_except_vector(int n, void *addr); |
476 | extern unsigned long ebase; | ||
445 | extern void per_cpu_trap_init(void); | 477 | extern void per_cpu_trap_init(void); |
446 | 478 | ||
447 | extern NORET_TYPE void die(const char *, struct pt_regs *); | 479 | extern NORET_TYPE void die(const char *, struct pt_regs *); |
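The FPAFF variant of switch_to() relaxes the affinity restriction lazily: only when the outgoing task had been bound to FPU-capable CPUs (MF_FPUBOUND) yet finished its timeslice without the FPU ever being enabled (Status.CU1 clear) is the user's original mask restored. A standalone model of just that decision; the struct and its fields are invented stand-ins for the task_struct/thread_struct members used above, and ST0_CU1 is assumed to be the usual Status coprocessor-1-enable bit.

    #include <stdio.h>

    #define MF_FPUBOUND 0x10                /* from processor.h above */
    #define ST0_CU1     0x20000000u         /* assumed: Status.CU1 */

    struct task_model {
        unsigned long mflags;               /* stand-in for thread.mflags */
        unsigned int saved_status;          /* stand-in for KSTK_STATUS(prev) */
        unsigned long cpus_allowed;         /* stand-ins for the affinity masks */
        unsigned long user_cpus_allowed;
    };

    static void relax_fpu_affinity(struct task_model *prev)
    {
        if ((prev->mflags & MF_FPUBOUND) && !(prev->saved_status & ST0_CU1)) {
            prev->mflags &= ~MF_FPUBOUND;
            prev->cpus_allowed = prev->user_cpus_allowed;
        }
    }

    int main(void)
    {
        struct task_model t = { MF_FPUBOUND, 0, 0x1, 0xf };

        relax_fpu_affinity(&t);             /* no FP use last slice: unbind */
        printf("cpus_allowed now 0x%lx, fpubound %s\n",
               t.cpus_allowed, (t.mflags & MF_FPUBOUND) ? "yes" : "no");
        return 0;
    }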
diff --git a/include/asm-mips/unistd.h b/include/asm-mips/unistd.h index b5c78a4a0192..1068fe9a0a58 100644 --- a/include/asm-mips/unistd.h +++ b/include/asm-mips/unistd.h | |||
@@ -324,16 +324,18 @@ | |||
324 | #define __NR_pselect6 (__NR_Linux + 301) | 324 | #define __NR_pselect6 (__NR_Linux + 301) |
325 | #define __NR_ppoll (__NR_Linux + 302) | 325 | #define __NR_ppoll (__NR_Linux + 302) |
326 | #define __NR_unshare (__NR_Linux + 303) | 326 | #define __NR_unshare (__NR_Linux + 303) |
327 | #define __NR_splice (__NR_Linux + 304) | ||
328 | #define __NR_sync_file_range (__NR_Linux + 305) | ||
327 | 329 | ||
328 | /* | 330 | /* |
329 | * Offset of the last Linux o32 flavoured syscall | 331 | * Offset of the last Linux o32 flavoured syscall |
330 | */ | 332 | */ |
331 | #define __NR_Linux_syscalls 303 | 333 | #define __NR_Linux_syscalls 305 |
332 | 334 | ||
333 | #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ | 335 | #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ |
334 | 336 | ||
335 | #define __NR_O32_Linux 4000 | 337 | #define __NR_O32_Linux 4000 |
336 | #define __NR_O32_Linux_syscalls 303 | 338 | #define __NR_O32_Linux_syscalls 305 |
337 | 339 | ||
338 | #if _MIPS_SIM == _MIPS_SIM_ABI64 | 340 | #if _MIPS_SIM == _MIPS_SIM_ABI64 |
339 | 341 | ||
@@ -604,16 +606,18 @@ | |||
604 | #define __NR_pselect6 (__NR_Linux + 260) | 606 | #define __NR_pselect6 (__NR_Linux + 260) |
605 | #define __NR_ppoll (__NR_Linux + 261) | 607 | #define __NR_ppoll (__NR_Linux + 261) |
606 | #define __NR_unshare (__NR_Linux + 262) | 608 | #define __NR_unshare (__NR_Linux + 262) |
609 | #define __NR_splice (__NR_Linux + 263) | ||
610 | #define __NR_sync_file_range (__NR_Linux + 264) | ||
607 | 611 | ||
608 | /* | 612 | /* |
609 | * Offset of the last Linux 64-bit flavoured syscall | 613 | * Offset of the last Linux 64-bit flavoured syscall |
610 | */ | 614 | */ |
611 | #define __NR_Linux_syscalls 262 | 615 | #define __NR_Linux_syscalls 264 |
612 | 616 | ||
613 | #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ | 617 | #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ |
614 | 618 | ||
615 | #define __NR_64_Linux 5000 | 619 | #define __NR_64_Linux 5000 |
616 | #define __NR_64_Linux_syscalls 262 | 620 | #define __NR_64_Linux_syscalls 264 |
617 | 621 | ||
618 | #if _MIPS_SIM == _MIPS_SIM_NABI32 | 622 | #if _MIPS_SIM == _MIPS_SIM_NABI32 |
619 | 623 | ||
@@ -888,16 +892,18 @@ | |||
888 | #define __NR_pselect6 (__NR_Linux + 264) | 892 | #define __NR_pselect6 (__NR_Linux + 264) |
889 | #define __NR_ppoll (__NR_Linux + 265) | 893 | #define __NR_ppoll (__NR_Linux + 265) |
890 | #define __NR_unshare (__NR_Linux + 266) | 894 | #define __NR_unshare (__NR_Linux + 266) |
895 | #define __NR_splice (__NR_Linux + 267) | ||
896 | #define __NR_sync_file_range (__NR_Linux + 268) | ||
891 | 897 | ||
892 | /* | 898 | /* |
893 | * Offset of the last N32 flavoured syscall | 899 | * Offset of the last N32 flavoured syscall |
894 | */ | 900 | */ |
895 | #define __NR_Linux_syscalls 266 | 901 | #define __NR_Linux_syscalls 268 |
896 | 902 | ||
897 | #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ | 903 | #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ |
898 | 904 | ||
899 | #define __NR_N32_Linux 6000 | 905 | #define __NR_N32_Linux 6000 |
900 | #define __NR_N32_Linux_syscalls 266 | 906 | #define __NR_N32_Linux_syscalls 268 |
901 | 907 | ||
902 | #ifndef __ASSEMBLY__ | 908 | #ifndef __ASSEMBLY__ |
903 | 909 | ||
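With the offsets added above, splice and sync_file_range land at 4304/4305 (o32), 5263/5264 (64-bit) and 6267/6268 (N32). Until libc grows wrappers they can be reached through syscall(2); the o32 sketch below is illustrative only — the hard-coded 4304 is just __NR_O32_Linux (4000) plus the offset 304 above, and it assumes stdout is connected to a pipe, since one side of splice must be a pipe.

    /* Hypothetical o32 userspace sketch: invoke the new splice syscall by
     * number (4304 = __NR_O32_Linux 4000 + offset 304 above).  Run as
     * "prog < file | cat" so fd 1 is a pipe. */
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    #define O32_NR_splice 4304

    int main(void)
    {
            long n = syscall(O32_NR_splice, 0, NULL, 1, NULL, 4096, 0);

            if (n < 0) {
                    perror("splice");
                    return 1;
            }
            fprintf(stderr, "spliced %ld bytes\n", n);
            return 0;
    }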
diff --git a/include/asm-mips/vpe.h b/include/asm-mips/vpe.h new file mode 100644 index 000000000000..c6e1b961537d --- /dev/null +++ b/include/asm-mips/vpe.h | |||
@@ -0,0 +1,37 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved. | ||
3 | * | ||
4 | * This program is free software; you can distribute it and/or modify it | ||
5 | * under the terms of the GNU General Public License (Version 2) as | ||
6 | * published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | ||
11 | * for more details. | ||
12 | * | ||
13 | * You should have received a copy of the GNU General Public License along | ||
14 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
15 | * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. | ||
16 | * | ||
17 | */ | ||
18 | |||
19 | #ifndef _ASM_VPE_H | ||
20 | #define _ASM_VPE_H | ||
21 | |||
22 | struct vpe_notifications { | ||
23 | void (*start)(int vpe); | ||
24 | void (*stop)(int vpe); | ||
25 | |||
26 | struct list_head list; | ||
27 | }; | ||
28 | |||
29 | |||
30 | extern int vpe_notify(int index, struct vpe_notifications *notify); | ||
31 | |||
32 | extern void *vpe_get_shared(int index); | ||
33 | extern int vpe_getuid(int index); | ||
34 | extern int vpe_getgid(int index); | ||
35 | extern char *vpe_getcwd(int index); | ||
36 | |||
37 | #endif /* _ASM_VPE_H */ | ||
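The new vpe.h only declares the interface; a consumer (say, a driver exchanging data with a program loaded onto another VPE) would register callbacks with vpe_notify() and locate the shared area with vpe_get_shared(). The sketch below uses only the declarations above; the VPE index 1, the callback bodies and the module name are assumptions.

    /* Hypothetical vpe.h client.  Everything except the vpe_* calls and
     * struct vpe_notifications is illustrative. */
    #include <linux/module.h>
    #include <linux/init.h>
    #include <linux/kernel.h>
    #include <linux/list.h>
    #include <asm/vpe.h>

    static void my_vpe_started(int vpe)
    {
            printk(KERN_INFO "VPE %d started, shared area %p\n",
                   vpe, vpe_get_shared(vpe));
    }

    static void my_vpe_stopped(int vpe)
    {
            printk(KERN_INFO "VPE %d stopped (uid %d, gid %d)\n",
                   vpe, vpe_getuid(vpe), vpe_getgid(vpe));
    }

    static struct vpe_notifications my_vpe_notify = {
            .start = my_vpe_started,
            .stop  = my_vpe_stopped,
    };

    static int __init my_vpe_client_init(void)
    {
            return vpe_notify(1, &my_vpe_notify);   /* assume VPE 1 is the slave */
    }
    module_init(my_vpe_client_init);
    MODULE_LICENSE("GPL");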
diff --git a/include/asm-powerpc/irq.h b/include/asm-powerpc/irq.h index 51f87d9993b6..7bc6d73b2823 100644 --- a/include/asm-powerpc/irq.h +++ b/include/asm-powerpc/irq.h | |||
@@ -54,6 +54,13 @@ | |||
54 | */ | 54 | */ |
55 | extern unsigned int virt_irq_to_real_map[NR_IRQS]; | 55 | extern unsigned int virt_irq_to_real_map[NR_IRQS]; |
56 | 56 | ||
57 | /* The maximum virtual IRQ number that we support. This | ||
58 | * can be set by the platform and will be reduced by the | ||
59 | * value of __irq_offset_value. It defaults to and is | ||
60 | * capped by (NR_IRQS - 1). | ||
61 | */ | ||
62 | extern unsigned int virt_irq_max; | ||
63 | |||
57 | /* Create a mapping for a real_irq if it doesn't already exist. | 64 | /* Create a mapping for a real_irq if it doesn't already exist. |
58 | * Return the virtual irq as a convenience. | 65 | * Return the virtual irq as a convenience. |
59 | */ | 66 | */ |
diff --git a/include/asm-powerpc/thread_info.h b/include/asm-powerpc/thread_info.h index ffc7462d77ba..88b553c6b26c 100644 --- a/include/asm-powerpc/thread_info.h +++ b/include/asm-powerpc/thread_info.h | |||
@@ -37,6 +37,8 @@ struct thread_info { | |||
37 | int preempt_count; /* 0 => preemptable, | 37 | int preempt_count; /* 0 => preemptable, |
38 | <0 => BUG */ | 38 | <0 => BUG */ |
39 | struct restart_block restart_block; | 39 | struct restart_block restart_block; |
40 | unsigned long local_flags; /* private flags for thread */ | ||
41 | |||
40 | /* low level flags - has atomic operations done on it */ | 42 | /* low level flags - has atomic operations done on it */ |
41 | unsigned long flags ____cacheline_aligned_in_smp; | 43 | unsigned long flags ____cacheline_aligned_in_smp; |
42 | }; | 44 | }; |
@@ -143,6 +145,12 @@ static inline struct thread_info *current_thread_info(void) | |||
143 | _TIF_NEED_RESCHED | _TIF_RESTORE_SIGMASK) | 145 | _TIF_NEED_RESCHED | _TIF_RESTORE_SIGMASK) |
144 | #define _TIF_PERSYSCALL_MASK (_TIF_RESTOREALL|_TIF_NOERROR) | 146 | #define _TIF_PERSYSCALL_MASK (_TIF_RESTOREALL|_TIF_NOERROR) |
145 | 147 | ||
148 | /* Bits in local_flags */ | ||
149 | /* Don't move TLF_NAPPING without adjusting the code in entry_32.S */ | ||
150 | #define TLF_NAPPING 0 /* idle thread enabled NAP mode */ | ||
151 | |||
152 | #define _TLF_NAPPING (1 << TLF_NAPPING) | ||
153 | |||
146 | #endif /* __KERNEL__ */ | 154 | #endif /* __KERNEL__ */ |
147 | 155 | ||
148 | #endif /* _ASM_POWERPC_THREAD_INFO_H */ | 156 | #endif /* _ASM_POWERPC_THREAD_INFO_H */ |
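local_flags is private to the owning thread, so unlike flags it needs no atomic operations; per the comment above, the low-level exception path in entry_32.S inspects TLF_NAPPING. A sketch of the idle-loop side follows; enter_nap_mode() is a placeholder, and the exact hand-off with entry_32.S is assumed rather than shown by this diff.

    /* Hypothetical idle-loop fragment: mark the thread as napping so the
     * exception return path (entry_32.S, per the comment above) can back
     * it out of nap on wakeup.  enter_nap_mode() stands in for the real
     * power-saving sequence. */
    #include <asm/thread_info.h>

    extern void enter_nap_mode(void);       /* placeholder */

    static void my_idle_nap(void)
    {
            current_thread_info()->local_flags |= _TLF_NAPPING;
            enter_nap_mode();
            /* _TLF_NAPPING is presumably cleared by the wakeup path */
    }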
diff --git a/include/asm-sparc/unistd.h b/include/asm-sparc/unistd.h index 2a911aa4cae9..45feff893b8e 100644 --- a/include/asm-sparc/unistd.h +++ b/include/asm-sparc/unistd.h | |||
@@ -248,7 +248,7 @@ | |||
248 | #define __NR_setfsgid 229 /* Linux Specific */ | 248 | #define __NR_setfsgid 229 /* Linux Specific */ |
249 | #define __NR__newselect 230 /* Linux Specific */ | 249 | #define __NR__newselect 230 /* Linux Specific */ |
250 | #define __NR_time 231 /* Linux Specific */ | 250 | #define __NR_time 231 /* Linux Specific */ |
251 | #define __NR_sys_splice 232 /* Linux Specific */ | 251 | #define __NR_splice 232 /* Linux Specific */ |
252 | #define __NR_stime 233 /* Linux Specific */ | 252 | #define __NR_stime 233 /* Linux Specific */ |
253 | #define __NR_statfs64 234 /* Linux Specific */ | 253 | #define __NR_statfs64 234 /* Linux Specific */ |
254 | #define __NR_fstatfs64 235 /* Linux Specific */ | 254 | #define __NR_fstatfs64 235 /* Linux Specific */ |
diff --git a/include/asm-sparc64/unistd.h b/include/asm-sparc64/unistd.h index 6ada6a871cc8..597f6923a46e 100644 --- a/include/asm-sparc64/unistd.h +++ b/include/asm-sparc64/unistd.h | |||
@@ -250,7 +250,7 @@ | |||
250 | #ifdef __KERNEL__ | 250 | #ifdef __KERNEL__ |
251 | #define __NR_time 231 /* Linux sparc32 */ | 251 | #define __NR_time 231 /* Linux sparc32 */ |
252 | #endif | 252 | #endif |
253 | #define __NR_sys_splice 232 /* Linux Specific */ | 253 | #define __NR_splice 232 /* Linux Specific */ |
254 | #define __NR_stime 233 /* Linux Specific */ | 254 | #define __NR_stime 233 /* Linux Specific */ |
255 | #define __NR_statfs64 234 /* Linux Specific */ | 255 | #define __NR_statfs64 234 /* Linux Specific */ |
256 | #define __NR_fstatfs64 235 /* Linux Specific */ | 256 | #define __NR_fstatfs64 235 /* Linux Specific */ |
diff --git a/include/asm-x86_64/cache.h b/include/asm-x86_64/cache.h index c8043a16152e..f8dff1c67538 100644 --- a/include/asm-x86_64/cache.h +++ b/include/asm-x86_64/cache.h | |||
@@ -20,8 +20,8 @@ | |||
20 | __attribute__((__section__(".data.page_aligned"))) | 20 | __attribute__((__section__(".data.page_aligned"))) |
21 | #endif | 21 | #endif |
22 | 22 | ||
23 | #define __read_mostly __attribute__((__section__(".data.read_mostly"))) | ||
24 | |||
25 | #endif | 23 | #endif |
26 | 24 | ||
25 | #define __read_mostly __attribute__((__section__(".data.read_mostly"))) | ||
26 | |||
27 | #endif | 27 | #endif |
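Functionally this just moves the __read_mostly definition out of the conditional block so every user of asm-x86_64/cache.h sees it. In use it is an annotation on data that is read hot but written rarely, e.g. (illustrative variable name):

    /* Illustrative only: keep a hot, rarely-written flag in
     * .data.read_mostly so it does not share a cache line with
     * frequently-written data. */
    static int my_feature_enabled __read_mostly = 1;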
diff --git a/include/asm-x86_64/cpufeature.h b/include/asm-x86_64/cpufeature.h index 76bb6193ae91..662964b74e34 100644 --- a/include/asm-x86_64/cpufeature.h +++ b/include/asm-x86_64/cpufeature.h | |||
@@ -64,6 +64,7 @@ | |||
64 | #define X86_FEATURE_REP_GOOD (3*32+ 4) /* rep microcode works well on this CPU */ | 64 | #define X86_FEATURE_REP_GOOD (3*32+ 4) /* rep microcode works well on this CPU */ |
65 | #define X86_FEATURE_CONSTANT_TSC (3*32+5) /* TSC runs at constant rate */ | 65 | #define X86_FEATURE_CONSTANT_TSC (3*32+5) /* TSC runs at constant rate */ |
66 | #define X86_FEATURE_SYNC_RDTSC (3*32+6) /* RDTSC syncs CPU core */ | 66 | #define X86_FEATURE_SYNC_RDTSC (3*32+6) /* RDTSC syncs CPU core */ |
67 | #define X86_FEATURE_FXSAVE_LEAK (3*32+7) /* FIP/FOP/FDP leaks through FXSAVE */ | ||
67 | 68 | ||
68 | /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ | 69 | /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ |
69 | #define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */ | 70 | #define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */ |
diff --git a/include/asm-x86_64/i387.h b/include/asm-x86_64/i387.h index 876eb9a2fe78..cba8a3b0cded 100644 --- a/include/asm-x86_64/i387.h +++ b/include/asm-x86_64/i387.h | |||
@@ -72,6 +72,23 @@ extern int set_fpregs(struct task_struct *tsk, | |||
72 | #define set_fpu_swd(t,val) ((t)->thread.i387.fxsave.swd = (val)) | 72 | #define set_fpu_swd(t,val) ((t)->thread.i387.fxsave.swd = (val)) |
73 | #define set_fpu_fxsr_twd(t,val) ((t)->thread.i387.fxsave.twd = (val)) | 73 | #define set_fpu_fxsr_twd(t,val) ((t)->thread.i387.fxsave.twd = (val)) |
74 | 74 | ||
75 | #define X87_FSW_ES (1 << 7) /* Exception Summary */ | ||
76 | |||
77 | /* AMD CPUs don't save/restore FDP/FIP/FOP unless an exception | ||
78 | is pending. Clear the x87 state here by setting it to fixed | ||
79 | values. The kernel data segment can be sometimes 0 and sometimes | ||
80 | new user value. Both should be ok. | ||
81 | Use the PDA as safe address because it should be already in L1. */ | ||
82 | static inline void clear_fpu_state(struct i387_fxsave_struct *fx) | ||
83 | { | ||
84 | if (unlikely(fx->swd & X87_FSW_ES)) | ||
85 | asm volatile("fnclex"); | ||
86 | alternative_input(ASM_NOP8 ASM_NOP2, | ||
87 | " emms\n" /* clear stack tags */ | ||
88 | " fildl %%gs:0", /* load to clear state */ | ||
89 | X86_FEATURE_FXSAVE_LEAK); | ||
90 | } | ||
91 | |||
75 | static inline int restore_fpu_checking(struct i387_fxsave_struct *fx) | 92 | static inline int restore_fpu_checking(struct i387_fxsave_struct *fx) |
76 | { | 93 | { |
77 | int err; | 94 | int err; |
@@ -119,6 +136,7 @@ static inline int save_i387_checking(struct i387_fxsave_struct __user *fx) | |||
119 | #endif | 136 | #endif |
120 | if (unlikely(err)) | 137 | if (unlikely(err)) |
121 | __clear_user(fx, sizeof(struct i387_fxsave_struct)); | 138 | __clear_user(fx, sizeof(struct i387_fxsave_struct)); |
139 | /* No need to clear here because the caller clears USED_MATH */ | ||
122 | return err; | 140 | return err; |
123 | } | 141 | } |
124 | 142 | ||
@@ -149,7 +167,7 @@ static inline void __fxsave_clear(struct task_struct *tsk) | |||
149 | "i" (offsetof(__typeof__(*tsk), | 167 | "i" (offsetof(__typeof__(*tsk), |
150 | thread.i387.fxsave))); | 168 | thread.i387.fxsave))); |
151 | #endif | 169 | #endif |
152 | __asm__ __volatile__("fnclex"); | 170 | clear_fpu_state(&tsk->thread.i387.fxsave); |
153 | } | 171 | } |
154 | 172 | ||
155 | static inline void kernel_fpu_begin(void) | 173 | static inline void kernel_fpu_begin(void) |
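clear_fpu_state() above only patches in the emms/fildl sequence when X86_FEATURE_FXSAVE_LEAK is set; setting that bit is up to the CPU setup code for the affected AMD parts. A hedged sketch of that side follows — the vendor test is the obvious one, but the exact family/model gating used by the real fix is not shown by this diff.

    /* Hypothetical CPU-setup fragment: flag AMD parts as needing the
     * FXSAVE leak workaround so alternative_input() in clear_fpu_state()
     * activates.  A real implementation would likely also gate on the
     * CPU family; that detail is omitted here. */
    #include <linux/init.h>
    #include <linux/bitops.h>
    #include <asm/processor.h>
    #include <asm/cpufeature.h>

    static void __init check_fxsave_leak(struct cpuinfo_x86 *c)
    {
            if (c->x86_vendor == X86_VENDOR_AMD)
                    set_bit(X86_FEATURE_FXSAVE_LEAK, &c->x86_capability);
    }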
diff --git a/include/asm-x86_64/mmzone.h b/include/asm-x86_64/mmzone.h index 6b18cd8f293d..6944e7122df5 100644 --- a/include/asm-x86_64/mmzone.h +++ b/include/asm-x86_64/mmzone.h | |||
@@ -12,7 +12,8 @@ | |||
12 | 12 | ||
13 | #include <asm/smp.h> | 13 | #include <asm/smp.h> |
14 | 14 | ||
15 | #define NODEMAPSIZE 0xfff | 15 | /* Should really switch to dynamic allocation at some point */ |
16 | #define NODEMAPSIZE 0x4fff | ||
16 | 17 | ||
17 | /* Simple perfect hash to map physical addresses to node numbers */ | 18 | /* Simple perfect hash to map physical addresses to node numbers */ |
18 | struct memnode { | 19 | struct memnode { |
diff --git a/include/asm-x86_64/percpu.h b/include/asm-x86_64/percpu.h index 4405b4adeaba..7f33aaf9f7b1 100644 --- a/include/asm-x86_64/percpu.h +++ b/include/asm-x86_64/percpu.h | |||
@@ -26,7 +26,7 @@ | |||
26 | #define percpu_modcopy(pcpudst, src, size) \ | 26 | #define percpu_modcopy(pcpudst, src, size) \ |
27 | do { \ | 27 | do { \ |
28 | unsigned int __i; \ | 28 | unsigned int __i; \ |
29 | for_each_cpu(__i) \ | 29 | for_each_possible_cpu(__i) \ |
30 | memcpy((pcpudst)+__per_cpu_offset(__i), \ | 30 | memcpy((pcpudst)+__per_cpu_offset(__i), \ |
31 | (src), (size)); \ | 31 | (src), (size)); \ |
32 | } while (0) | 32 | } while (0) |
diff --git a/include/asm-x86_64/unistd.h b/include/asm-x86_64/unistd.h index d86494e23b63..98c36eae567c 100644 --- a/include/asm-x86_64/unistd.h +++ b/include/asm-x86_64/unistd.h | |||
@@ -613,8 +613,10 @@ __SYSCALL(__NR_get_robust_list, sys_get_robust_list) | |||
613 | __SYSCALL(__NR_splice, sys_splice) | 613 | __SYSCALL(__NR_splice, sys_splice) |
614 | #define __NR_tee 276 | 614 | #define __NR_tee 276 |
615 | __SYSCALL(__NR_tee, sys_tee) | 615 | __SYSCALL(__NR_tee, sys_tee) |
616 | #define __NR_sync_file_range 277 | ||
617 | __SYSCALL(__NR_sync_file_range, sys_sync_file_range) | ||
616 | 618 | ||
617 | #define __NR_syscall_max __NR_tee | 619 | #define __NR_syscall_max __NR_sync_file_range |
618 | 620 | ||
619 | #ifndef __NO_STUBS | 621 | #ifndef __NO_STUBS |
620 | 622 | ||
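With __NR_sync_file_range at 277, userspace can reach the call through syscall(2) before a libc wrapper exists. The sketch below is illustrative: the literal flag value 2 is SYNC_FILE_RANGE_WRITE written out numerically, and an offset/nbytes of 0 means "from the start to the end of the file".

    /* Hypothetical x86-64 userspace sketch: start writeback on a file via
     * the new sync_file_range syscall (number 277 above).  The literal 2
     * is SYNC_FILE_RANGE_WRITE; a zero nbytes extends to end of file. */
    #include <stdio.h>
    #include <unistd.h>
    #include <fcntl.h>
    #include <sys/syscall.h>

    int main(void)
    {
            int fd = open("/tmp/testfile", O_WRONLY | O_CREAT, 0644);

            if (fd < 0 || write(fd, "hello\n", 6) != 6) {
                    perror("setup");
                    return 1;
            }
            if (syscall(277, fd, 0L, 0L, 2) < 0)
                    perror("sync_file_range");
            close(fd);
            return 0;
    }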
diff --git a/include/asm-xtensa/ioctls.h b/include/asm-xtensa/ioctls.h index 10c443435c11..3b89a772d0a0 100644 --- a/include/asm-xtensa/ioctls.h +++ b/include/asm-xtensa/ioctls.h | |||
@@ -107,6 +107,6 @@ | |||
107 | #define TIOCSERSETMULTI _IOW('T', 91, struct serial_multiport_struct) /* Set multiport config */ | 107 | #define TIOCSERSETMULTI _IOW('T', 91, struct serial_multiport_struct) /* Set multiport config */ |
108 | 108 | ||
109 | #define TIOCMIWAIT _IO('T', 92) /* wait for a change on serial input line(s) */ | 109 | #define TIOCMIWAIT _IO('T', 92) /* wait for a change on serial input line(s) */ |
110 | #define TIOCGICOUNT _IOR('T', 93, struct async_icount) /* read serial port inline interrupt counts */ | 110 | #define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */ |
111 | 111 | ||
112 | #endif /* _XTENSA_IOCTLS_H */ | 112 | #endif /* _XTENSA_IOCTLS_H */ |
diff --git a/include/linux/ide.h b/include/linux/ide.h index 8d2db412ba9c..a8bef1d1371c 100644 --- a/include/linux/ide.h +++ b/include/linux/ide.h | |||
@@ -1220,7 +1220,6 @@ typedef struct ide_pci_enablebit_s { | |||
1220 | enum { | 1220 | enum { |
1221 | /* Uses ISA control ports not PCI ones. */ | 1221 | /* Uses ISA control ports not PCI ones. */ |
1222 | IDEPCI_FLAG_ISA_PORTS = (1 << 0), | 1222 | IDEPCI_FLAG_ISA_PORTS = (1 << 0), |
1223 | IDEPCI_FLAG_FORCE_PDC = (1 << 1), | ||
1224 | }; | 1223 | }; |
1225 | 1224 | ||
1226 | typedef struct ide_pci_device_s { | 1225 | typedef struct ide_pci_device_s { |
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index 4ca3e6ad03ec..911206386171 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h | |||
@@ -99,10 +99,7 @@ static inline int __remove_pages(struct zone *zone, unsigned long start_pfn, | |||
99 | return -ENOSYS; | 99 | return -ENOSYS; |
100 | } | 100 | } |
101 | 101 | ||
102 | #if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_ACPI_HOTPLUG_MEMORY) \ | ||
103 | || defined(CONFIG_ACPI_HOTPLUG_MEMORY_MODULE) | ||
104 | extern int add_memory(u64 start, u64 size); | 102 | extern int add_memory(u64 start, u64 size); |
105 | extern int remove_memory(u64 start, u64 size); | 103 | extern int remove_memory(u64 start, u64 size); |
106 | #endif | ||
107 | 104 | ||
108 | #endif /* __LINUX_MEMORY_HOTPLUG_H */ | 105 | #endif /* __LINUX_MEMORY_HOTPLUG_H */ |
diff --git a/include/linux/mv643xx.h b/include/linux/mv643xx.h index 955d3069d727..edfa012fad3a 100644 --- a/include/linux/mv643xx.h +++ b/include/linux/mv643xx.h | |||
@@ -13,7 +13,7 @@ | |||
13 | #ifndef __ASM_MV643XX_H | 13 | #ifndef __ASM_MV643XX_H |
14 | #define __ASM_MV643XX_H | 14 | #define __ASM_MV643XX_H |
15 | 15 | ||
16 | #ifdef __MIPS__ | 16 | #ifdef __mips__ |
17 | #include <asm/addrspace.h> | 17 | #include <asm/addrspace.h> |
18 | #include <asm/marvell.h> | 18 | #include <asm/marvell.h> |
19 | #endif | 19 | #endif |
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 8d03e10212f5..d6fe048376ab 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h | |||
@@ -356,6 +356,10 @@ | |||
356 | #define PCI_DEVICE_ID_ATI_IXP300_SATA 0x436e | 356 | #define PCI_DEVICE_ID_ATI_IXP300_SATA 0x436e |
357 | #define PCI_DEVICE_ID_ATI_IXP400_IDE 0x4376 | 357 | #define PCI_DEVICE_ID_ATI_IXP400_IDE 0x4376 |
358 | #define PCI_DEVICE_ID_ATI_IXP400_SATA 0x4379 | 358 | #define PCI_DEVICE_ID_ATI_IXP400_SATA 0x4379 |
359 | #define PCI_DEVICE_ID_ATI_IXP400_SATA2 0x437a | ||
360 | #define PCI_DEVICE_ID_ATI_IXP600_SATA 0x4380 | ||
361 | #define PCI_DEVICE_ID_ATI_IXP600_SRAID 0x4381 | ||
362 | #define PCI_DEVICE_ID_ATI_IXP600_IDE 0x438c | ||
359 | 363 | ||
360 | #define PCI_VENDOR_ID_VLSI 0x1004 | 364 | #define PCI_VENDOR_ID_VLSI 0x1004 |
361 | #define PCI_DEVICE_ID_VLSI_82C592 0x0005 | 365 | #define PCI_DEVICE_ID_VLSI_82C592 0x0005 |
diff --git a/include/linux/sched.h b/include/linux/sched.h index b7d31e2e1729..29b7d4f87d20 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -1192,8 +1192,7 @@ extern void wait_task_inactive(task_t * p); | |||
1192 | #define remove_parent(p) list_del_init(&(p)->sibling) | 1192 | #define remove_parent(p) list_del_init(&(p)->sibling) |
1193 | #define add_parent(p) list_add_tail(&(p)->sibling,&(p)->parent->children) | 1193 | #define add_parent(p) list_add_tail(&(p)->sibling,&(p)->parent->children) |
1194 | 1194 | ||
1195 | #define next_task(p) list_entry((p)->tasks.next, struct task_struct, tasks) | 1195 | #define next_task(p) list_entry(rcu_dereference((p)->tasks.next), struct task_struct, tasks) |
1196 | #define prev_task(p) list_entry((p)->tasks.prev, struct task_struct, tasks) | ||
1197 | 1196 | ||
1198 | #define for_each_process(p) \ | 1197 | #define for_each_process(p) \ |
1199 | for (p = &init_task ; (p = next_task(p)) != &init_task ; ) | 1198 | for (p = &init_task ; (p = next_task(p)) != &init_task ; ) |
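Making next_task() go through rcu_dereference() lets read-only walks of the task list run under rcu_read_lock() rather than the tasklist lock; prev_task() is dropped, presumably because a backwards walk has no equivalent lockless guarantee. A sketch of such a walk, with the counting payload chosen purely for illustration:

    /* Hypothetical read-only task-list walk: with next_task() using
     * rcu_dereference(), rcu_read_lock() is what protects the traversal.
     * Counting runnable tasks is an arbitrary example payload. */
    #include <linux/sched.h>
    #include <linux/rcupdate.h>

    static int count_running_tasks(void)
    {
            struct task_struct *p;
            int running = 0;

            rcu_read_lock();
            for_each_process(p) {
                    if (p->state == TASK_RUNNING)
                            running++;
            }
            rcu_read_unlock();
            return running;
    }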
diff --git a/include/linux/sunrpc/metrics.h b/include/linux/sunrpc/metrics.h index 8f96e9dc369a..77f78e56c481 100644 --- a/include/linux/sunrpc/metrics.h +++ b/include/linux/sunrpc/metrics.h | |||
@@ -69,9 +69,21 @@ struct rpc_clnt; | |||
69 | /* | 69 | /* |
70 | * EXPORTed functions for managing rpc_iostats structures | 70 | * EXPORTed functions for managing rpc_iostats structures |
71 | */ | 71 | */ |
72 | |||
73 | #ifdef CONFIG_PROC_FS | ||
74 | |||
72 | struct rpc_iostats * rpc_alloc_iostats(struct rpc_clnt *); | 75 | struct rpc_iostats * rpc_alloc_iostats(struct rpc_clnt *); |
73 | void rpc_count_iostats(struct rpc_task *); | 76 | void rpc_count_iostats(struct rpc_task *); |
74 | void rpc_print_iostats(struct seq_file *, struct rpc_clnt *); | 77 | void rpc_print_iostats(struct seq_file *, struct rpc_clnt *); |
75 | void rpc_free_iostats(struct rpc_iostats *); | 78 | void rpc_free_iostats(struct rpc_iostats *); |
76 | 79 | ||
80 | #else /* CONFIG_PROC_FS */ | ||
81 | |||
82 | static inline struct rpc_iostats *rpc_alloc_iostats(struct rpc_clnt *clnt) { return NULL; } | ||
83 | static inline void rpc_count_iostats(struct rpc_task *task) {} | ||
84 | static inline void rpc_print_iostats(struct seq_file *seq, struct rpc_clnt *clnt) {} | ||
85 | static inline void rpc_free_iostats(struct rpc_iostats *stats) {} | ||
86 | |||
87 | #endif /* CONFIG_PROC_FS */ | ||
88 | |||
77 | #endif /* _LINUX_SUNRPC_METRICS_H */ | 89 | #endif /* _LINUX_SUNRPC_METRICS_H */ |
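The !CONFIG_PROC_FS stubs above let RPC clients call the iostats helpers unconditionally: rpc_alloc_iostats() simply returns NULL and the rest become no-ops. A sketch of a caller written against only the prototypes above; the my_* wrappers and the module-level pointer are illustrative (real code would keep the pointer with its client).

    /* Hypothetical rpc_iostats caller: builds the same way with or
     * without CONFIG_PROC_FS thanks to the stubs above.  The module-level
     * pointer is illustrative; real code stores it per client. */
    #include <linux/seq_file.h>
    #include <linux/sunrpc/clnt.h>
    #include <linux/sunrpc/metrics.h>

    static struct rpc_iostats *my_stats;

    static void my_setup_metrics(struct rpc_clnt *clnt)
    {
            my_stats = rpc_alloc_iostats(clnt);     /* NULL without procfs */
    }

    static void my_show_metrics(struct seq_file *seq, struct rpc_clnt *clnt)
    {
            rpc_print_iostats(seq, clnt);           /* no-op without procfs */
    }

    static void my_teardown_metrics(void)
    {
            rpc_free_iostats(my_stats);
            my_stats = NULL;
    }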
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index 7eebbab7160b..e8bbe8118de8 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h | |||
@@ -53,6 +53,7 @@ struct rpc_timeout { | |||
53 | 53 | ||
54 | struct rpc_task; | 54 | struct rpc_task; |
55 | struct rpc_xprt; | 55 | struct rpc_xprt; |
56 | struct seq_file; | ||
56 | 57 | ||
57 | /* | 58 | /* |
58 | * This describes a complete RPC request | 59 | * This describes a complete RPC request |
diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 6d6f0634ae41..4abedb8eaece 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h | |||
@@ -230,7 +230,7 @@ extern int ip6_ra_control(struct sock *sk, int sel, | |||
230 | void (*destructor)(struct sock *)); | 230 | void (*destructor)(struct sock *)); |
231 | 231 | ||
232 | 232 | ||
233 | extern int ipv6_parse_hopopts(struct sk_buff *skb, int); | 233 | extern int ipv6_parse_hopopts(struct sk_buff *skb); |
234 | 234 | ||
235 | extern struct ipv6_txoptions * ipv6_dup_options(struct sock *sk, struct ipv6_txoptions *opt); | 235 | extern struct ipv6_txoptions * ipv6_dup_options(struct sock *sk, struct ipv6_txoptions *opt); |
236 | extern struct ipv6_txoptions * ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt, | 236 | extern struct ipv6_txoptions * ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt, |