Diffstat (limited to 'include/asm-mips')
34 files changed, 1240 insertions, 78 deletions
diff --git a/include/asm-mips/asmmacro.h b/include/asm-mips/asmmacro.h
index 30b18ea6cb11..f54aa147ec19 100644
--- a/include/asm-mips/asmmacro.h
+++ b/include/asm-mips/asmmacro.h
@@ -17,7 +17,26 @@ | |||
17 | #ifdef CONFIG_64BIT | 17 | #ifdef CONFIG_64BIT |
18 | #include <asm/asmmacro-64.h> | 18 | #include <asm/asmmacro-64.h> |
19 | #endif | 19 | #endif |
20 | #ifdef CONFIG_MIPS_MT_SMTC | ||
21 | #include <asm/mipsmtregs.h> | ||
22 | #endif | ||
20 | 23 | ||
24 | #ifdef CONFIG_MIPS_MT_SMTC | ||
25 | .macro local_irq_enable reg=t0 | ||
26 | mfc0 \reg, CP0_TCSTATUS | ||
27 | ori \reg, \reg, TCSTATUS_IXMT | ||
28 | xori \reg, \reg, TCSTATUS_IXMT | ||
29 | mtc0 \reg, CP0_TCSTATUS | ||
30 | ehb | ||
31 | .endm | ||
32 | |||
33 | .macro local_irq_disable reg=t0 | ||
34 | mfc0 \reg, CP0_TCSTATUS | ||
35 | ori \reg, \reg, TCSTATUS_IXMT | ||
36 | mtc0 \reg, CP0_TCSTATUS | ||
37 | ehb | ||
38 | .endm | ||
39 | #else | ||
21 | .macro local_irq_enable reg=t0 | 40 | .macro local_irq_enable reg=t0 |
22 | mfc0 \reg, CP0_STATUS | 41 | mfc0 \reg, CP0_STATUS |
23 | ori \reg, \reg, 1 | 42 | ori \reg, \reg, 1 |
@@ -32,6 +51,7 @@ | |||
32 | mtc0 \reg, CP0_STATUS | 51 | mtc0 \reg, CP0_STATUS |
33 | irq_disable_hazard | 52 | irq_disable_hazard |
34 | .endm | 53 | .endm |
54 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
35 | 55 | ||
36 | #ifdef CONFIG_CPU_SB1 | 56 | #ifdef CONFIG_CPU_SB1 |
37 | .macro fpu_enable_hazard | 57 | .macro fpu_enable_hazard |
@@ -48,4 +68,31 @@ | |||
48 | .endm | 68 | .endm |
49 | #endif | 69 | #endif |
50 | 70 | ||
71 | /* | ||
72 | * Temporary until all versions of gas have MT ASE support | ||
73 | */ | ||
74 | .macro DMT reg=0 | ||
75 | .word (0x41600bc1 | (\reg << 16)) | ||
76 | .endm | ||
77 | |||
78 | .macro EMT reg=0 | ||
79 | .word (0x41600be1 | (\reg << 16)) | ||
80 | .endm | ||
81 | |||
82 | .macro DVPE reg=0 | ||
83 | .word (0x41600001 | (\reg << 16)) | ||
84 | .endm | ||
85 | |||
86 | .macro EVPE reg=0 | ||
87 | .word (0x41600021 | (\reg << 16)) | ||
88 | .endm | ||
89 | |||
90 | .macro MFTR rt=0, rd=0, u=0, sel=0 | ||
91 | .word (0x41000000 | (\rt << 16) | (\rd << 11) | (\u << 5) | (\sel)) | ||
92 | .endm | ||
93 | |||
94 | .macro MTTR rt=0, rd=0, u=0, sel=0 | ||
95 | .word (0x41800000 | (\rt << 16) | (\rd << 11) | (\u << 5) | (\sel)) | ||
96 | .endm | ||
97 | |||
51 | #endif /* _ASM_ASMMACRO_H */ | 98 | #endif /* _ASM_ASMMACRO_H */ |
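The DMT/EMT/DVPE/EVPE and MFTR/MTTR macros above hand-assemble the MT ASE instructions with .word because, at the time, not every binutils release could assemble them. As a quick cross-check of how those encodings are composed, here is a user-space sketch; the helper names are invented, and the opcode constants are copied from the macros above.

#include <stdint.h>
#include <stdio.h>

/* Compose "dmt rt": base opcode from the DMT macro above, rt in bits 16..20. */
static uint32_t encode_dmt(unsigned int rt)
{
        return 0x41600bc1u | (rt << 16);
}

/* Compose "mftr rt, rd, u, sel" the same way the MFTR macro does. */
static uint32_t encode_mftr(unsigned int rt, unsigned int rd,
                            unsigned int u, unsigned int sel)
{
        return 0x41000000u | (rt << 16) | (rd << 11) | (u << 5) | sel;
}

int main(void)
{
        /* "dmt $1" comes out as 0x41610bc1, matching the hard-coded
           .word 0x41610BC1 used by __dmt() in mipsregs.h later in this diff. */
        printf("dmt $1         = 0x%08x\n", encode_dmt(1));
        /* Read TCStatus (CP0 register 2, select 1) of the targeted TC into $1. */
        printf("mftr $1,$2,0,1 = 0x%08x\n", encode_mftr(1, 2, 0, 1));
        return 0;
}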
diff --git a/include/asm-mips/cacheflush.h b/include/asm-mips/cacheflush.h
index aeae9fabf4a9..47bc8f6c20d2 100644
--- a/include/asm-mips/cacheflush.h
+++ b/include/asm-mips/cacheflush.h
@@ -74,6 +74,7 @@ static inline void copy_from_user_page(struct vm_area_struct *vma, | |||
74 | 74 | ||
75 | extern void (*flush_cache_sigtramp)(unsigned long addr); | 75 | extern void (*flush_cache_sigtramp)(unsigned long addr); |
76 | extern void (*flush_icache_all)(void); | 76 | extern void (*flush_icache_all)(void); |
77 | extern void (*local_flush_data_cache_page)(void * addr); | ||
77 | extern void (*flush_data_cache_page)(unsigned long addr); | 78 | extern void (*flush_data_cache_page)(unsigned long addr); |
78 | 79 | ||
79 | /* | 80 | /* |
diff --git a/include/asm-mips/cpu-features.h b/include/asm-mips/cpu-features.h
index 3f2b6d9ac45e..254e11ed247b 100644
--- a/include/asm-mips/cpu-features.h
+++ b/include/asm-mips/cpu-features.h
@@ -40,7 +40,7 @@ | |||
40 | #define cpu_has_sb1_cache (cpu_data[0].options & MIPS_CPU_SB1_CACHE) | 40 | #define cpu_has_sb1_cache (cpu_data[0].options & MIPS_CPU_SB1_CACHE) |
41 | #endif | 41 | #endif |
42 | #ifndef cpu_has_fpu | 42 | #ifndef cpu_has_fpu |
43 | #define cpu_has_fpu (cpu_data[0].options & MIPS_CPU_FPU) | 43 | #define cpu_has_fpu (current_cpu_data.options & MIPS_CPU_FPU) |
44 | #endif | 44 | #endif |
45 | #ifndef cpu_has_32fpr | 45 | #ifndef cpu_has_32fpr |
46 | #define cpu_has_32fpr (cpu_data[0].options & MIPS_CPU_32FPR) | 46 | #define cpu_has_32fpr (cpu_data[0].options & MIPS_CPU_32FPR) |
diff --git a/include/asm-mips/cpu-info.h b/include/asm-mips/cpu-info.h
index 140be1c67da7..6572ac703662 100644
--- a/include/asm-mips/cpu-info.h
+++ b/include/asm-mips/cpu-info.h
@@ -73,6 +73,16 @@ struct cpuinfo_mips { | |||
73 | struct cache_desc dcache; /* Primary D or combined I/D cache */ | 73 | struct cache_desc dcache; /* Primary D or combined I/D cache */ |
74 | struct cache_desc scache; /* Secondary cache */ | 74 | struct cache_desc scache; /* Secondary cache */ |
75 | struct cache_desc tcache; /* Tertiary/split secondary cache */ | 75 | struct cache_desc tcache; /* Tertiary/split secondary cache */ |
76 | #if defined(CONFIG_MIPS_MT_SMTC) | ||
77 | /* | ||
78 | * In the MIPS MT "SMTC" model, each TC is considered | ||
79 | * to be a "CPU" for the purposes of scheduling, but | ||
80 | * exception resources, ASID spaces, etc, are common | ||
81 | * to all TCs within the same VPE. | ||
82 | */ | ||
83 | int vpe_id; /* Virtual Processor number */ | ||
84 | int tc_id; /* Thread Context number */ | ||
85 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
76 | void *data; /* Additional data */ | 86 | void *data; /* Additional data */ |
77 | } __attribute__((aligned(SMP_CACHE_BYTES))); | 87 | } __attribute__((aligned(SMP_CACHE_BYTES))); |
78 | 88 | ||
diff --git a/include/asm-mips/ds1742.h b/include/asm-mips/ds1742.h
new file mode 100644
index 000000000000..c2f2c32da637
--- /dev/null
+++ b/include/asm-mips/ds1742.h
@@ -0,0 +1,13 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2006 by Ralf Baechle (ralf@linux-mips.org) | ||
7 | */ | ||
8 | #ifndef _ASM_DS1742_H | ||
9 | #define _ASM_DS1742_H | ||
10 | |||
11 | #include <ds1742.h> | ||
12 | |||
13 | #endif /* _ASM_DS1742_H */ | ||
diff --git a/include/asm-mips/elf.h b/include/asm-mips/elf.h
index 851f013adad3..bdc9de2df1ef 100644
--- a/include/asm-mips/elf.h
+++ b/include/asm-mips/elf.h
@@ -119,8 +119,49 @@ | |||
119 | #define SHT_MIPS_CONFLICT 0x70000002 | 119 | #define SHT_MIPS_CONFLICT 0x70000002 |
120 | #define SHT_MIPS_GPTAB 0x70000003 | 120 | #define SHT_MIPS_GPTAB 0x70000003 |
121 | #define SHT_MIPS_UCODE 0x70000004 | 121 | #define SHT_MIPS_UCODE 0x70000004 |
122 | 122 | #define SHT_MIPS_DEBUG 0x70000005 | |
123 | #define SHF_MIPS_GPREL 0x10000000 | 123 | #define SHT_MIPS_REGINFO 0x70000006 |
124 | #define SHT_MIPS_PACKAGE 0x70000007 | ||
125 | #define SHT_MIPS_PACKSYM 0x70000008 | ||
126 | #define SHT_MIPS_RELD 0x70000009 | ||
127 | #define SHT_MIPS_IFACE 0x7000000b | ||
128 | #define SHT_MIPS_CONTENT 0x7000000c | ||
129 | #define SHT_MIPS_OPTIONS 0x7000000d | ||
130 | #define SHT_MIPS_SHDR 0x70000010 | ||
131 | #define SHT_MIPS_FDESC 0x70000011 | ||
132 | #define SHT_MIPS_EXTSYM 0x70000012 | ||
133 | #define SHT_MIPS_DENSE 0x70000013 | ||
134 | #define SHT_MIPS_PDESC 0x70000014 | ||
135 | #define SHT_MIPS_LOCSYM 0x70000015 | ||
136 | #define SHT_MIPS_AUXSYM 0x70000016 | ||
137 | #define SHT_MIPS_OPTSYM 0x70000017 | ||
138 | #define SHT_MIPS_LOCSTR 0x70000018 | ||
139 | #define SHT_MIPS_LINE 0x70000019 | ||
140 | #define SHT_MIPS_RFDESC 0x7000001a | ||
141 | #define SHT_MIPS_DELTASYM 0x7000001b | ||
142 | #define SHT_MIPS_DELTAINST 0x7000001c | ||
143 | #define SHT_MIPS_DELTACLASS 0x7000001d | ||
144 | #define SHT_MIPS_DWARF 0x7000001e | ||
145 | #define SHT_MIPS_DELTADECL 0x7000001f | ||
146 | #define SHT_MIPS_SYMBOL_LIB 0x70000020 | ||
147 | #define SHT_MIPS_EVENTS 0x70000021 | ||
148 | #define SHT_MIPS_TRANSLATE 0x70000022 | ||
149 | #define SHT_MIPS_PIXIE 0x70000023 | ||
150 | #define SHT_MIPS_XLATE 0x70000024 | ||
151 | #define SHT_MIPS_XLATE_DEBUG 0x70000025 | ||
152 | #define SHT_MIPS_WHIRL 0x70000026 | ||
153 | #define SHT_MIPS_EH_REGION 0x70000027 | ||
154 | #define SHT_MIPS_XLATE_OLD 0x70000028 | ||
155 | #define SHT_MIPS_PDR_EXCEPTION 0x70000029 | ||
156 | |||
157 | #define SHF_MIPS_GPREL 0x10000000 | ||
158 | #define SHF_MIPS_MERGE 0x20000000 | ||
159 | #define SHF_MIPS_ADDR 0x40000000 | ||
160 | #define SHF_MIPS_STRING 0x80000000 | ||
161 | #define SHF_MIPS_NOSTRIP 0x08000000 | ||
162 | #define SHF_MIPS_LOCAL 0x04000000 | ||
163 | #define SHF_MIPS_NAMES 0x02000000 | ||
164 | #define SHF_MIPS_NODUPES 0x01000000 | ||
124 | 165 | ||
125 | #ifndef ELF_ARCH | 166 | #ifndef ELF_ARCH |
126 | /* ELF register definitions */ | 167 | /* ELF register definitions */ |
diff --git a/include/asm-mips/fpu.h b/include/asm-mips/fpu.h
index 9c828b1f8218..b0f50015e252 100644
--- a/include/asm-mips/fpu.h
+++ b/include/asm-mips/fpu.h
@@ -21,6 +21,10 @@ | |||
21 | #include <asm/processor.h> | 21 | #include <asm/processor.h> |
22 | #include <asm/current.h> | 22 | #include <asm/current.h> |
23 | 23 | ||
24 | #ifdef CONFIG_MIPS_MT_FPAFF | ||
25 | #include <asm/mips_mt.h> | ||
26 | #endif | ||
27 | |||
24 | struct sigcontext; | 28 | struct sigcontext; |
25 | struct sigcontext32; | 29 | struct sigcontext32; |
26 | 30 | ||
diff --git a/include/asm-mips/hazards.h b/include/asm-mips/hazards.h
index feb29a793888..dadc05188db7 100644
--- a/include/asm-mips/hazards.h
+++ b/include/asm-mips/hazards.h
@@ -284,6 +284,8 @@ do { \ | |||
284 | #define instruction_hazard() do { } while (0) | 284 | #define instruction_hazard() do { } while (0) |
285 | #endif | 285 | #endif |
286 | 286 | ||
287 | extern void mips_ihb(void); | ||
288 | |||
287 | #endif /* __ASSEMBLY__ */ | 289 | #endif /* __ASSEMBLY__ */ |
288 | 290 | ||
289 | #endif /* _ASM_HAZARDS_H */ | 291 | #endif /* _ASM_HAZARDS_H */ |
diff --git a/include/asm-mips/interrupt.h b/include/asm-mips/interrupt.h
index 774348734fa0..4bb9c06f4410 100644
--- a/include/asm-mips/interrupt.h
+++ b/include/asm-mips/interrupt.h
@@ -19,7 +19,12 @@ __asm__ ( | |||
19 | " .set push \n" | 19 | " .set push \n" |
20 | " .set reorder \n" | 20 | " .set reorder \n" |
21 | " .set noat \n" | 21 | " .set noat \n" |
22 | #ifdef CONFIG_CPU_MIPSR2 | 22 | #ifdef CONFIG_MIPS_MT_SMTC |
23 | " mfc0 $1, $2, 1 # SMTC - clear TCStatus.IXMT \n" | ||
24 | " ori $1, 0x400 \n" | ||
25 | " xori $1, 0x400 \n" | ||
26 | " mtc0 $1, $2, 1 \n" | ||
27 | #elif defined(CONFIG_CPU_MIPSR2) | ||
23 | " ei \n" | 28 | " ei \n" |
24 | #else | 29 | #else |
25 | " mfc0 $1,$12 \n" | 30 | " mfc0 $1,$12 \n" |
@@ -62,7 +67,12 @@ __asm__ ( | |||
62 | " .macro local_irq_disable\n" | 67 | " .macro local_irq_disable\n" |
63 | " .set push \n" | 68 | " .set push \n" |
64 | " .set noat \n" | 69 | " .set noat \n" |
65 | #ifdef CONFIG_CPU_MIPSR2 | 70 | #ifdef CONFIG_MIPS_MT_SMTC |
71 | " mfc0 $1, $2, 1 \n" | ||
72 | " ori $1, 0x400 \n" | ||
73 | " .set noreorder \n" | ||
74 | " mtc0 $1, $2, 1 \n" | ||
75 | #elif defined(CONFIG_CPU_MIPSR2) | ||
66 | " di \n" | 76 | " di \n" |
67 | #else | 77 | #else |
68 | " mfc0 $1,$12 \n" | 78 | " mfc0 $1,$12 \n" |
@@ -88,7 +98,11 @@ __asm__ ( | |||
88 | " .macro local_save_flags flags \n" | 98 | " .macro local_save_flags flags \n" |
89 | " .set push \n" | 99 | " .set push \n" |
90 | " .set reorder \n" | 100 | " .set reorder \n" |
101 | #ifdef CONFIG_MIPS_MT_SMTC | ||
102 | " mfc0 \\flags, $2, 1 \n" | ||
103 | #else | ||
91 | " mfc0 \\flags, $12 \n" | 104 | " mfc0 \\flags, $12 \n" |
105 | #endif | ||
92 | " .set pop \n" | 106 | " .set pop \n" |
93 | " .endm \n"); | 107 | " .endm \n"); |
94 | 108 | ||
@@ -102,7 +116,13 @@ __asm__ ( | |||
102 | " .set push \n" | 116 | " .set push \n" |
103 | " .set reorder \n" | 117 | " .set reorder \n" |
104 | " .set noat \n" | 118 | " .set noat \n" |
105 | #ifdef CONFIG_CPU_MIPSR2 | 119 | #ifdef CONFIG_MIPS_MT_SMTC |
120 | " mfc0 \\result, $2, 1 \n" | ||
121 | " ori $1, \\result, 0x400 \n" | ||
122 | " .set noreorder \n" | ||
123 | " mtc0 $1, $2, 1 \n" | ||
124 | " andi \\result, \\result, 0x400 \n" | ||
125 | #elif defined(CONFIG_CPU_MIPSR2) | ||
106 | " di \\result \n" | 126 | " di \\result \n" |
107 | " andi \\result, 1 \n" | 127 | " andi \\result, 1 \n" |
108 | #else | 128 | #else |
@@ -128,7 +148,14 @@ __asm__ ( | |||
128 | " .set push \n" | 148 | " .set push \n" |
129 | " .set noreorder \n" | 149 | " .set noreorder \n" |
130 | " .set noat \n" | 150 | " .set noat \n" |
131 | #if defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU) | 151 | #ifdef CONFIG_MIPS_MT_SMTC |
152 | "mfc0 $1, $2, 1 \n" | ||
153 | "andi \\flags, 0x400 \n" | ||
154 | "ori $1, 0x400 \n" | ||
155 | "xori $1, 0x400 \n" | ||
156 | "or \\flags, $1 \n" | ||
157 | "mtc0 \\flags, $2, 1 \n" | ||
158 | #elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU) | ||
132 | /* | 159 | /* |
133 | * Slow, but doesn't suffer from a relatively unlikely race | 160 | * Slow, but doesn't suffer from a relatively unlikely race |
134 | * condition we've had since day 1. | 161 | * condition we've had since day 1. |
@@ -167,11 +194,29 @@ do { \ | |||
167 | : "memory"); \ | 194 | : "memory"); \ |
168 | } while(0) | 195 | } while(0) |
169 | 196 | ||
170 | #define irqs_disabled() \ | 197 | static inline int irqs_disabled(void) |
171 | ({ \ | 198 | { |
172 | unsigned long flags; \ | 199 | #ifdef CONFIG_MIPS_MT_SMTC |
173 | local_save_flags(flags); \ | 200 | /* |
174 | !(flags & 1); \ | 201 | * SMTC model uses TCStatus.IXMT to disable interrupts for a thread/CPU |
175 | }) | 202 | */ |
203 | unsigned long __result; | ||
204 | |||
205 | __asm__ __volatile__( | ||
206 | " .set noreorder \n" | ||
207 | " mfc0 %0, $2, 1 \n" | ||
208 | " andi %0, 0x400 \n" | ||
209 | " slt %0, $0, %0 \n" | ||
210 | " .set reorder \n" | ||
211 | : "=r" (__result)); | ||
212 | |||
213 | return __result; | ||
214 | #else | ||
215 | unsigned long flags; | ||
216 | local_save_flags(flags); | ||
217 | |||
218 | return !(flags & 1); | ||
219 | #endif | ||
220 | } | ||
176 | 221 | ||
177 | #endif /* _ASM_INTERRUPT_H */ | 222 | #endif /* _ASM_INTERRUPT_H */ |
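Under SMTC the macros above leave Status.IE alone and instead toggle the per-TC interrupt-exempt bit, TCStatus.IXMT (bit 10, hence the 0x400 constants). The following user-space model of the save/restore pairing is illustrative only: a plain variable stands in for the TCStatus register, and the function names are invented.

#include <stdio.h>

#define TCSTATUS_IXMT 0x400u            /* bit 10 of TCStatus */

static unsigned int tcstatus;           /* stand-in for the CP0 TCStatus register */

static unsigned int model_irq_save(void)
{
        unsigned int flags = tcstatus;  /* mfc0 \result, $2, 1 */
        tcstatus |= TCSTATUS_IXMT;      /* ori + mtc0: disable by setting IXMT */
        return flags & TCSTATUS_IXMT;   /* andi \result, \result, 0x400 */
}

static void model_irq_restore(unsigned int flags)
{
        /* ori/xori clears IXMT from the current value, then the saved bit is or'ed back in */
        tcstatus = (tcstatus & ~TCSTATUS_IXMT) | (flags & TCSTATUS_IXMT);
}

static int model_irqs_disabled(void)
{
        return (tcstatus & TCSTATUS_IXMT) != 0;
}

int main(void)
{
        unsigned int flags = model_irq_save();
        printf("inside critical section, disabled=%d\n", model_irqs_disabled());
        model_irq_restore(flags);
        printf("after restore, disabled=%d\n", model_irqs_disabled());
        return 0;
}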
diff --git a/include/asm-mips/irq.h b/include/asm-mips/irq.h
index 8a342ccb34a8..dde677f02bc0 100644
--- a/include/asm-mips/irq.h
+++ b/include/asm-mips/irq.h
@@ -11,6 +11,9 @@ | |||
11 | 11 | ||
12 | #include <linux/config.h> | 12 | #include <linux/config.h> |
13 | #include <linux/linkage.h> | 13 | #include <linux/linkage.h> |
14 | |||
15 | #include <asm/mipsmtregs.h> | ||
16 | |||
14 | #include <irq.h> | 17 | #include <irq.h> |
15 | 18 | ||
16 | #ifdef CONFIG_I8259 | 19 | #ifdef CONFIG_I8259 |
@@ -26,6 +29,23 @@ struct pt_regs; | |||
26 | 29 | ||
27 | extern asmlinkage unsigned int do_IRQ(unsigned int irq, struct pt_regs *regs); | 30 | extern asmlinkage unsigned int do_IRQ(unsigned int irq, struct pt_regs *regs); |
28 | 31 | ||
32 | #ifdef CONFIG_MIPS_MT_SMTC | ||
33 | /* | ||
34 | * Clear interrupt mask handling "backstop" if irq_hwmask | ||
35 | * entry so indicates. This implies that the ack() or end() | ||
36 | * functions will take over re-enabling the low-level mask. | ||
37 | * Otherwise it will be done on return from exception. | ||
38 | */ | ||
39 | #define __DO_IRQ_SMTC_HOOK() \ | ||
40 | do { \ | ||
41 | if (irq_hwmask[irq] & 0x0000ff00) \ | ||
42 | write_c0_tccontext(read_c0_tccontext() & \ | ||
43 | ~(irq_hwmask[irq] & 0x0000ff00)); \ | ||
44 | } while (0) | ||
45 | #else | ||
46 | #define __DO_IRQ_SMTC_HOOK() do { } while (0) | ||
47 | #endif | ||
48 | |||
29 | #ifdef CONFIG_PREEMPT | 49 | #ifdef CONFIG_PREEMPT |
30 | 50 | ||
31 | /* | 51 | /* |
@@ -39,6 +59,7 @@ extern asmlinkage unsigned int do_IRQ(unsigned int irq, struct pt_regs *regs); | |||
39 | #define do_IRQ(irq, regs) \ | 59 | #define do_IRQ(irq, regs) \ |
40 | do { \ | 60 | do { \ |
41 | irq_enter(); \ | 61 | irq_enter(); \ |
62 | __DO_IRQ_SMTC_HOOK(); \ | ||
42 | __do_IRQ((irq), (regs)); \ | 63 | __do_IRQ((irq), (regs)); \ |
43 | irq_exit(); \ | 64 | irq_exit(); \ |
44 | } while (0) | 65 | } while (0) |
@@ -46,5 +67,14 @@ do { \ | |||
46 | #endif | 67 | #endif |
47 | 68 | ||
48 | extern void arch_init_irq(void); | 69 | extern void arch_init_irq(void); |
70 | extern void spurious_interrupt(struct pt_regs *regs); | ||
71 | |||
72 | #ifdef CONFIG_MIPS_MT_SMTC | ||
73 | struct irqaction; | ||
74 | |||
75 | extern unsigned long irq_hwmask[]; | ||
76 | extern int setup_irq_smtc(unsigned int irq, struct irqaction * new, | ||
77 | unsigned long hwmask); | ||
78 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
49 | 79 | ||
50 | #endif /* _ASM_IRQ_H */ | 80 | #endif /* _ASM_IRQ_H */ |
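The hook above keys off bits 8..15 of the per-IRQ irq_hwmask[] entry: if any of them are set, the corresponding "backstop" bits are cleared from TCContext before __do_IRQ() runs, because the ack()/end() handlers will manage the low-level mask themselves. Here is a small user-space model of just that test; the array size, sample values and stand-in TCContext variable are invented.

#include <stdio.h>

static unsigned long tccontext = 0x0000ff00;    /* stand-in for CP0 TCContext */
static unsigned long irq_hwmask[8] = { [3] = 0x00000800 };

static void do_irq_smtc_hook(unsigned int irq)
{
        if (irq_hwmask[irq] & 0x0000ff00)
                tccontext &= ~(irq_hwmask[irq] & 0x0000ff00);
}

int main(void)
{
        do_irq_smtc_hook(3);                            /* clears bit 11 only */
        printf("TCContext = 0x%08lx\n", tccontext);     /* prints 0x0000f700 */
        do_irq_smtc_hook(4);                            /* hwmask 0: backstop untouched */
        printf("TCContext = 0x%08lx\n", tccontext);     /* still 0x0000f700 */
        return 0;
}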
diff --git a/include/asm-mips/kspd.h b/include/asm-mips/kspd.h
new file mode 100644
index 000000000000..4e9e724c8935
--- /dev/null
+++ b/include/asm-mips/kspd.h
@@ -0,0 +1,36 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved. | ||
3 | * | ||
4 | * This program is free software; you can distribute it and/or modify it | ||
5 | * under the terms of the GNU General Public License (Version 2) as | ||
6 | * published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | ||
11 | * for more details. | ||
12 | * | ||
13 | * You should have received a copy of the GNU General Public License along | ||
14 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
15 | * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. | ||
16 | * | ||
17 | */ | ||
18 | |||
19 | #ifndef _ASM_KSPD_H | ||
20 | #define _ASM_KSPD_H | ||
21 | |||
22 | struct kspd_notifications { | ||
23 | void (*kspd_sp_exit)(int sp_id); | ||
24 | |||
25 | struct list_head list; | ||
26 | }; | ||
27 | |||
28 | #ifdef CONFIG_MIPS_APSP_KSPD | ||
29 | extern void kspd_notify(struct kspd_notifications *notify); | ||
30 | #else | ||
31 | static inline void kspd_notify(struct kspd_notifications *notify) | ||
32 | { | ||
33 | } | ||
34 | #endif | ||
35 | |||
36 | #endif | ||
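A sketch of how a client of this interface might register for the exit callback. Only struct kspd_notifications and kspd_notify() come from this header; the example names, message and init wiring are invented for illustration.

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/list.h>
#include <asm/kspd.h>

static void example_sp_exit(int sp_id)
{
        printk(KERN_INFO "SP program %d exited\n", sp_id);
}

static struct kspd_notifications example_notify = {
        .kspd_sp_exit   = example_sp_exit,
};

static int __init example_init(void)
{
        /* With CONFIG_MIPS_APSP_KSPD disabled this is a no-op stub. */
        kspd_notify(&example_notify);
        return 0;
}
__initcall(example_init);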
diff --git a/include/asm-mips/mach-generic/ide.h b/include/asm-mips/mach-generic/ide.h
index 550979a9ea9d..e3315359500a 100644
--- a/include/asm-mips/mach-generic/ide.h
+++ b/include/asm-mips/mach-generic/ide.h
@@ -104,65 +104,107 @@ static __inline__ unsigned long ide_default_io_base(int index) | |||
104 | #endif | 104 | #endif |
105 | 105 | ||
106 | /* MIPS port and memory-mapped I/O string operations. */ | 106 | /* MIPS port and memory-mapped I/O string operations. */ |
107 | static inline void __ide_flush_prologue(void) | ||
108 | { | ||
109 | #ifdef CONFIG_SMP | ||
110 | if (cpu_has_dc_aliases) | ||
111 | preempt_disable(); | ||
112 | #endif | ||
113 | } | ||
114 | |||
115 | static inline void __ide_flush_epilogue(void) | ||
116 | { | ||
117 | #ifdef CONFIG_SMP | ||
118 | if (cpu_has_dc_aliases) | ||
119 | preempt_enable(); | ||
120 | #endif | ||
121 | } | ||
107 | 122 | ||
108 | static inline void __ide_flush_dcache_range(unsigned long addr, unsigned long size) | 123 | static inline void __ide_flush_dcache_range(unsigned long addr, unsigned long size) |
109 | { | 124 | { |
110 | if (cpu_has_dc_aliases) { | 125 | if (cpu_has_dc_aliases) { |
111 | unsigned long end = addr + size; | 126 | unsigned long end = addr + size; |
112 | for (; addr < end; addr += PAGE_SIZE) | 127 | |
113 | flush_dcache_page(virt_to_page(addr)); | 128 | while (addr < end) { |
129 | local_flush_data_cache_page((void *)addr); | ||
130 | addr += PAGE_SIZE; | ||
131 | } | ||
114 | } | 132 | } |
115 | } | 133 | } |
116 | 134 | ||
135 | /* | ||
136 | * insw() and gang might be called with interrupts disabled, so we can't | ||
137 | * send IPIs for flushing due to the potential for deadlocks, see the comment | ||
138 | * above smp_call_function() in arch/mips/kernel/smp.c. We work around the | ||
139 | * problem by disabling preemption so we know we actually perform the flush | ||
140 | * on the processor that actually has the lines to be flushed which hopefully | ||
141 | * is even better for performance anyway. | ||
142 | */ | ||
117 | static inline void __ide_insw(unsigned long port, void *addr, | 143 | static inline void __ide_insw(unsigned long port, void *addr, |
118 | unsigned int count) | 144 | unsigned int count) |
119 | { | 145 | { |
146 | __ide_flush_prologue(); | ||
120 | insw(port, addr, count); | 147 | insw(port, addr, count); |
121 | __ide_flush_dcache_range((unsigned long)addr, count * 2); | 148 | __ide_flush_dcache_range((unsigned long)addr, count * 2); |
149 | __ide_flush_epilogue(); | ||
122 | } | 150 | } |
123 | 151 | ||
124 | static inline void __ide_insl(unsigned long port, void *addr, unsigned int count) | 152 | static inline void __ide_insl(unsigned long port, void *addr, unsigned int count) |
125 | { | 153 | { |
154 | __ide_flush_prologue(); | ||
126 | insl(port, addr, count); | 155 | insl(port, addr, count); |
127 | __ide_flush_dcache_range((unsigned long)addr, count * 4); | 156 | __ide_flush_dcache_range((unsigned long)addr, count * 4); |
157 | __ide_flush_epilogue(); | ||
128 | } | 158 | } |
129 | 159 | ||
130 | static inline void __ide_outsw(unsigned long port, const void *addr, | 160 | static inline void __ide_outsw(unsigned long port, const void *addr, |
131 | unsigned long count) | 161 | unsigned long count) |
132 | { | 162 | { |
163 | __ide_flush_prologue(); | ||
133 | outsw(port, addr, count); | 164 | outsw(port, addr, count); |
134 | __ide_flush_dcache_range((unsigned long)addr, count * 2); | 165 | __ide_flush_dcache_range((unsigned long)addr, count * 2); |
166 | __ide_flush_epilogue(); | ||
135 | } | 167 | } |
136 | 168 | ||
137 | static inline void __ide_outsl(unsigned long port, const void *addr, | 169 | static inline void __ide_outsl(unsigned long port, const void *addr, |
138 | unsigned long count) | 170 | unsigned long count) |
139 | { | 171 | { |
172 | __ide_flush_prologue(); | ||
140 | outsl(port, addr, count); | 173 | outsl(port, addr, count); |
141 | __ide_flush_dcache_range((unsigned long)addr, count * 4); | 174 | __ide_flush_dcache_range((unsigned long)addr, count * 4); |
175 | __ide_flush_epilogue(); | ||
142 | } | 176 | } |
143 | 177 | ||
144 | static inline void __ide_mm_insw(void __iomem *port, void *addr, u32 count) | 178 | static inline void __ide_mm_insw(void __iomem *port, void *addr, u32 count) |
145 | { | 179 | { |
180 | __ide_flush_prologue(); | ||
146 | readsw(port, addr, count); | 181 | readsw(port, addr, count); |
147 | __ide_flush_dcache_range((unsigned long)addr, count * 2); | 182 | __ide_flush_dcache_range((unsigned long)addr, count * 2); |
183 | __ide_flush_epilogue(); | ||
148 | } | 184 | } |
149 | 185 | ||
150 | static inline void __ide_mm_insl(void __iomem *port, void *addr, u32 count) | 186 | static inline void __ide_mm_insl(void __iomem *port, void *addr, u32 count) |
151 | { | 187 | { |
188 | __ide_flush_prologue(); | ||
152 | readsl(port, addr, count); | 189 | readsl(port, addr, count); |
153 | __ide_flush_dcache_range((unsigned long)addr, count * 4); | 190 | __ide_flush_dcache_range((unsigned long)addr, count * 4); |
191 | __ide_flush_epilogue(); | ||
154 | } | 192 | } |
155 | 193 | ||
156 | static inline void __ide_mm_outsw(void __iomem *port, void *addr, u32 count) | 194 | static inline void __ide_mm_outsw(void __iomem *port, void *addr, u32 count) |
157 | { | 195 | { |
196 | __ide_flush_prologue(); | ||
158 | writesw(port, addr, count); | 197 | writesw(port, addr, count); |
159 | __ide_flush_dcache_range((unsigned long)addr, count * 2); | 198 | __ide_flush_dcache_range((unsigned long)addr, count * 2); |
199 | __ide_flush_epilogue(); | ||
160 | } | 200 | } |
161 | 201 | ||
162 | static inline void __ide_mm_outsl(void __iomem * port, void *addr, u32 count) | 202 | static inline void __ide_mm_outsl(void __iomem * port, void *addr, u32 count) |
163 | { | 203 | { |
204 | __ide_flush_prologue(); | ||
164 | writesl(port, addr, count); | 205 | writesl(port, addr, count); |
165 | __ide_flush_dcache_range((unsigned long)addr, count * 4); | 206 | __ide_flush_dcache_range((unsigned long)addr, count * 4); |
207 | __ide_flush_epilogue(); | ||
166 | } | 208 | } |
167 | 209 | ||
168 | /* ide_insw calls insw, not __ide_insw. Why? */ | 210 | /* ide_insw calls insw, not __ide_insw. Why? */ |
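The rewritten __ide_flush_dcache_range() walks the buffer a page at a time using the new local_flush_data_cache_page() hook, which is only safe because the prologue pinned the task to the CPU whose cache holds the freshly written lines. A user-space model of that walk follows; PAGE_SIZE and the flush stub are stand-ins.

#include <stdio.h>

#define PAGE_SIZE 4096UL

static void local_flush_data_cache_page(void *addr)
{
        printf("flush page containing %p\n", addr);     /* stand-in for the cache op */
}

static void model_flush_dcache_range(unsigned long addr, unsigned long size)
{
        unsigned long end = addr + size;

        while (addr < end) {
                local_flush_data_cache_page((void *)addr);
                addr += PAGE_SIZE;
        }
}

int main(void)
{
        /* A 2-sector (1024 byte) PIO transfer touches a single page... */
        model_flush_dcache_range(0x10000, 1024);
        /* ...while a larger, page-crossing buffer is flushed page by page. */
        model_flush_dcache_range(0x20000, 3 * PAGE_SIZE + 100);
        return 0;
}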
diff --git a/include/asm-mips/mach-jmr3927/ds1742.h b/include/asm-mips/mach-jmr3927/ds1742.h
index cff6192d4bdb..8a8fef6d07fa 100644
--- a/include/asm-mips/mach-jmr3927/ds1742.h
+++ b/include/asm-mips/mach-jmr3927/ds1742.h
@@ -3,14 +3,14 @@ | |||
3 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
4 | * for more details. | 4 | * for more details. |
5 | * | 5 | * |
6 | * Copyright (C) 2003 by Ralf Baechle | 6 | * Copyright (C) 2003, 06 by Ralf Baechle |
7 | */ | 7 | */ |
8 | #ifndef __ASM_MACH_JMR3927_DS1742_H | 8 | #ifndef __ASM_MACH_JMR3927_DS1742_H |
9 | #define __ASM_MACH_JMR3927_DS1742_H | 9 | #define __ASM_MACH_JMR3927_DS1742_H |
10 | 10 | ||
11 | #include <asm/jmr3927/jmr3927.h> | 11 | #include <asm/jmr3927/jmr3927.h> |
12 | 12 | ||
13 | #define rtc_read(reg) (jmr3927_nvram_in(addr)) | 13 | #define rtc_read(reg) (jmr3927_nvram_in(reg)) |
14 | #define rtc_write(data, reg) (jmr3927_nvram_out((data),(reg))) | 14 | #define rtc_write(data, reg) (jmr3927_nvram_out((data),(reg))) |
15 | 15 | ||
16 | #endif /* __ASM_MACH_JMR3927_DS1742_H */ | 16 | #endif /* __ASM_MACH_JMR3927_DS1742_H */ |
diff --git a/include/asm-mips/mach-mips/param.h b/include/asm-mips/mach-mips/param.h
new file mode 100644
index 000000000000..805ef6d27d3c
--- /dev/null
+++ b/include/asm-mips/mach-mips/param.h
@@ -0,0 +1,13 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2003 by Ralf Baechle | ||
7 | */ | ||
8 | #ifndef __ASM_MACH_MIPS_PARAM_H | ||
9 | #define __ASM_MACH_MIPS_PARAM_H | ||
10 | |||
11 | #define HZ 100 /* Internal kernel timer frequency */ | ||
12 | |||
13 | #endif /* __ASM_MACH_MIPS_PARAM_H */ | ||
diff --git a/include/asm-mips/marvell.h b/include/asm-mips/marvell.h
index 9225b3397a4f..6bb2125bb053 100644
--- a/include/asm-mips/marvell.h
+++ b/include/asm-mips/marvell.h
@@ -53,4 +53,6 @@ struct mv_pci_controller { | |||
53 | unsigned long config_vreg; | 53 | unsigned long config_vreg; |
54 | }; | 54 | }; |
55 | 55 | ||
56 | extern void ll_mv64340_irq(struct pt_regs *regs); | ||
57 | |||
56 | #endif /* __ASM_MIPS_MARVELL_H */ | 58 | #endif /* __ASM_MIPS_MARVELL_H */ |
diff --git a/include/asm-mips/mips-boards/atlas.h b/include/asm-mips/mips-boards/atlas.h
index 0998151fb3a1..a8ae12d120ee 100644
--- a/include/asm-mips/mips-boards/atlas.h
+++ b/include/asm-mips/mips-boards/atlas.h
@@ -33,13 +33,29 @@ | |||
33 | #define ATLAS_RTC_ADR_REG 0x1f000800 | 33 | #define ATLAS_RTC_ADR_REG 0x1f000800 |
34 | #define ATLAS_RTC_DAT_REG 0x1f000808 | 34 | #define ATLAS_RTC_DAT_REG 0x1f000808 |
35 | 35 | ||
36 | |||
37 | /* | 36 | /* |
38 | * Atlas interrupt controller register base. | 37 | * Atlas interrupt controller register base. |
39 | */ | 38 | */ |
40 | #define ATLAS_ICTRL_REGS_BASE 0x1f000000 | 39 | #define ATLAS_ICTRL_REGS_BASE 0x1f000000 |
41 | 40 | ||
42 | /* | 41 | /* |
42 | * Atlas registers are memory mapped on 64-bit aligned boundaries and | ||
43 | * only word accesses are allowed. | ||
44 | */ | ||
45 | struct atlas_ictrl_regs { | ||
46 | volatile unsigned int intraw; | ||
47 | int dummy1; | ||
48 | volatile unsigned int intseten; | ||
49 | int dummy2; | ||
50 | volatile unsigned int intrsten; | ||
51 | int dummy3; | ||
52 | volatile unsigned int intenable; | ||
53 | int dummy4; | ||
54 | volatile unsigned int intstatus; | ||
55 | int dummy5; | ||
56 | }; | ||
57 | |||
58 | /* | ||
43 | * Atlas UART register base. | 59 | * Atlas UART register base. |
44 | */ | 60 | */ |
45 | #define ATLAS_UART_REGS_BASE 0x1f000900 | 61 | #define ATLAS_UART_REGS_BASE 0x1f000900 |
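The dummy fields in struct atlas_ictrl_regs are what give each 32-bit register its own 64-bit slot, matching the comment about 64-bit-aligned, word-only access. A quick user-space check of the resulting offsets (the structure is copied from above; natural alignment with a 4-byte int is assumed):

#include <stdio.h>
#include <stddef.h>

struct atlas_ictrl_regs {
        volatile unsigned int intraw;           int dummy1;
        volatile unsigned int intseten;         int dummy2;
        volatile unsigned int intrsten;         int dummy3;
        volatile unsigned int intenable;        int dummy4;
        volatile unsigned int intstatus;        int dummy5;
};

int main(void)
{
        /* Expected: 0x00, 0x08, 0x10, 0x18, 0x20 - one live register every 8 bytes. */
        printf("intraw    0x%02zx\n", offsetof(struct atlas_ictrl_regs, intraw));
        printf("intseten  0x%02zx\n", offsetof(struct atlas_ictrl_regs, intseten));
        printf("intrsten  0x%02zx\n", offsetof(struct atlas_ictrl_regs, intrsten));
        printf("intenable 0x%02zx\n", offsetof(struct atlas_ictrl_regs, intenable));
        printf("intstatus 0x%02zx\n", offsetof(struct atlas_ictrl_regs, intstatus));
        return 0;
}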
diff --git a/include/asm-mips/mips-boards/atlasint.h b/include/asm-mips/mips-boards/atlasint.h
index bba35c183d08..fd7ebc54fa90 100644
--- a/include/asm-mips/mips-boards/atlasint.h
+++ b/include/asm-mips/mips-boards/atlasint.h
@@ -62,23 +62,4 @@ | |||
62 | #define ATLASINT_RES31 (ATLASINT_BASE+31) | 62 | #define ATLASINT_RES31 (ATLASINT_BASE+31) |
63 | #define ATLASINT_END (ATLASINT_BASE+31) | 63 | #define ATLASINT_END (ATLASINT_BASE+31) |
64 | 64 | ||
65 | /* | ||
66 | * Atlas registers are memory mapped on 64-bit aligned boundaries and | ||
67 | * only word access are allowed. | ||
68 | */ | ||
69 | struct atlas_ictrl_regs { | ||
70 | volatile unsigned int intraw; | ||
71 | int dummy1; | ||
72 | volatile unsigned int intseten; | ||
73 | int dummy2; | ||
74 | volatile unsigned int intrsten; | ||
75 | int dummy3; | ||
76 | volatile unsigned int intenable; | ||
77 | int dummy4; | ||
78 | volatile unsigned int intstatus; | ||
79 | int dummy5; | ||
80 | }; | ||
81 | |||
82 | extern void atlasint_init(void); | ||
83 | |||
84 | #endif /* !(_MIPS_ATLASINT_H) */ | 65 | #endif /* !(_MIPS_ATLASINT_H) */ |
diff --git a/include/asm-mips/mips_mt.h b/include/asm-mips/mips_mt.h
new file mode 100644
index 000000000000..c31a312b9783
--- /dev/null
+++ b/include/asm-mips/mips_mt.h
@@ -0,0 +1,15 @@ | |||
1 | /* | ||
2 | * Definitions and declarations for MIPS MT support | ||
3 | * that are common between SMTC, VSMP, and/or AP/SP | ||
4 | * kernel models. | ||
5 | */ | ||
6 | #ifndef __ASM_MIPS_MT_H | ||
7 | #define __ASM_MIPS_MT_H | ||
8 | |||
9 | extern cpumask_t mt_fpu_cpumask; | ||
10 | extern unsigned long mt_fpemul_threshold; | ||
11 | |||
12 | extern void mips_mt_regdump(unsigned long previous_mvpcontrol_value); | ||
13 | extern void mips_mt_set_cpuoptions(void); | ||
14 | |||
15 | #endif /* __ASM_MIPS_MT_H */ | ||
diff --git a/include/asm-mips/mipsmtregs.h b/include/asm-mips/mipsmtregs.h
index a669c0702c66..f637ce70758f 100644
--- a/include/asm-mips/mipsmtregs.h
+++ b/include/asm-mips/mipsmtregs.h
@@ -165,7 +165,7 @@ | |||
165 | 165 | ||
166 | #ifndef __ASSEMBLY__ | 166 | #ifndef __ASSEMBLY__ |
167 | 167 | ||
168 | extern void mips_mt_regdump(void); | 168 | extern void mips_mt_regdump(unsigned long previous_mvpcontrol_value); |
169 | 169 | ||
170 | static inline unsigned int dvpe(void) | 170 | static inline unsigned int dvpe(void) |
171 | { | 171 | { |
@@ -234,7 +234,7 @@ static inline void __raw_emt(void) | |||
234 | __asm__ __volatile__( | 234 | __asm__ __volatile__( |
235 | " .set noreorder \n" | 235 | " .set noreorder \n" |
236 | " .set mips32r2 \n" | 236 | " .set mips32r2 \n" |
237 | " emt \n" | 237 | " .word 0x41600be1 # emt \n" |
238 | " ehb \n" | 238 | " ehb \n" |
239 | " .set mips0 \n" | 239 | " .set mips0 \n" |
240 | " .set reorder"); | 240 | " .set reorder"); |
@@ -282,8 +282,11 @@ static inline void ehb(void) | |||
282 | \ | 282 | \ |
283 | __asm__ __volatile__( \ | 283 | __asm__ __volatile__( \ |
284 | " .set push \n" \ | 284 | " .set push \n" \ |
285 | " .set noat \n" \ | ||
285 | " .set mips32r2 \n" \ | 286 | " .set mips32r2 \n" \ |
286 | " mftgpr %0," #rt " \n" \ | 287 | " # mftgpr $1," #rt " \n" \ |
288 | " .word 0x41000820 | (" #rt " << 16) \n" \ | ||
289 | " move %0, $1 \n" \ | ||
287 | " .set pop \n" \ | 290 | " .set pop \n" \ |
288 | : "=r" (__res)); \ | 291 | : "=r" (__res)); \ |
289 | \ | 292 | \ |
@@ -295,9 +298,7 @@ static inline void ehb(void) | |||
295 | unsigned long __res; \ | 298 | unsigned long __res; \ |
296 | \ | 299 | \ |
297 | __asm__ __volatile__( \ | 300 | __asm__ __volatile__( \ |
298 | ".set noat\n\t" \ | 301 | " mftr %0, " #rt ", " #u ", " #sel " \n" \ |
299 | "mftr\t%0, " #rt ", " #u ", " #sel "\n\t" \ | ||
300 | ".set at\n\t" \ | ||
301 | : "=r" (__res)); \ | 302 | : "=r" (__res)); \ |
302 | \ | 303 | \ |
303 | __res; \ | 304 | __res; \ |
@@ -364,6 +365,9 @@ do { \ | |||
364 | #define read_vpe_c0_ebase() mftc0(15,1) | 365 | #define read_vpe_c0_ebase() mftc0(15,1) |
365 | #define write_vpe_c0_ebase(val) mttc0(15, 1, val) | 366 | #define write_vpe_c0_ebase(val) mttc0(15, 1, val) |
366 | #define write_vpe_c0_compare(val) mttc0(11, 0, val) | 367 | #define write_vpe_c0_compare(val) mttc0(11, 0, val) |
368 | #define read_vpe_c0_badvaddr() mftc0(8, 0) | ||
369 | #define read_vpe_c0_epc() mftc0(14, 0) | ||
370 | #define write_vpe_c0_epc(val) mttc0(14, 0, val) | ||
367 | 371 | ||
368 | 372 | ||
369 | /* TC */ | 373 | /* TC */ |
diff --git a/include/asm-mips/mipsregs.h b/include/asm-mips/mipsregs.h
index 035ba0a9b0df..a2ef579f6b1a 100644
--- a/include/asm-mips/mipsregs.h
+++ b/include/asm-mips/mipsregs.h
@@ -836,6 +836,9 @@ do { \ | |||
836 | #define read_c0_cache() __read_32bit_c0_register($7, 0) /* TX39xx */ | 836 | #define read_c0_cache() __read_32bit_c0_register($7, 0) /* TX39xx */ |
837 | #define write_c0_cache(val) __write_32bit_c0_register($7, 0, val) | 837 | #define write_c0_cache(val) __write_32bit_c0_register($7, 0, val) |
838 | 838 | ||
839 | #define read_c0_badvaddr() __read_ulong_c0_register($8, 0) | ||
840 | #define write_c0_badvaddr(val) __write_ulong_c0_register($8, 0, val) | ||
841 | |||
839 | #define read_c0_count() __read_32bit_c0_register($9, 0) | 842 | #define read_c0_count() __read_32bit_c0_register($9, 0) |
840 | #define write_c0_count(val) __write_32bit_c0_register($9, 0, val) | 843 | #define write_c0_count(val) __write_32bit_c0_register($9, 0, val) |
841 | 844 | ||
@@ -858,7 +861,19 @@ do { \ | |||
858 | #define write_c0_compare3(val) __write_32bit_c0_register($11, 7, val) | 861 | #define write_c0_compare3(val) __write_32bit_c0_register($11, 7, val) |
859 | 862 | ||
860 | #define read_c0_status() __read_32bit_c0_register($12, 0) | 863 | #define read_c0_status() __read_32bit_c0_register($12, 0) |
864 | #ifdef CONFIG_MIPS_MT_SMTC | ||
865 | #define write_c0_status(val) \ | ||
866 | do { \ | ||
867 | __write_32bit_c0_register($12, 0, val); \ | ||
868 | __ehb(); \ | ||
869 | } while (0) | ||
870 | #else | ||
871 | /* | ||
872 | * Legacy non-SMTC code, which may be hazardous | ||
873 | * but which might not support EHB | ||
874 | */ | ||
861 | #define write_c0_status(val) __write_32bit_c0_register($12, 0, val) | 875 | #define write_c0_status(val) __write_32bit_c0_register($12, 0, val) |
876 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
862 | 877 | ||
863 | #define read_c0_cause() __read_32bit_c0_register($13, 0) | 878 | #define read_c0_cause() __read_32bit_c0_register($13, 0) |
864 | #define write_c0_cause(val) __write_32bit_c0_register($13, 0, val) | 879 | #define write_c0_cause(val) __write_32bit_c0_register($13, 0, val) |
@@ -1001,6 +1016,9 @@ do { \ | |||
1001 | #define read_c0_taglo() __read_32bit_c0_register($28, 0) | 1016 | #define read_c0_taglo() __read_32bit_c0_register($28, 0) |
1002 | #define write_c0_taglo(val) __write_32bit_c0_register($28, 0, val) | 1017 | #define write_c0_taglo(val) __write_32bit_c0_register($28, 0, val) |
1003 | 1018 | ||
1019 | #define read_c0_dtaglo() __read_32bit_c0_register($28, 2) | ||
1020 | #define write_c0_dtaglo(val) __write_32bit_c0_register($28, 2, val) | ||
1021 | |||
1004 | #define read_c0_taghi() __read_32bit_c0_register($29, 0) | 1022 | #define read_c0_taghi() __read_32bit_c0_register($29, 0) |
1005 | #define write_c0_taghi(val) __write_32bit_c0_register($29, 0, val) | 1023 | #define write_c0_taghi(val) __write_32bit_c0_register($29, 0, val) |
1006 | 1024 | ||
@@ -1354,15 +1372,119 @@ static inline void tlb_write_random(void) | |||
1354 | /* | 1372 | /* |
1355 | * Manipulate bits in a c0 register. | 1373 | * Manipulate bits in a c0 register. |
1356 | */ | 1374 | */ |
1375 | #ifndef CONFIG_MIPS_MT_SMTC | ||
1376 | /* | ||
1377 | * SMTC Linux requires shutting-down microthread scheduling | ||
1378 | * during CP0 register read-modify-write sequences. | ||
1379 | */ | ||
1380 | #define __BUILD_SET_C0(name) \ | ||
1381 | static inline unsigned int \ | ||
1382 | set_c0_##name(unsigned int set) \ | ||
1383 | { \ | ||
1384 | unsigned int res; \ | ||
1385 | \ | ||
1386 | res = read_c0_##name(); \ | ||
1387 | res |= set; \ | ||
1388 | write_c0_##name(res); \ | ||
1389 | \ | ||
1390 | return res; \ | ||
1391 | } \ | ||
1392 | \ | ||
1393 | static inline unsigned int \ | ||
1394 | clear_c0_##name(unsigned int clear) \ | ||
1395 | { \ | ||
1396 | unsigned int res; \ | ||
1397 | \ | ||
1398 | res = read_c0_##name(); \ | ||
1399 | res &= ~clear; \ | ||
1400 | write_c0_##name(res); \ | ||
1401 | \ | ||
1402 | return res; \ | ||
1403 | } \ | ||
1404 | \ | ||
1405 | static inline unsigned int \ | ||
1406 | change_c0_##name(unsigned int change, unsigned int new) \ | ||
1407 | { \ | ||
1408 | unsigned int res; \ | ||
1409 | \ | ||
1410 | res = read_c0_##name(); \ | ||
1411 | res &= ~change; \ | ||
1412 | res |= (new & change); \ | ||
1413 | write_c0_##name(res); \ | ||
1414 | \ | ||
1415 | return res; \ | ||
1416 | } | ||
1417 | |||
1418 | #else /* SMTC versions that manage MT scheduling */ | ||
1419 | |||
1420 | #include <asm/interrupt.h> | ||
1421 | |||
1422 | /* | ||
1423 | * This is a duplicate of dmt() in mipsmtregs.h to avoid problems with | ||
1424 | * header file recursion. | ||
1425 | */ | ||
1426 | static inline unsigned int __dmt(void) | ||
1427 | { | ||
1428 | int res; | ||
1429 | |||
1430 | __asm__ __volatile__( | ||
1431 | " .set push \n" | ||
1432 | " .set mips32r2 \n" | ||
1433 | " .set noat \n" | ||
1434 | " .word 0x41610BC1 # dmt $1 \n" | ||
1435 | " ehb \n" | ||
1436 | " move %0, $1 \n" | ||
1437 | " .set pop \n" | ||
1438 | : "=r" (res)); | ||
1439 | |||
1440 | instruction_hazard(); | ||
1441 | |||
1442 | return res; | ||
1443 | } | ||
1444 | |||
1445 | #define __VPECONTROL_TE_SHIFT 15 | ||
1446 | #define __VPECONTROL_TE (1UL << __VPECONTROL_TE_SHIFT) | ||
1447 | |||
1448 | #define __EMT_ENABLE __VPECONTROL_TE | ||
1449 | |||
1450 | static inline void __emt(unsigned int previous) | ||
1451 | { | ||
1452 | if ((previous & __EMT_ENABLE)) | ||
1453 | __asm__ __volatile__( | ||
1454 | " .set noreorder \n" | ||
1455 | " .set mips32r2 \n" | ||
1456 | " .word 0x41600be1 # emt \n" | ||
1457 | " ehb \n" | ||
1458 | " .set mips0 \n" | ||
1459 | " .set reorder \n"); | ||
1460 | } | ||
1461 | |||
1462 | static inline void __ehb(void) | ||
1463 | { | ||
1464 | __asm__ __volatile__( | ||
1465 | " ehb \n"); | ||
1466 | } | ||
1467 | |||
1468 | /* | ||
1469 | * Note that local_irq_save/restore affect TC-specific IXMT state, | ||
1470 | * not Status.IE as in non-SMTC kernel. | ||
1471 | */ | ||
1472 | |||
1357 | #define __BUILD_SET_C0(name) \ | 1473 | #define __BUILD_SET_C0(name) \ |
1358 | static inline unsigned int \ | 1474 | static inline unsigned int \ |
1359 | set_c0_##name(unsigned int set) \ | 1475 | set_c0_##name(unsigned int set) \ |
1360 | { \ | 1476 | { \ |
1361 | unsigned int res; \ | 1477 | unsigned int res; \ |
1478 | unsigned int omt; \ | ||
1479 | unsigned int flags; \ | ||
1362 | \ | 1480 | \ |
1481 | local_irq_save(flags); \ | ||
1482 | omt = __dmt(); \ | ||
1363 | res = read_c0_##name(); \ | 1483 | res = read_c0_##name(); \ |
1364 | res |= set; \ | 1484 | res |= set; \ |
1365 | write_c0_##name(res); \ | 1485 | write_c0_##name(res); \ |
1486 | __emt(omt); \ | ||
1487 | local_irq_restore(flags); \ | ||
1366 | \ | 1488 | \ |
1367 | return res; \ | 1489 | return res; \ |
1368 | } \ | 1490 | } \ |
@@ -1371,10 +1493,16 @@ static inline unsigned int \ | |||
1371 | clear_c0_##name(unsigned int clear) \ | 1493 | clear_c0_##name(unsigned int clear) \ |
1372 | { \ | 1494 | { \ |
1373 | unsigned int res; \ | 1495 | unsigned int res; \ |
1496 | unsigned int omt; \ | ||
1497 | unsigned int flags; \ | ||
1374 | \ | 1498 | \ |
1499 | local_irq_save(flags); \ | ||
1500 | omt = __dmt(); \ | ||
1375 | res = read_c0_##name(); \ | 1501 | res = read_c0_##name(); \ |
1376 | res &= ~clear; \ | 1502 | res &= ~clear; \ |
1377 | write_c0_##name(res); \ | 1503 | write_c0_##name(res); \ |
1504 | __emt(omt); \ | ||
1505 | local_irq_restore(flags); \ | ||
1378 | \ | 1506 | \ |
1379 | return res; \ | 1507 | return res; \ |
1380 | } \ | 1508 | } \ |
@@ -1383,14 +1511,22 @@ static inline unsigned int \ | |||
1383 | change_c0_##name(unsigned int change, unsigned int new) \ | 1511 | change_c0_##name(unsigned int change, unsigned int new) \ |
1384 | { \ | 1512 | { \ |
1385 | unsigned int res; \ | 1513 | unsigned int res; \ |
1514 | unsigned int omt; \ | ||
1515 | unsigned int flags; \ | ||
1386 | \ | 1516 | \ |
1517 | local_irq_save(flags); \ | ||
1518 | \ | ||
1519 | omt = __dmt(); \ | ||
1387 | res = read_c0_##name(); \ | 1520 | res = read_c0_##name(); \ |
1388 | res &= ~change; \ | 1521 | res &= ~change; \ |
1389 | res |= (new & change); \ | 1522 | res |= (new & change); \ |
1390 | write_c0_##name(res); \ | 1523 | write_c0_##name(res); \ |
1524 | __emt(omt); \ | ||
1525 | local_irq_restore(flags); \ | ||
1391 | \ | 1526 | \ |
1392 | return res; \ | 1527 | return res; \ |
1393 | } | 1528 | } |
1529 | #endif | ||
1394 | 1530 | ||
1395 | __BUILD_SET_C0(status) | 1531 | __BUILD_SET_C0(status) |
1396 | __BUILD_SET_C0(cause) | 1532 | __BUILD_SET_C0(cause) |
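For readability, this is approximately what __BUILD_SET_C0(status) generates in the SMTC case above: the CP0 read-modify-write is bracketed by masking the calling TC's own interrupts (TCStatus.IXMT, via local_irq_save) and by DMT/EMT so no other TC on the VPE can interleave an update. This is an expansion of the macro for illustration, not additional code.

static inline unsigned int set_c0_status(unsigned int set)
{
        unsigned int res;
        unsigned int omt;
        unsigned int flags;

        local_irq_save(flags);          /* sets TCStatus.IXMT for this TC */
        omt = __dmt();                  /* stop the VPE's thread scheduler */
        res = read_c0_status();
        res |= set;
        write_c0_status(res);           /* the SMTC variant follows this with ehb */
        __emt(omt);                     /* restart threads only if they were running */
        local_irq_restore(flags);

        return res;
}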
diff --git a/include/asm-mips/mmu_context.h b/include/asm-mips/mmu_context.h
index 61cf22588137..6e09f4c87211 100644
--- a/include/asm-mips/mmu_context.h
+++ b/include/asm-mips/mmu_context.h
@@ -17,6 +17,10 @@ | |||
17 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
18 | #include <asm/cacheflush.h> | 18 | #include <asm/cacheflush.h> |
19 | #include <asm/tlbflush.h> | 19 | #include <asm/tlbflush.h> |
20 | #ifdef CONFIG_MIPS_MT_SMTC | ||
21 | #include <asm/mipsmtregs.h> | ||
22 | #include <asm/smtc.h> | ||
23 | #endif /* SMTC */ | ||
20 | 24 | ||
21 | /* | 25 | /* |
22 | * For the fast tlb miss handlers, we keep a per cpu array of pointers | 26 | * For the fast tlb miss handlers, we keep a per cpu array of pointers |
@@ -54,6 +58,14 @@ extern unsigned long pgd_current[]; | |||
54 | #define ASID_INC 0x1 | 58 | #define ASID_INC 0x1 |
55 | #define ASID_MASK 0xfff | 59 | #define ASID_MASK 0xfff |
56 | 60 | ||
61 | /* SMTC/34K debug hack - but maybe we'll keep it */ | ||
62 | #elif defined(CONFIG_MIPS_MT_SMTC) | ||
63 | |||
64 | #define ASID_INC 0x1 | ||
65 | extern unsigned long smtc_asid_mask; | ||
66 | #define ASID_MASK (smtc_asid_mask) | ||
67 | #define HW_ASID_MASK 0xff | ||
68 | /* End SMTC/34K debug hack */ | ||
57 | #else /* FIXME: not correct for R6000 */ | 69 | #else /* FIXME: not correct for R6000 */ |
58 | 70 | ||
59 | #define ASID_INC 0x1 | 71 | #define ASID_INC 0x1 |
@@ -76,6 +88,8 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) | |||
76 | #define ASID_VERSION_MASK ((unsigned long)~(ASID_MASK|(ASID_MASK-1))) | 88 | #define ASID_VERSION_MASK ((unsigned long)~(ASID_MASK|(ASID_MASK-1))) |
77 | #define ASID_FIRST_VERSION ((unsigned long)(~ASID_VERSION_MASK) + 1) | 89 | #define ASID_FIRST_VERSION ((unsigned long)(~ASID_VERSION_MASK) + 1) |
78 | 90 | ||
91 | #ifndef CONFIG_MIPS_MT_SMTC | ||
92 | /* Normal, classic MIPS get_new_mmu_context */ | ||
79 | static inline void | 93 | static inline void |
80 | get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) | 94 | get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) |
81 | { | 95 | { |
@@ -91,6 +105,12 @@ get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) | |||
91 | cpu_context(cpu, mm) = asid_cache(cpu) = asid; | 105 | cpu_context(cpu, mm) = asid_cache(cpu) = asid; |
92 | } | 106 | } |
93 | 107 | ||
108 | #else /* CONFIG_MIPS_MT_SMTC */ | ||
109 | |||
110 | #define get_new_mmu_context(mm,cpu) smtc_get_new_mmu_context((mm),(cpu)) | ||
111 | |||
112 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
113 | |||
94 | /* | 114 | /* |
95 | * Initialize the context related info for a new mm_struct | 115 | * Initialize the context related info for a new mm_struct |
96 | * instance. | 116 | * instance. |
@@ -111,14 +131,46 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, | |||
111 | { | 131 | { |
112 | unsigned int cpu = smp_processor_id(); | 132 | unsigned int cpu = smp_processor_id(); |
113 | unsigned long flags; | 133 | unsigned long flags; |
114 | 134 | #ifdef CONFIG_MIPS_MT_SMTC | |
135 | unsigned long oldasid; | ||
136 | unsigned long mtflags; | ||
137 | int mytlb = (smtc_status & SMTC_TLB_SHARED) ? 0 : cpu_data[cpu].vpe_id; | ||
115 | local_irq_save(flags); | 138 | local_irq_save(flags); |
139 | mtflags = dvpe(); | ||
140 | #else /* Not SMTC */ | ||
141 | local_irq_save(flags); | ||
142 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
116 | 143 | ||
117 | /* Check if our ASID is of an older version and thus invalid */ | 144 | /* Check if our ASID is of an older version and thus invalid */ |
118 | if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK) | 145 | if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK) |
119 | get_new_mmu_context(next, cpu); | 146 | get_new_mmu_context(next, cpu); |
120 | 147 | #ifdef CONFIG_MIPS_MT_SMTC | |
148 | /* | ||
149 | * If the EntryHi ASID being replaced happens to be | ||
150 | * the value flagged at ASID recycling time as having | ||
151 | * an extended life, clear the bit showing it being | ||
152 | * in use by this "CPU", and if that's the last bit, | ||
153 | * free up the ASID value for use and flush any old | ||
154 | * instances of it from the TLB. | ||
155 | */ | ||
156 | oldasid = (read_c0_entryhi() & ASID_MASK); | ||
157 | if(smtc_live_asid[mytlb][oldasid]) { | ||
158 | smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu); | ||
159 | if(smtc_live_asid[mytlb][oldasid] == 0) | ||
160 | smtc_flush_tlb_asid(oldasid); | ||
161 | } | ||
162 | /* | ||
163 | * Tread softly on EntryHi, and so long as we support | ||
164 | * having ASID_MASK smaller than the hardware maximum, | ||
165 | * make sure no "soft" bits become "hard"... | ||
166 | */ | ||
167 | write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) | ||
168 | | (cpu_context(cpu, next) & ASID_MASK)); | ||
169 | ehb(); /* Make sure it propagates to TCStatus */ | ||
170 | evpe(mtflags); | ||
171 | #else | ||
121 | write_c0_entryhi(cpu_context(cpu, next)); | 172 | write_c0_entryhi(cpu_context(cpu, next)); |
173 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
122 | TLBMISS_HANDLER_SETUP_PGD(next->pgd); | 174 | TLBMISS_HANDLER_SETUP_PGD(next->pgd); |
123 | 175 | ||
124 | /* | 176 | /* |
@@ -151,12 +203,34 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next) | |||
151 | unsigned long flags; | 203 | unsigned long flags; |
152 | unsigned int cpu = smp_processor_id(); | 204 | unsigned int cpu = smp_processor_id(); |
153 | 205 | ||
206 | #ifdef CONFIG_MIPS_MT_SMTC | ||
207 | unsigned long oldasid; | ||
208 | unsigned long mtflags; | ||
209 | int mytlb = (smtc_status & SMTC_TLB_SHARED) ? 0 : cpu_data[cpu].vpe_id; | ||
210 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
211 | |||
154 | local_irq_save(flags); | 212 | local_irq_save(flags); |
155 | 213 | ||
156 | /* Unconditionally get a new ASID. */ | 214 | /* Unconditionally get a new ASID. */ |
157 | get_new_mmu_context(next, cpu); | 215 | get_new_mmu_context(next, cpu); |
158 | 216 | ||
217 | #ifdef CONFIG_MIPS_MT_SMTC | ||
218 | /* See comments for similar code above */ | ||
219 | mtflags = dvpe(); | ||
220 | oldasid = read_c0_entryhi() & ASID_MASK; | ||
221 | if(smtc_live_asid[mytlb][oldasid]) { | ||
222 | smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu); | ||
223 | if(smtc_live_asid[mytlb][oldasid] == 0) | ||
224 | smtc_flush_tlb_asid(oldasid); | ||
225 | } | ||
226 | /* See comments for similar code above */ | ||
227 | write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) | | ||
228 | (cpu_context(cpu, next) & ASID_MASK)); | ||
229 | ehb(); /* Make sure it propagates to TCStatus */ | ||
230 | evpe(mtflags); | ||
231 | #else | ||
159 | write_c0_entryhi(cpu_context(cpu, next)); | 232 | write_c0_entryhi(cpu_context(cpu, next)); |
233 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
160 | TLBMISS_HANDLER_SETUP_PGD(next->pgd); | 234 | TLBMISS_HANDLER_SETUP_PGD(next->pgd); |
161 | 235 | ||
162 | /* mark mmu ownership change */ | 236 | /* mark mmu ownership change */ |
@@ -174,17 +248,49 @@ static inline void | |||
174 | drop_mmu_context(struct mm_struct *mm, unsigned cpu) | 248 | drop_mmu_context(struct mm_struct *mm, unsigned cpu) |
175 | { | 249 | { |
176 | unsigned long flags; | 250 | unsigned long flags; |
251 | #ifdef CONFIG_MIPS_MT_SMTC | ||
252 | unsigned long oldasid; | ||
253 | /* Can't use spinlock because called from TLB flush within DVPE */ | ||
254 | unsigned int prevvpe; | ||
255 | int mytlb = (smtc_status & SMTC_TLB_SHARED) ? 0 : cpu_data[cpu].vpe_id; | ||
256 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
177 | 257 | ||
178 | local_irq_save(flags); | 258 | local_irq_save(flags); |
179 | 259 | ||
180 | if (cpu_isset(cpu, mm->cpu_vm_mask)) { | 260 | if (cpu_isset(cpu, mm->cpu_vm_mask)) { |
181 | get_new_mmu_context(mm, cpu); | 261 | get_new_mmu_context(mm, cpu); |
262 | #ifdef CONFIG_MIPS_MT_SMTC | ||
263 | /* See comments for similar code above */ | ||
264 | prevvpe = dvpe(); | ||
265 | oldasid = (read_c0_entryhi() & ASID_MASK); | ||
266 | if(smtc_live_asid[mytlb][oldasid]) { | ||
267 | smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu); | ||
268 | if(smtc_live_asid[mytlb][oldasid] == 0) | ||
269 | smtc_flush_tlb_asid(oldasid); | ||
270 | } | ||
271 | /* See comments for similar code above */ | ||
272 | write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) | ||
273 | | cpu_asid(cpu, mm)); | ||
274 | ehb(); /* Make sure it propagates to TCStatus */ | ||
275 | evpe(prevvpe); | ||
276 | #else /* not CONFIG_MIPS_MT_SMTC */ | ||
182 | write_c0_entryhi(cpu_asid(cpu, mm)); | 277 | write_c0_entryhi(cpu_asid(cpu, mm)); |
278 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
183 | } else { | 279 | } else { |
184 | /* will get a new context next time */ | 280 | /* will get a new context next time */ |
281 | #ifndef CONFIG_MIPS_MT_SMTC | ||
185 | cpu_context(cpu, mm) = 0; | 282 | cpu_context(cpu, mm) = 0; |
283 | #else /* SMTC */ | ||
284 | int i; | ||
285 | |||
286 | /* SMTC shares the TLB (and ASIDs) across VPEs */ | ||
287 | for (i = 0; i < num_online_cpus(); i++) { | ||
288 | if((smtc_status & SMTC_TLB_SHARED) | ||
289 | || (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id)) | ||
290 | cpu_context(i, mm) = 0; | ||
291 | } | ||
292 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
186 | } | 293 | } |
187 | |||
188 | local_irq_restore(flags); | 294 | local_irq_restore(flags); |
189 | } | 295 | } |
190 | 296 | ||
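The recurring SMTC fragment in switch_mm(), activate_mm() and drop_mmu_context() above is reference counting of "live" ASIDs: smtc_live_asid[tlb][asid] holds a bitmask of TCs still using that ASID on the (possibly shared) TLB, and the last user to move away triggers the flush. A user-space model of that bookkeeping follows; the array sizes and the flush stub are placeholders, and the real tables live in the SMTC code rather than in this header.

#include <stdio.h>

#define NR_TLBS         2
#define NR_ASIDS        256

static unsigned long smtc_live_asid[NR_TLBS][NR_ASIDS];

static void smtc_flush_tlb_asid(unsigned long asid)
{
        printf("ASID %lu has no users left, flushing stale TLB entries\n", asid);
}

static void retire_asid(int mytlb, unsigned long oldasid, int cpu)
{
        if (smtc_live_asid[mytlb][oldasid]) {
                smtc_live_asid[mytlb][oldasid] &= ~(1UL << cpu);
                if (smtc_live_asid[mytlb][oldasid] == 0)
                        smtc_flush_tlb_asid(oldasid);
        }
}

int main(void)
{
        smtc_live_asid[0][5] = (1UL << 0) | (1UL << 1); /* ASID 5 in use by TC0 and TC1 */
        retire_asid(0, 5, 0);   /* TC0 switches away: still live on TC1, no flush */
        retire_asid(0, 5, 1);   /* TC1 switches away: last user, flush happens    */
        return 0;
}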
diff --git a/include/asm-mips/processor.h b/include/asm-mips/processor.h
index 39d2bd50fece..0fb75f0762e0 100644
--- a/include/asm-mips/processor.h
+++ b/include/asm-mips/processor.h
@@ -12,6 +12,7 @@ | |||
12 | #define _ASM_PROCESSOR_H | 12 | #define _ASM_PROCESSOR_H |
13 | 13 | ||
14 | #include <linux/config.h> | 14 | #include <linux/config.h> |
15 | #include <linux/cpumask.h> | ||
15 | #include <linux/threads.h> | 16 | #include <linux/threads.h> |
16 | 17 | ||
17 | #include <asm/cachectl.h> | 18 | #include <asm/cachectl.h> |
@@ -107,6 +108,10 @@ struct mips_dsp_state { | |||
107 | 108 | ||
108 | #define INIT_DSP {{0,},} | 109 | #define INIT_DSP {{0,},} |
109 | 110 | ||
111 | #define INIT_CPUMASK { \ | ||
112 | {0,} \ | ||
113 | } | ||
114 | |||
110 | typedef struct { | 115 | typedef struct { |
111 | unsigned long seg; | 116 | unsigned long seg; |
112 | } mm_segment_t; | 117 | } mm_segment_t; |
@@ -129,6 +134,12 @@ struct thread_struct { | |||
129 | 134 | ||
130 | /* Saved fpu/fpu emulator stuff. */ | 135 | /* Saved fpu/fpu emulator stuff. */ |
131 | union mips_fpu_union fpu; | 136 | union mips_fpu_union fpu; |
137 | #ifdef CONFIG_MIPS_MT_FPAFF | ||
138 | /* Emulated instruction count */ | ||
139 | unsigned long emulated_fp; | ||
140 | /* Saved per-thread scheduler affinity mask */ | ||
141 | cpumask_t user_cpus_allowed; | ||
142 | #endif /* CONFIG_MIPS_MT_FPAFF */ | ||
132 | 143 | ||
133 | /* Saved state of the DSP ASE, if available. */ | 144 | /* Saved state of the DSP ASE, if available. */ |
134 | struct mips_dsp_state dsp; | 145 | struct mips_dsp_state dsp; |
@@ -142,6 +153,7 @@ struct thread_struct { | |||
142 | #define MF_LOGADE 2 /* Log address errors to syslog */ | 153 | #define MF_LOGADE 2 /* Log address errors to syslog */ |
143 | #define MF_32BIT_REGS 4 /* also implies 16/32 fprs */ | 154 | #define MF_32BIT_REGS 4 /* also implies 16/32 fprs */ |
144 | #define MF_32BIT_ADDR 8 /* 32-bit address space (o32/n32) */ | 155 | #define MF_32BIT_ADDR 8 /* 32-bit address space (o32/n32) */ |
156 | #define MF_FPUBOUND 0x10 /* thread bound to FPU-full CPU set */ | ||
145 | unsigned long mflags; | 157 | unsigned long mflags; |
146 | unsigned long irix_trampoline; /* Wheee... */ | 158 | unsigned long irix_trampoline; /* Wheee... */ |
147 | unsigned long irix_oldctx; | 159 | unsigned long irix_oldctx; |
@@ -153,6 +165,12 @@ struct thread_struct { | |||
153 | #define MF_N32 MF_32BIT_ADDR | 165 | #define MF_N32 MF_32BIT_ADDR |
154 | #define MF_N64 0 | 166 | #define MF_N64 0 |
155 | 167 | ||
168 | #ifdef CONFIG_MIPS_MT_FPAFF | ||
169 | #define FPAFF_INIT 0, INIT_CPUMASK, | ||
170 | #else | ||
171 | #define FPAFF_INIT | ||
172 | #endif /* CONFIG_MIPS_MT_FPAFF */ | ||
173 | |||
156 | #define INIT_THREAD { \ | 174 | #define INIT_THREAD { \ |
157 | /* \ | 175 | /* \ |
158 | * saved main processor registers \ | 176 | * saved main processor registers \ |
@@ -168,6 +186,10 @@ struct thread_struct { | |||
168 | */ \ | 186 | */ \ |
169 | INIT_FPU, \ | 187 | INIT_FPU, \ |
170 | /* \ | 188 | /* \ |
189 | * fpu affinity state (null if not FPAFF) \ | ||
190 | */ \ | ||
191 | FPAFF_INIT \ | ||
192 | /* \ | ||
171 | * saved dsp/dsp emulator stuff \ | 193 | * saved dsp/dsp emulator stuff \ |
172 | */ \ | 194 | */ \ |
173 | INIT_DSP, \ | 195 | INIT_DSP, \ |
diff --git a/include/asm-mips/ptrace.h b/include/asm-mips/ptrace.h
index 95c5839ac465..fa9d8713c12a 100644
--- a/include/asm-mips/ptrace.h
+++ b/include/asm-mips/ptrace.h
@@ -45,6 +45,10 @@ struct pt_regs { | |||
45 | unsigned long cp0_badvaddr; | 45 | unsigned long cp0_badvaddr; |
46 | unsigned long cp0_cause; | 46 | unsigned long cp0_cause; |
47 | unsigned long cp0_epc; | 47 | unsigned long cp0_epc; |
48 | #ifdef CONFIG_MIPS_MT_SMTC | ||
49 | unsigned long cp0_tcstatus; | ||
50 | unsigned long smtc_pad; | ||
51 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
48 | }; | 52 | }; |
49 | 53 | ||
50 | /* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */ | 54 | /* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */ |
diff --git a/include/asm-mips/r4kcache.h b/include/asm-mips/r4kcache.h
index 90c374700977..3c8e3c8d1a9a 100644
--- a/include/asm-mips/r4kcache.h
+++ b/include/asm-mips/r4kcache.h
@@ -15,6 +15,7 @@ | |||
15 | #include <asm/asm.h> | 15 | #include <asm/asm.h> |
16 | #include <asm/cacheops.h> | 16 | #include <asm/cacheops.h> |
17 | #include <asm/cpu-features.h> | 17 | #include <asm/cpu-features.h> |
18 | #include <asm/mipsmtregs.h> | ||
18 | 19 | ||
19 | /* | 20 | /* |
20 | * This macro return a properly sign-extended address suitable as base address | 21 | * This macro return a properly sign-extended address suitable as base address |
@@ -37,16 +38,120 @@ | |||
37 | " cache %0, %1 \n" \ | 38 | " cache %0, %1 \n" \ |
38 | " .set pop \n" \ | 39 | " .set pop \n" \ |
39 | : \ | 40 | : \ |
40 | : "i" (op), "m" (*(unsigned char *)(addr))) | 41 | : "i" (op), "R" (*(unsigned char *)(addr))) |
42 | |||
43 | #ifdef CONFIG_MIPS_MT | ||
44 | /* | ||
45 | * Temporary hacks for SMTC debug. Optionally force single-threaded | ||
46 | * execution during I-cache flushes. | ||
47 | */ | ||
48 | |||
49 | #define PROTECT_CACHE_FLUSHES 1 | ||
50 | |||
51 | #ifdef PROTECT_CACHE_FLUSHES | ||
52 | |||
53 | extern int mt_protiflush; | ||
54 | extern int mt_protdflush; | ||
55 | extern void mt_cflush_lockdown(void); | ||
56 | extern void mt_cflush_release(void); | ||
57 | |||
58 | #define BEGIN_MT_IPROT \ | ||
59 | unsigned long flags = 0; \ | ||
60 | unsigned long mtflags = 0; \ | ||
61 | if(mt_protiflush) { \ | ||
62 | local_irq_save(flags); \ | ||
63 | ehb(); \ | ||
64 | mtflags = dvpe(); \ | ||
65 | mt_cflush_lockdown(); \ | ||
66 | } | ||
67 | |||
68 | #define END_MT_IPROT \ | ||
69 | if(mt_protiflush) { \ | ||
70 | mt_cflush_release(); \ | ||
71 | evpe(mtflags); \ | ||
72 | local_irq_restore(flags); \ | ||
73 | } | ||
74 | |||
75 | #define BEGIN_MT_DPROT \ | ||
76 | unsigned long flags = 0; \ | ||
77 | unsigned long mtflags = 0; \ | ||
78 | if(mt_protdflush) { \ | ||
79 | local_irq_save(flags); \ | ||
80 | ehb(); \ | ||
81 | mtflags = dvpe(); \ | ||
82 | mt_cflush_lockdown(); \ | ||
83 | } | ||
84 | |||
85 | #define END_MT_DPROT \ | ||
86 | if(mt_protdflush) { \ | ||
87 | mt_cflush_release(); \ | ||
88 | evpe(mtflags); \ | ||
89 | local_irq_restore(flags); \ | ||
90 | } | ||
91 | |||
92 | #else | ||
93 | |||
94 | #define BEGIN_MT_IPROT | ||
95 | #define BEGIN_MT_DPROT | ||
96 | #define END_MT_IPROT | ||
97 | #define END_MT_DPROT | ||
98 | |||
99 | #endif /* PROTECT_CACHE_FLUSHES */ | ||
100 | |||
101 | #define __iflush_prologue \ | ||
102 | unsigned long redundance; \ | ||
103 | extern int mt_n_iflushes; \ | ||
104 | BEGIN_MT_IPROT \ | ||
105 | for (redundance = 0; redundance < mt_n_iflushes; redundance++) { | ||
106 | |||
107 | #define __iflush_epilogue \ | ||
108 | END_MT_IPROT \ | ||
109 | } | ||
110 | |||
111 | #define __dflush_prologue \ | ||
112 | unsigned long redundance; \ | ||
113 | extern int mt_n_dflushes; \ | ||
114 | BEGIN_MT_DPROT \ | ||
115 | for (redundance = 0; redundance < mt_n_dflushes; redundance++) { | ||
116 | |||
117 | #define __dflush_epilogue \ | ||
118 | END_MT_DPROT \ | ||
119 | } | ||
120 | |||
121 | #define __inv_dflush_prologue __dflush_prologue | ||
122 | #define __inv_dflush_epilogue __dflush_epilogue | ||
123 | #define __sflush_prologue { | ||
124 | #define __sflush_epilogue } | ||
125 | #define __inv_sflush_prologue __sflush_prologue | ||
126 | #define __inv_sflush_epilogue __sflush_epilogue | ||
127 | |||
128 | #else /* CONFIG_MIPS_MT */ | ||
129 | |||
130 | #define __iflush_prologue { | ||
131 | #define __iflush_epilogue } | ||
132 | #define __dflush_prologue { | ||
133 | #define __dflush_epilogue } | ||
134 | #define __inv_dflush_prologue { | ||
135 | #define __inv_dflush_epilogue } | ||
136 | #define __sflush_prologue { | ||
137 | #define __sflush_epilogue } | ||
138 | #define __inv_sflush_prologue { | ||
139 | #define __inv_sflush_epilogue } | ||
140 | |||
141 | #endif /* CONFIG_MIPS_MT */ | ||
41 | 142 | ||
42 | static inline void flush_icache_line_indexed(unsigned long addr) | 143 | static inline void flush_icache_line_indexed(unsigned long addr) |
43 | { | 144 | { |
145 | __iflush_prologue | ||
44 | cache_op(Index_Invalidate_I, addr); | 146 | cache_op(Index_Invalidate_I, addr); |
147 | __iflush_epilogue | ||
45 | } | 148 | } |
46 | 149 | ||
47 | static inline void flush_dcache_line_indexed(unsigned long addr) | 150 | static inline void flush_dcache_line_indexed(unsigned long addr) |
48 | { | 151 | { |
152 | __dflush_prologue | ||
49 | cache_op(Index_Writeback_Inv_D, addr); | 153 | cache_op(Index_Writeback_Inv_D, addr); |
154 | __dflush_epilogue | ||
50 | } | 155 | } |
51 | 156 | ||
52 | static inline void flush_scache_line_indexed(unsigned long addr) | 157 | static inline void flush_scache_line_indexed(unsigned long addr) |
@@ -56,17 +161,23 @@ static inline void flush_scache_line_indexed(unsigned long addr) | |||
56 | 161 | ||
57 | static inline void flush_icache_line(unsigned long addr) | 162 | static inline void flush_icache_line(unsigned long addr) |
58 | { | 163 | { |
164 | __iflush_prologue | ||
59 | cache_op(Hit_Invalidate_I, addr); | 165 | cache_op(Hit_Invalidate_I, addr); |
166 | __iflush_epilogue | ||
60 | } | 167 | } |
61 | 168 | ||
62 | static inline void flush_dcache_line(unsigned long addr) | 169 | static inline void flush_dcache_line(unsigned long addr) |
63 | { | 170 | { |
171 | __dflush_prologue | ||
64 | cache_op(Hit_Writeback_Inv_D, addr); | 172 | cache_op(Hit_Writeback_Inv_D, addr); |
173 | __dflush_epilogue | ||
65 | } | 174 | } |
66 | 175 | ||
67 | static inline void invalidate_dcache_line(unsigned long addr) | 176 | static inline void invalidate_dcache_line(unsigned long addr) |
68 | { | 177 | { |
178 | __dflush_prologue | ||
69 | cache_op(Hit_Invalidate_D, addr); | 179 | cache_op(Hit_Invalidate_D, addr); |
180 | __dflush_epilogue | ||
70 | } | 181 | } |
71 | 182 | ||
72 | static inline void invalidate_scache_line(unsigned long addr) | 183 | static inline void invalidate_scache_line(unsigned long addr) |
@@ -239,9 +350,13 @@ static inline void blast_##pfx##cache##lsize(void) \ | |||
239 | current_cpu_data.desc.waybit; \ | 350 | current_cpu_data.desc.waybit; \ |
240 | unsigned long ws, addr; \ | 351 | unsigned long ws, addr; \ |
241 | \ | 352 | \ |
353 | __##pfx##flush_prologue \ | ||
354 | \ | ||
242 | for (ws = 0; ws < ws_end; ws += ws_inc) \ | 355 | for (ws = 0; ws < ws_end; ws += ws_inc) \ |
243 | for (addr = start; addr < end; addr += lsize * 32) \ | 356 | for (addr = start; addr < end; addr += lsize * 32) \ |
244 | cache##lsize##_unroll32(addr|ws,indexop); \ | 357 | cache##lsize##_unroll32(addr|ws,indexop); \ |
358 | \ | ||
359 | __##pfx##flush_epilogue \ | ||
245 | } \ | 360 | } \ |
246 | \ | 361 | \ |
247 | static inline void blast_##pfx##cache##lsize##_page(unsigned long page) \ | 362 | static inline void blast_##pfx##cache##lsize##_page(unsigned long page) \ |
@@ -249,10 +364,14 @@ static inline void blast_##pfx##cache##lsize##_page(unsigned long page) \ | |||
249 | unsigned long start = page; \ | 364 | unsigned long start = page; \ |
250 | unsigned long end = page + PAGE_SIZE; \ | 365 | unsigned long end = page + PAGE_SIZE; \ |
251 | \ | 366 | \ |
367 | __##pfx##flush_prologue \ | ||
368 | \ | ||
252 | do { \ | 369 | do { \ |
253 | cache##lsize##_unroll32(start,hitop); \ | 370 | cache##lsize##_unroll32(start,hitop); \ |
254 | start += lsize * 32; \ | 371 | start += lsize * 32; \ |
255 | } while (start < end); \ | 372 | } while (start < end); \ |
373 | \ | ||
374 | __##pfx##flush_epilogue \ | ||
256 | } \ | 375 | } \ |
257 | \ | 376 | \ |
258 | static inline void blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \ | 377 | static inline void blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \ |
@@ -265,9 +384,13 @@ static inline void blast_##pfx##cache##lsize##_page_indexed(unsigned long page) | |||
265 | current_cpu_data.desc.waybit; \ | 384 | current_cpu_data.desc.waybit; \ |
266 | unsigned long ws, addr; \ | 385 | unsigned long ws, addr; \ |
267 | \ | 386 | \ |
387 | __##pfx##flush_prologue \ | ||
388 | \ | ||
268 | for (ws = 0; ws < ws_end; ws += ws_inc) \ | 389 | for (ws = 0; ws < ws_end; ws += ws_inc) \ |
269 | for (addr = start; addr < end; addr += lsize * 32) \ | 390 | for (addr = start; addr < end; addr += lsize * 32) \ |
270 | cache##lsize##_unroll32(addr|ws,indexop); \ | 391 | cache##lsize##_unroll32(addr|ws,indexop); \ |
392 | \ | ||
393 | __##pfx##flush_epilogue \ | ||
271 | } | 394 | } |
272 | 395 | ||
273 | __BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16) | 396 | __BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16) |
@@ -288,12 +411,17 @@ static inline void prot##blast_##pfx##cache##_range(unsigned long start, \ | |||
288 | unsigned long lsize = cpu_##desc##_line_size(); \ | 411 | unsigned long lsize = cpu_##desc##_line_size(); \ |
289 | unsigned long addr = start & ~(lsize - 1); \ | 412 | unsigned long addr = start & ~(lsize - 1); \ |
290 | unsigned long aend = (end - 1) & ~(lsize - 1); \ | 413 | unsigned long aend = (end - 1) & ~(lsize - 1); \ |
414 | \ | ||
415 | __##pfx##flush_prologue \ | ||
416 | \ | ||
291 | while (1) { \ | 417 | while (1) { \ |
292 | prot##cache_op(hitop, addr); \ | 418 | prot##cache_op(hitop, addr); \ |
293 | if (addr == aend) \ | 419 | if (addr == aend) \ |
294 | break; \ | 420 | break; \ |
295 | addr += lsize; \ | 421 | addr += lsize; \ |
296 | } \ | 422 | } \ |
423 | \ | ||
424 | __##pfx##flush_epilogue \ | ||
297 | } | 425 | } |
298 | 426 | ||
299 | __BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_) | 427 | __BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_) |
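To make the new bracketing concrete, here is a long-hand rendering of what flush_dcache_line() expands to under CONFIG_MIPS_MT with PROTECT_CACHE_FLUSHES enabled. The function name is invented for illustration; note that END_MT_DPROT lands inside the redundancy loop, exactly as the macros are written.

static inline void flush_dcache_line_expanded(unsigned long addr)
{
        /* __dflush_prologue */
        unsigned long redundance;
        extern int mt_n_dflushes;
        unsigned long flags = 0;
        unsigned long mtflags = 0;

        if (mt_protdflush) {                    /* BEGIN_MT_DPROT */
                local_irq_save(flags);
                ehb();
                mtflags = dvpe();               /* single-thread the core */
                mt_cflush_lockdown();
        }
        for (redundance = 0; redundance < mt_n_dflushes; redundance++) {
                cache_op(Hit_Writeback_Inv_D, addr);    /* original body */
                if (mt_protdflush) {            /* END_MT_DPROT */
                        mt_cflush_release();
                        evpe(mtflags);
                        local_irq_restore(flags);
                }
        }                                       /* __dflush_epilogue */
}
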
diff --git a/include/asm-mips/rtc.h b/include/asm-mips/rtc.h index a2abc4572b63..82ad401c7dca 100644 --- a/include/asm-mips/rtc.h +++ b/include/asm-mips/rtc.h | |||
@@ -32,7 +32,7 @@ static inline unsigned int get_rtc_time(struct rtc_time *time) | |||
32 | { | 32 | { |
33 | unsigned long nowtime; | 33 | unsigned long nowtime; |
34 | 34 | ||
35 | nowtime = rtc_get_time(); | 35 | nowtime = rtc_mips_get_time(); |
36 | to_tm(nowtime, time); | 36 | to_tm(nowtime, time); |
37 | time->tm_year -= 1900; | 37 | time->tm_year -= 1900; |
38 | 38 | ||
@@ -47,7 +47,7 @@ static inline int set_rtc_time(struct rtc_time *time) | |||
47 | nowtime = mktime(time->tm_year+1900, time->tm_mon+1, | 47 | nowtime = mktime(time->tm_year+1900, time->tm_mon+1, |
48 | time->tm_mday, time->tm_hour, time->tm_min, | 48 | time->tm_mday, time->tm_hour, time->tm_min, |
49 | time->tm_sec); | 49 | time->tm_sec); |
50 | ret = rtc_set_time(nowtime); | 50 | ret = rtc_mips_set_time(nowtime); |
51 | 51 | ||
52 | return ret; | 52 | return ret; |
53 | } | 53 | } |
diff --git a/include/asm-mips/rtlx.h b/include/asm-mips/rtlx.h index 1298c3fdf6c9..76cd51c6be39 100644 --- a/include/asm-mips/rtlx.h +++ b/include/asm-mips/rtlx.h | |||
@@ -3,32 +3,46 @@ | |||
3 | * | 3 | * |
4 | */ | 4 | */ |
5 | 5 | ||
6 | #ifndef _RTLX_H | 6 | #ifndef __ASM_RTLX_H |
7 | #define _RTLX_H_ | 7 | #define __ASM_RTLX_H |
8 | 8 | ||
9 | #define LX_NODE_BASE 10 | 9 | #define LX_NODE_BASE 10 |
10 | 10 | ||
11 | #define MIPSCPU_INT_BASE 16 | 11 | #define MIPSCPU_INT_BASE 16 |
12 | #define MIPS_CPU_RTLX_IRQ 0 | 12 | #define MIPS_CPU_RTLX_IRQ 0 |
13 | 13 | ||
14 | #define RTLX_VERSION 1 | 14 | #define RTLX_VERSION 2 |
15 | #define RTLX_xID 0x12345600 | 15 | #define RTLX_xID 0x12345600 |
16 | #define RTLX_ID (RTLX_xID | RTLX_VERSION) | 16 | #define RTLX_ID (RTLX_xID | RTLX_VERSION) |
17 | #define RTLX_CHANNELS 8 | 17 | #define RTLX_CHANNELS 8 |
18 | 18 | ||
19 | #define RTLX_BUFFER_SIZE 1024 | 19 | #define RTLX_CHANNEL_STDIO 0 |
20 | #define RTLX_CHANNEL_DBG 1 | ||
21 | #define RTLX_CHANNEL_SYSIO 2 | ||
20 | 22 | ||
21 | /* | 23 | extern int rtlx_open(int index, int can_sleep); |
22 | * lx_state bits | 24 | extern int rtlx_release(int index); |
23 | */ | 25 | extern ssize_t rtlx_read(int index, void *buff, size_t count, int user); |
24 | #define RTLX_STATE_OPENED 1UL | 26 | extern ssize_t rtlx_write(int index, void *buffer, size_t count, int user); |
27 | extern unsigned int rtlx_read_poll(int index, int can_sleep); | ||
28 | extern unsigned int rtlx_write_poll(int index); | ||
29 | |||
30 | enum rtlx_state { | ||
31 | RTLX_STATE_UNUSED, | ||
32 | RTLX_STATE_INITIALISED, | ||
33 | RTLX_STATE_REMOTE_READY, | ||
34 | RTLX_STATE_OPENED | ||
35 | }; | ||
36 | |||
37 | #define RTLX_BUFFER_SIZE 1024 | ||
25 | 38 | ||
26 | /* each channel supports read and write. | 39 | /* each channel supports read and write. |
27 | linux (vpe0) reads lx_buffer and writes rt_buffer | 40 | linux (vpe0) reads lx_buffer and writes rt_buffer |
28 | SP (vpe1) reads rt_buffer and writes lx_buffer | 41 | SP (vpe1) reads rt_buffer and writes lx_buffer |
29 | */ | 42 | */ |
30 | struct rtlx_channel { | 43 | struct rtlx_channel { |
31 | unsigned long lx_state; | 44 | enum rtlx_state rt_state; |
45 | enum rtlx_state lx_state; | ||
32 | 46 | ||
33 | int buffer_size; | 47 | int buffer_size; |
34 | 48 | ||
@@ -38,15 +52,13 @@ struct rtlx_channel { | |||
38 | 52 | ||
39 | int lx_write, lx_read; | 53 | int lx_write, lx_read; |
40 | char *lx_buffer; | 54 | char *lx_buffer; |
41 | |||
42 | void *queues; | ||
43 | |||
44 | }; | 55 | }; |
45 | 56 | ||
46 | struct rtlx_info { | 57 | struct rtlx_info { |
47 | unsigned long id; | 58 | unsigned long id; |
59 | enum rtlx_state state; | ||
48 | 60 | ||
49 | struct rtlx_channel channel[RTLX_CHANNELS]; | 61 | struct rtlx_channel channel[RTLX_CHANNELS]; |
50 | }; | 62 | }; |
51 | 63 | ||
52 | #endif /* _RTLX_H_ */ | 64 | #endif /* __ASM_RTLX_H */ |
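A hedged, kernel-side usage sketch of the API now exported above. The channel choice, the message, the helper name, and the assumptions that rtlx_open() returns 0 on success and that a non-zero rtlx_write_poll() means there is room to write are all illustrative, not guaranteed by this header:

static int rtlx_hello(void)
{
        static char msg[] = "hello from vpe0\n";
        int ret;

        ret = rtlx_open(RTLX_CHANNEL_STDIO, 1);         /* 1: caller may sleep */
        if (ret != 0)
                return ret;

        if (rtlx_write_poll(RTLX_CHANNEL_STDIO))        /* room to write? */
                rtlx_write(RTLX_CHANNEL_STDIO, msg, sizeof(msg) - 1, 0 /* kernel buffer */);

        return rtlx_release(RTLX_CHANNEL_STDIO);
}
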
diff --git a/include/asm-mips/serial.h b/include/asm-mips/serial.h index 7b2366412203..7196ceb0e948 100644 --- a/include/asm-mips/serial.h +++ b/include/asm-mips/serial.h | |||
@@ -77,15 +77,15 @@ | |||
77 | #include <asm/it8712.h> | 77 | #include <asm/it8712.h> |
78 | #define ITE_SERIAL_PORT_DEFNS \ | 78 | #define ITE_SERIAL_PORT_DEFNS \ |
79 | { .baud_base = BASE_BAUD, .port = (IT8172_PCI_IO_BASE + IT_UART_BASE), \ | 79 | { .baud_base = BASE_BAUD, .port = (IT8172_PCI_IO_BASE + IT_UART_BASE), \ |
80 | .irq = IT8172_UART_IRQ, .flags = STD_COM_FLAGS, .type = 0x3 }, \ | 80 | .irq = IT8172_UART_IRQ, .flags = STD_COM_FLAGS, .port = PORT_16550 }, \ |
81 | { .baud_base = (24000000/(16*13)), .port = (IT8172_PCI_IO_BASE + IT8712_UART1_PORT), \ | 81 | { .baud_base = (24000000/(16*13)), .port = (IT8172_PCI_IO_BASE + IT8712_UART1_PORT), \ |
82 | .irq = IT8172_SERIRQ_4, .flags = STD_COM_FLAGS, .type = 0x3 }, \ | 82 | .irq = IT8172_SERIRQ_4, .flags = STD_COM_FLAGS, .port = PORT_16550 }, \ |
83 | /* Smart Card Reader 0 */ \ | 83 | /* Smart Card Reader 0 */ \ |
84 | { .baud_base = BASE_BAUD, .port = (IT8172_PCI_IO_BASE + IT_SCR0_BASE), \ | 84 | { .baud_base = BASE_BAUD, .port = (IT8172_PCI_IO_BASE + IT_SCR0_BASE), \ |
85 | .irq = IT8172_SCR0_IRQ, .flags = STD_COM_FLAGS, .type = 0x3 }, \ | 85 | .irq = IT8172_SCR0_IRQ, .flags = STD_COM_FLAGS, .port = PORT_16550 }, \ |
86 | /* Smart Card Reader 1 */ \ | 86 | /* Smart Card Reader 1 */ \ |
87 | { .baud_base = BASE_BAUD, .port = (IT8172_PCI_IO_BASE + IT_SCR1_BASE), \ | 87 | { .baud_base = BASE_BAUD, .port = (IT8172_PCI_IO_BASE + IT_SCR1_BASE), \ |
88 | .irq = IT8172_SCR1_IRQ, .flags = STD_COM_FLAGS, .type = 0x3 }, | 88 | .irq = IT8172_SCR1_IRQ, .flags = STD_COM_FLAGS, .port = PORT_16550 }, |
89 | #else | 89 | #else |
90 | #define ITE_SERIAL_PORT_DEFNS | 90 | #define ITE_SERIAL_PORT_DEFNS |
91 | #endif | 91 | #endif |
@@ -95,10 +95,10 @@ | |||
95 | #include <asm/it8172/it8172_int.h> | 95 | #include <asm/it8172/it8172_int.h> |
96 | #define IVR_SERIAL_PORT_DEFNS \ | 96 | #define IVR_SERIAL_PORT_DEFNS \ |
97 | { .baud_base = BASE_BAUD, .port = (IT8172_PCI_IO_BASE + IT_UART_BASE), \ | 97 | { .baud_base = BASE_BAUD, .port = (IT8172_PCI_IO_BASE + IT_UART_BASE), \ |
98 | .irq = IT8172_UART_IRQ, .flags = STD_COM_FLAGS, .type = 0x3 }, \ | 98 | .irq = IT8172_UART_IRQ, .flags = STD_COM_FLAGS, .port = PORT_16550 }, \ |
99 | /* Smart Card Reader 1 */ \ | 99 | /* Smart Card Reader 1 */ \ |
100 | { .baud_base = BASE_BAUD, .port = (IT8172_PCI_IO_BASE + IT_SCR1_BASE), \ | 100 | { .baud_base = BASE_BAUD, .port = (IT8172_PCI_IO_BASE + IT_SCR1_BASE), \ |
101 | .irq = IT8172_SCR1_IRQ, .flags = STD_COM_FLAGS, .type = 0x3 }, | 101 | .irq = IT8172_SCR1_IRQ, .flags = STD_COM_FLAGS, .port = PORT_16550 }, |
102 | #else | 102 | #else |
103 | #define IVR_SERIAL_PORT_DEFNS | 103 | #define IVR_SERIAL_PORT_DEFNS |
104 | #endif | 104 | #endif |
diff --git a/include/asm-mips/smtc.h b/include/asm-mips/smtc.h new file mode 100644 index 000000000000..e1941d1b8726 --- /dev/null +++ b/include/asm-mips/smtc.h | |||
@@ -0,0 +1,55 @@ | |||
1 | #ifndef _ASM_SMTC_MT_H | ||
2 | #define _ASM_SMTC_MT_H | ||
3 | |||
4 | /* | ||
5 | * Definitions for SMTC multitasking on MIPS MT cores | ||
6 | */ | ||
7 | |||
8 | #include <asm/mips_mt.h> | ||
9 | |||
10 | /* | ||
11 | * System-wide SMTC status information | ||
12 | */ | ||
13 | |||
14 | extern unsigned int smtc_status; | ||
15 | |||
16 | #define SMTC_TLB_SHARED 0x00000001 | ||
17 | #define SMTC_MTC_ACTIVE 0x00000002 | ||
18 | |||
19 | /* | ||
20 | * TLB/ASID Management information | ||
21 | */ | ||
22 | |||
23 | #define MAX_SMTC_TLBS 2 | ||
24 | #define MAX_SMTC_ASIDS 256 | ||
25 | #if NR_CPUS <= 8 | ||
26 | typedef char asiduse; | ||
27 | #else | ||
28 | #if NR_CPUS <= 16 | ||
29 | typedef short asiduse; | ||
30 | #else | ||
31 | typedef long asiduse; | ||
32 | #endif | ||
33 | #endif | ||
34 | |||
35 | extern asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS]; | ||
36 | |||
37 | void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu); | ||
38 | |||
39 | void smtc_flush_tlb_asid(unsigned long asid); | ||
40 | extern int mipsmt_build_cpu_map(int startslot); | ||
41 | extern void mipsmt_prepare_cpus(void); | ||
42 | extern void smtc_smp_finish(void); | ||
43 | extern void smtc_boot_secondary(int cpu, struct task_struct *t); | ||
44 | |||
45 | /* | ||
46 | * Sharing the TLB between multiple VPEs means that the | ||
47 | * "random" index selection function is not allowed to | ||
48 | * select the current value of the Index register. To | ||
49 | * avoid additional TLB pressure, the Index registers | ||
50 | * are "parked" with an non-Valid value. | ||
51 | */ | ||
52 | |||
53 | #define PARKED_INDEX ((unsigned int)0x80000000) | ||
54 | |||
55 | #endif /* _ASM_SMTC_MT_H */ | ||
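The asiduse typedef is sized so that one bit per possible CPU fits into each table element. A hedged sketch of how the live-ASID table might be marked and tested; the helper names are assumptions, and the real bookkeeping lives in arch/mips/kernel/smtc.c:

/* asid is assumed to be already masked to a value < MAX_SMTC_ASIDS */
static inline void smtc_mark_asid_live(int tlb, unsigned int asid, int cpu)
{
        smtc_live_asid[tlb][asid] |= (asiduse)(1 << cpu);
}

static inline int smtc_asid_in_use(int tlb, unsigned int asid)
{
        return smtc_live_asid[tlb][asid] != 0;
}
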
diff --git a/include/asm-mips/smtc_ipi.h b/include/asm-mips/smtc_ipi.h new file mode 100644 index 000000000000..f22c3e2f993a --- /dev/null +++ b/include/asm-mips/smtc_ipi.h | |||
@@ -0,0 +1,118 @@ | |||
1 | /* | ||
2 | * Definitions used in MIPS MT SMTC "Interprocessor Interrupt" code. | ||
3 | */ | ||
4 | #ifndef __ASM_SMTC_IPI_H | ||
5 | #define __ASM_SMTC_IPI_H | ||
6 | |||
7 | //#define SMTC_IPI_DEBUG | ||
8 | |||
9 | #ifdef SMTC_IPI_DEBUG | ||
10 | #include <asm/mipsregs.h> | ||
11 | #include <asm/mipsmtregs.h> | ||
12 | #endif /* SMTC_IPI_DEBUG */ | ||
13 | |||
14 | /* | ||
15 | * An IPI "message" | ||
16 | */ | ||
17 | |||
18 | struct smtc_ipi { | ||
19 | struct smtc_ipi *flink; | ||
20 | int type; | ||
21 | void *arg; | ||
22 | int dest; | ||
23 | #ifdef SMTC_IPI_DEBUG | ||
24 | int sender; | ||
25 | long stamp; | ||
26 | #endif /* SMTC_IPI_DEBUG */ | ||
27 | }; | ||
28 | |||
29 | /* | ||
30 | * Defined IPI Types | ||
31 | */ | ||
32 | |||
33 | #define LINUX_SMP_IPI 1 | ||
34 | #define SMTC_CLOCK_TICK 2 | ||
35 | |||
36 | /* | ||
37 | * A queue of IPI messages | ||
38 | */ | ||
39 | |||
40 | struct smtc_ipi_q { | ||
41 | struct smtc_ipi *head; | ||
42 | spinlock_t lock; | ||
43 | struct smtc_ipi *tail; | ||
44 | int depth; | ||
45 | }; | ||
46 | |||
47 | extern struct smtc_ipi_q IPIQ[NR_CPUS]; | ||
48 | extern struct smtc_ipi_q freeIPIq; | ||
49 | |||
50 | static inline void smtc_ipi_nq(struct smtc_ipi_q *q, struct smtc_ipi *p) | ||
51 | { | ||
52 | long flags; | ||
53 | |||
54 | spin_lock_irqsave(&q->lock, flags); | ||
55 | if (q->head == NULL) | ||
56 | q->head = q->tail = p; | ||
57 | else | ||
58 | q->tail->flink = p; | ||
59 | p->flink = NULL; | ||
60 | q->tail = p; | ||
61 | q->depth++; | ||
62 | #ifdef SMTC_IPI_DEBUG | ||
63 | p->sender = read_c0_tcbind(); | ||
64 | p->stamp = read_c0_count(); | ||
65 | #endif /* SMTC_IPI_DEBUG */ | ||
66 | spin_unlock_irqrestore(&q->lock, flags); | ||
67 | } | ||
68 | |||
69 | static inline struct smtc_ipi *smtc_ipi_dq(struct smtc_ipi_q *q) | ||
70 | { | ||
71 | struct smtc_ipi *p; | ||
72 | long flags; | ||
73 | |||
74 | spin_lock_irqsave(&q->lock, flags); | ||
75 | if (q->head == NULL) | ||
76 | p = NULL; | ||
77 | else { | ||
78 | p = q->head; | ||
79 | q->head = q->head->flink; | ||
80 | q->depth--; | ||
81 | /* Arguably unnecessary, but leaves queue cleaner */ | ||
82 | if (q->head == NULL) | ||
83 | q->tail = NULL; | ||
84 | } | ||
85 | spin_unlock_irqrestore(&q->lock, flags); | ||
86 | return p; | ||
87 | } | ||
88 | |||
89 | static inline void smtc_ipi_req(struct smtc_ipi_q *q, struct smtc_ipi *p) | ||
90 | { | ||
91 | long flags; | ||
92 | |||
93 | spin_lock_irqsave(&q->lock, flags); | ||
94 | if (q->head == NULL) { | ||
95 | q->head = q->tail = p; | ||
96 | p->flink = NULL; | ||
97 | } else { | ||
98 | p->flink = q->head; | ||
99 | q->head = p; | ||
100 | } | ||
101 | q->depth++; | ||
102 | spin_unlock_irqrestore(&q->lock, flags); | ||
103 | } | ||
104 | |||
105 | static inline int smtc_ipi_qdepth(struct smtc_ipi_q *q) | ||
106 | { | ||
107 | long flags; | ||
108 | int retval; | ||
109 | |||
110 | spin_lock_irqsave(&q->lock, flags); | ||
111 | retval = q->depth; | ||
112 | spin_unlock_irqrestore(&q->lock, flags); | ||
113 | return retval; | ||
114 | } | ||
115 | |||
116 | extern void smtc_send_ipi(int cpu, int type, unsigned int action); | ||
117 | |||
118 | #endif /* __ASM_SMTC_IPI_H */ | ||
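A hedged sketch of how the queue primitives above combine to post an IPI message from the free pool onto a target CPU's queue; the real smtc_send_ipi() additionally kicks the destination TC, which is omitted here, and the helper name is invented:

static inline int smtc_post_ipi(int cpu, int type, void *arg)
{
        struct smtc_ipi *p = smtc_ipi_dq(&freeIPIq);

        if (p == NULL)
                return -1;              /* free descriptor pool exhausted */

        p->type = type;
        p->arg  = arg;
        p->dest = cpu;
        smtc_ipi_nq(&IPIQ[cpu], p);     /* FIFO enqueue on the target CPU */
        return 0;
}
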
diff --git a/include/asm-mips/smtc_proc.h b/include/asm-mips/smtc_proc.h new file mode 100644 index 000000000000..25da651f1f5f --- /dev/null +++ b/include/asm-mips/smtc_proc.h | |||
@@ -0,0 +1,23 @@ | |||
1 | /* | ||
2 | * Definitions for SMTC /proc entries | ||
3 | * Copyright(C) 2005 MIPS Technologies Inc. | ||
4 | */ | ||
5 | #ifndef __ASM_SMTC_PROC_H | ||
6 | #define __ASM_SMTC_PROC_H | ||
7 | |||
8 | /* | ||
9 | * per-"CPU" statistics | ||
10 | */ | ||
11 | |||
12 | struct smtc_cpu_proc { | ||
13 | unsigned long timerints; | ||
14 | unsigned long selfipis; | ||
15 | }; | ||
16 | |||
17 | extern struct smtc_cpu_proc smtc_cpu_stats[NR_CPUS]; | ||
18 | |||
19 | /* Count of recoveries of "stolen" FPU access rights on 34K */ | ||
20 | |||
21 | extern atomic_t smtc_fpu_recoveries; | ||
22 | |||
23 | #endif /* __ASM_SMTC_PROC_H */ | ||
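Illustrative only: roughly where these counters would be bumped. The real call sites are in arch/mips/kernel/smtc.c, and the helper below is not part of the header:

static inline void smtc_account_timer_int(int cpu)
{
        smtc_cpu_stats[cpu].timerints++;
}
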
diff --git a/include/asm-mips/stackframe.h b/include/asm-mips/stackframe.h index 2acf3e844f00..c4856a874965 100644 --- a/include/asm-mips/stackframe.h +++ b/include/asm-mips/stackframe.h | |||
@@ -14,9 +14,14 @@ | |||
14 | #include <linux/threads.h> | 14 | #include <linux/threads.h> |
15 | 15 | ||
16 | #include <asm/asm.h> | 16 | #include <asm/asm.h> |
17 | #include <asm/asmmacro.h> | ||
17 | #include <asm/mipsregs.h> | 18 | #include <asm/mipsregs.h> |
18 | #include <asm/asm-offsets.h> | 19 | #include <asm/asm-offsets.h> |
19 | 20 | ||
21 | #ifdef CONFIG_MIPS_MT_SMTC | ||
22 | #include <asm/mipsmtregs.h> | ||
23 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
24 | |||
20 | .macro SAVE_AT | 25 | .macro SAVE_AT |
21 | .set push | 26 | .set push |
22 | .set noat | 27 | .set noat |
@@ -57,13 +62,30 @@ | |||
57 | #ifdef CONFIG_SMP | 62 | #ifdef CONFIG_SMP |
58 | .macro get_saved_sp /* SMP variation */ | 63 | .macro get_saved_sp /* SMP variation */ |
59 | #ifdef CONFIG_32BIT | 64 | #ifdef CONFIG_32BIT |
65 | #ifdef CONFIG_MIPS_MT_SMTC | ||
66 | .set mips32 | ||
67 | mfc0 k0, CP0_TCBIND; | ||
68 | .set mips0 | ||
69 | lui k1, %hi(kernelsp) | ||
70 | srl k0, k0, 19 | ||
71 | /* No need to shift down and up to clear bits 0-1 */ | ||
72 | #else | ||
60 | mfc0 k0, CP0_CONTEXT | 73 | mfc0 k0, CP0_CONTEXT |
61 | lui k1, %hi(kernelsp) | 74 | lui k1, %hi(kernelsp) |
62 | srl k0, k0, 23 | 75 | srl k0, k0, 23 |
76 | #endif | ||
63 | addu k1, k0 | 77 | addu k1, k0 |
64 | LONG_L k1, %lo(kernelsp)(k1) | 78 | LONG_L k1, %lo(kernelsp)(k1) |
65 | #endif | 79 | #endif |
66 | #ifdef CONFIG_64BIT | 80 | #ifdef CONFIG_64BIT |
81 | #ifdef CONFIG_MIPS_MT_SMTC | ||
82 | .set mips64 | ||
83 | mfc0 k1, CP0_TCBIND; | ||
84 | .set mips0 | ||
85 | lui k0, %highest(kernelsp) | ||
86 | dsrl k1, 19 | ||
87 | /* No need to shift down and up to clear bits 0-2 */ | ||
88 | #else | ||
67 | MFC0 k1, CP0_CONTEXT | 89 | MFC0 k1, CP0_CONTEXT |
68 | lui k0, %highest(kernelsp) | 90 | lui k0, %highest(kernelsp) |
69 | dsrl k1, 23 | 91 | dsrl k1, 23 |
@@ -71,20 +93,31 @@ | |||
71 | dsll k0, k0, 16 | 93 | dsll k0, k0, 16 |
72 | daddiu k0, %hi(kernelsp) | 94 | daddiu k0, %hi(kernelsp) |
73 | dsll k0, k0, 16 | 95 | dsll k0, k0, 16 |
96 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
74 | daddu k1, k1, k0 | 97 | daddu k1, k1, k0 |
75 | LONG_L k1, %lo(kernelsp)(k1) | 98 | LONG_L k1, %lo(kernelsp)(k1) |
76 | #endif | 99 | #endif /* CONFIG_64BIT */ |
77 | .endm | 100 | .endm |
78 | 101 | ||
79 | .macro set_saved_sp stackp temp temp2 | 102 | .macro set_saved_sp stackp temp temp2 |
80 | #ifdef CONFIG_32BIT | 103 | #ifdef CONFIG_32BIT |
104 | #ifdef CONFIG_MIPS_MT_SMTC | ||
105 | mfc0 \temp, CP0_TCBIND | ||
106 | srl \temp, 19 | ||
107 | #else | ||
81 | mfc0 \temp, CP0_CONTEXT | 108 | mfc0 \temp, CP0_CONTEXT |
82 | srl \temp, 23 | 109 | srl \temp, 23 |
83 | #endif | 110 | #endif |
111 | #endif | ||
84 | #ifdef CONFIG_64BIT | 112 | #ifdef CONFIG_64BIT |
113 | #ifdef CONFIG_MIPS_MT_SMTC | ||
114 | mfc0 \temp, CP0_TCBIND | ||
115 | dsrl \temp, 19 | ||
116 | #else | ||
85 | MFC0 \temp, CP0_CONTEXT | 117 | MFC0 \temp, CP0_CONTEXT |
86 | dsrl \temp, 23 | 118 | dsrl \temp, 23 |
87 | #endif | 119 | #endif |
120 | #endif | ||
88 | LONG_S \stackp, kernelsp(\temp) | 121 | LONG_S \stackp, kernelsp(\temp) |
89 | .endm | 122 | .endm |
90 | #else | 123 | #else |
@@ -122,10 +155,25 @@ | |||
122 | PTR_SUBU sp, k1, PT_SIZE | 155 | PTR_SUBU sp, k1, PT_SIZE |
123 | LONG_S k0, PT_R29(sp) | 156 | LONG_S k0, PT_R29(sp) |
124 | LONG_S $3, PT_R3(sp) | 157 | LONG_S $3, PT_R3(sp) |
158 | /* | ||
159 | * You might think that you don't need to save $0, | ||
160 | * but the FPU emulator and gdb remote debug stub | ||
161 | * need it to operate correctly | ||
162 | */ | ||
125 | LONG_S $0, PT_R0(sp) | 163 | LONG_S $0, PT_R0(sp) |
126 | mfc0 v1, CP0_STATUS | 164 | mfc0 v1, CP0_STATUS |
127 | LONG_S $2, PT_R2(sp) | 165 | LONG_S $2, PT_R2(sp) |
128 | LONG_S v1, PT_STATUS(sp) | 166 | LONG_S v1, PT_STATUS(sp) |
167 | #ifdef CONFIG_MIPS_MT_SMTC | ||
168 | /* | ||
169 | * Ideally, these instructions would be shuffled in | ||
170 | * to cover the pipeline delay. | ||
171 | */ | ||
172 | .set mips32 | ||
173 | mfc0 v1, CP0_TCSTATUS | ||
174 | .set mips0 | ||
175 | LONG_S v1, PT_TCSTATUS(sp) | ||
176 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
129 | LONG_S $4, PT_R4(sp) | 177 | LONG_S $4, PT_R4(sp) |
130 | mfc0 v1, CP0_CAUSE | 178 | mfc0 v1, CP0_CAUSE |
131 | LONG_S $5, PT_R5(sp) | 179 | LONG_S $5, PT_R5(sp) |
@@ -234,14 +282,36 @@ | |||
234 | .endm | 282 | .endm |
235 | 283 | ||
236 | #else | 284 | #else |
285 | /* | ||
286 | * For SMTC kernel, global IE should be left set, and interrupts | ||
287 | * controlled exclusively via IXMT. | ||
288 | */ | ||
237 | 289 | ||
290 | #ifdef CONFIG_MIPS_MT_SMTC | ||
291 | #define STATMASK 0x1e | ||
292 | #else | ||
293 | #define STATMASK 0x1f | ||
294 | #endif | ||
238 | .macro RESTORE_SOME | 295 | .macro RESTORE_SOME |
239 | .set push | 296 | .set push |
240 | .set reorder | 297 | .set reorder |
241 | .set noat | 298 | .set noat |
299 | #ifdef CONFIG_MIPS_MT_SMTC | ||
300 | .set mips32r2 | ||
301 | /* | ||
302 | * This may not really be necessary if ints are already | ||
303 | * inhibited here. | ||
304 | */ | ||
305 | mfc0 v0, CP0_TCSTATUS | ||
306 | ori v0, TCSTATUS_IXMT | ||
307 | mtc0 v0, CP0_TCSTATUS | ||
308 | ehb | ||
309 | DMT 5 # dmt a1 | ||
310 | jal mips_ihb | ||
311 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
242 | mfc0 a0, CP0_STATUS | 312 | mfc0 a0, CP0_STATUS |
243 | ori a0, 0x1f | 313 | ori a0, STATMASK |
244 | xori a0, 0x1f | 314 | xori a0, STATMASK |
245 | mtc0 a0, CP0_STATUS | 315 | mtc0 a0, CP0_STATUS |
246 | li v1, 0xff00 | 316 | li v1, 0xff00 |
247 | and a0, v1 | 317 | and a0, v1 |
@@ -250,6 +320,26 @@ | |||
250 | and v0, v1 | 320 | and v0, v1 |
251 | or v0, a0 | 321 | or v0, a0 |
252 | mtc0 v0, CP0_STATUS | 322 | mtc0 v0, CP0_STATUS |
323 | #ifdef CONFIG_MIPS_MT_SMTC | ||
324 | /* | ||
325 | * Only after EXL/ERL have been restored to status can we | ||
326 | * restore TCStatus.IXMT. | ||
327 | */ | ||
328 | LONG_L v1, PT_TCSTATUS(sp) | ||
329 | ehb | ||
330 | mfc0 v0, CP0_TCSTATUS | ||
331 | andi v1, TCSTATUS_IXMT | ||
332 | /* We know that TCStatus.IXMT should be set from above */ | ||
333 | xori v0, v0, TCSTATUS_IXMT | ||
334 | or v0, v0, v1 | ||
335 | mtc0 v0, CP0_TCSTATUS | ||
336 | ehb | ||
337 | andi a1, a1, VPECONTROL_TE | ||
338 | beqz a1, 1f | ||
339 | emt | ||
340 | 1: | ||
341 | .set mips0 | ||
342 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
253 | LONG_L v1, PT_EPC(sp) | 343 | LONG_L v1, PT_EPC(sp) |
254 | MTC0 v1, CP0_EPC | 344 | MTC0 v1, CP0_EPC |
255 | LONG_L $31, PT_R31(sp) | 345 | LONG_L $31, PT_R31(sp) |
@@ -302,11 +392,33 @@ | |||
302 | * Set cp0 enable bit as sign that we're running on the kernel stack | 392 | * Set cp0 enable bit as sign that we're running on the kernel stack |
303 | */ | 393 | */ |
304 | .macro CLI | 394 | .macro CLI |
395 | #if !defined(CONFIG_MIPS_MT_SMTC) | ||
305 | mfc0 t0, CP0_STATUS | 396 | mfc0 t0, CP0_STATUS |
306 | li t1, ST0_CU0 | 0x1f | 397 | li t1, ST0_CU0 | 0x1f |
307 | or t0, t1 | 398 | or t0, t1 |
308 | xori t0, 0x1f | 399 | xori t0, 0x1f |
309 | mtc0 t0, CP0_STATUS | 400 | mtc0 t0, CP0_STATUS |
401 | #else /* CONFIG_MIPS_MT_SMTC */ | ||
402 | /* | ||
403 | * For SMTC, we need to set privilege | ||
404 | * and disable interrupts only for the | ||
405 | * current TC, using the TCStatus register. | ||
406 | */ | ||
407 | mfc0 t0,CP0_TCSTATUS | ||
408 | /* Fortunately CU 0 is in the same place in both registers */ | ||
409 | /* Set TCU0, TMX, TKSU (for later inversion) and IXMT */ | ||
410 | li t1, ST0_CU0 | 0x08001c00 | ||
411 | or t0,t1 | ||
412 | /* Clear TKSU, leave IXMT */ | ||
413 | xori t0, 0x00001800 | ||
414 | mtc0 t0, CP0_TCSTATUS | ||
415 | ehb | ||
416 | /* We need to leave the global IE bit set, but clear EXL...*/ | ||
417 | mfc0 t0, CP0_STATUS | ||
418 | ori t0, ST0_EXL | ST0_ERL | ||
419 | xori t0, ST0_EXL | ST0_ERL | ||
420 | mtc0 t0, CP0_STATUS | ||
421 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
310 | irq_disable_hazard | 422 | irq_disable_hazard |
311 | .endm | 423 | .endm |
312 | 424 | ||
@@ -315,11 +427,35 @@ | |||
315 | * Set cp0 enable bit as sign that we're running on the kernel stack | 427 | * Set cp0 enable bit as sign that we're running on the kernel stack |
316 | */ | 428 | */ |
317 | .macro STI | 429 | .macro STI |
430 | #if !defined(CONFIG_MIPS_MT_SMTC) | ||
318 | mfc0 t0, CP0_STATUS | 431 | mfc0 t0, CP0_STATUS |
319 | li t1, ST0_CU0 | 0x1f | 432 | li t1, ST0_CU0 | 0x1f |
320 | or t0, t1 | 433 | or t0, t1 |
321 | xori t0, 0x1e | 434 | xori t0, 0x1e |
322 | mtc0 t0, CP0_STATUS | 435 | mtc0 t0, CP0_STATUS |
436 | #else /* CONFIG_MIPS_MT_SMTC */ | ||
437 | /* | ||
438 | * For SMTC, we need to set privilege | ||
439 | * and enable interrupts only for the | ||
440 | * current TC, using the TCStatus register. | ||
441 | */ | ||
442 | ehb | ||
443 | mfc0 t0,CP0_TCSTATUS | ||
444 | /* Fortunately CU 0 is in the same place in both registers */ | ||
445 | /* Set TCU0, TMX, TKSU (for later inversion) and IXMT */ | ||
446 | li t1, ST0_CU0 | 0x08001c00 | ||
447 | or t0,t1 | ||
448 | /* Clear TKSU *and* IXMT */ | ||
449 | xori t0, 0x00001c00 | ||
450 | mtc0 t0, CP0_TCSTATUS | ||
451 | ehb | ||
452 | /* We need to leave the global IE bit set, but clear EXL...*/ | ||
453 | mfc0 t0, CP0_STATUS | ||
454 | ori t0, ST0_EXL | ||
455 | xori t0, ST0_EXL | ||
456 | mtc0 t0, CP0_STATUS | ||
457 | /* irq_enable_hazard below should expand to EHB for 24K/34K cpus */ | ||
458 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
323 | irq_enable_hazard | 459 | irq_enable_hazard |
324 | .endm | 460 | .endm |
325 | 461 | ||
@@ -328,11 +464,56 @@ | |||
328 | * Set cp0 enable bit as sign that we're running on the kernel stack | 464 | * Set cp0 enable bit as sign that we're running on the kernel stack |
329 | */ | 465 | */ |
330 | .macro KMODE | 466 | .macro KMODE |
467 | #ifdef CONFIG_MIPS_MT_SMTC | ||
468 | /* | ||
469 | * This gets baroque in SMTC. We want to | ||
470 | * protect the non-atomic clearing of EXL | ||
471 | * with DMT/EMT, but we don't want to take | ||
472 | * an interrupt while DMT is still in effect. | ||
473 | */ | ||
474 | |||
475 | /* KMODE gets invoked from both reorder and noreorder code */ | ||
476 | .set push | ||
477 | .set mips32r2 | ||
478 | .set noreorder | ||
479 | mfc0 v0, CP0_TCSTATUS | ||
480 | andi v1, v0, TCSTATUS_IXMT | ||
481 | ori v0, TCSTATUS_IXMT | ||
482 | mtc0 v0, CP0_TCSTATUS | ||
483 | ehb | ||
484 | DMT 2 # dmt v0 | ||
485 | /* | ||
486 | * We don't know a priori if ra is "live" | ||
487 | */ | ||
488 | move t0, ra | ||
489 | jal mips_ihb | ||
490 | nop /* delay slot */ | ||
491 | move ra, t0 | ||
492 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
331 | mfc0 t0, CP0_STATUS | 493 | mfc0 t0, CP0_STATUS |
332 | li t1, ST0_CU0 | 0x1e | 494 | li t1, ST0_CU0 | 0x1e |
333 | or t0, t1 | 495 | or t0, t1 |
334 | xori t0, 0x1e | 496 | xori t0, 0x1e |
335 | mtc0 t0, CP0_STATUS | 497 | mtc0 t0, CP0_STATUS |
498 | #ifdef CONFIG_MIPS_MT_SMTC | ||
499 | ehb | ||
500 | andi v0, v0, VPECONTROL_TE | ||
501 | beqz v0, 2f | ||
502 | nop /* delay slot */ | ||
503 | emt | ||
504 | 2: | ||
505 | mfc0 v0, CP0_TCSTATUS | ||
506 | /* Clear IXMT, then OR in previous value */ | ||
507 | ori v0, TCSTATUS_IXMT | ||
508 | xori v0, TCSTATUS_IXMT | ||
509 | or v0, v1, v0 | ||
510 | mtc0 v0, CP0_TCSTATUS | ||
511 | /* | ||
512 | * irq_disable_hazard below should expand to EHB | ||
513 | * on 24K/34K CPUs | ||
514 | */ | ||
515 | .set pop | ||
516 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
336 | irq_disable_hazard | 517 | irq_disable_hazard |
337 | .endm | 518 | .endm |
338 | 519 | ||
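As a companion to the assembler above, a C-level sketch of the per-TC masking that the SMTC variants of CLI/STI rely on: interrupts are inhibited for the current thread context by setting TCStatus.IXMT rather than by clearing Status.IE. This assumes the read_c0_tcstatus()/write_c0_tcstatus() accessors and the ehb() helper from the MT register headers, and is not a drop-in replacement for the macros:

static inline void smtc_tc_irq_off(void)        /* the IXMT half of CLI */
{
        write_c0_tcstatus(read_c0_tcstatus() | TCSTATUS_IXMT);
        ehb();                                  /* make the new mask visible */
}

static inline void smtc_tc_irq_on(void)         /* the IXMT half of STI */
{
        write_c0_tcstatus(read_c0_tcstatus() & ~TCSTATUS_IXMT);
        ehb();
}
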
diff --git a/include/asm-mips/system.h b/include/asm-mips/system.h index 4097fac5ac3c..261f71d16a07 100644 --- a/include/asm-mips/system.h +++ b/include/asm-mips/system.h | |||
@@ -155,6 +155,37 @@ extern asmlinkage void *resume(void *last, void *next, void *next_ti); | |||
155 | 155 | ||
156 | struct task_struct; | 156 | struct task_struct; |
157 | 157 | ||
158 | #ifdef CONFIG_MIPS_MT_FPAFF | ||
159 | |||
160 | /* | ||
161 | * Handle the scheduler resume end of FPU affinity management. We do this | ||
162 | * inline to try to keep the overhead down. If we have been forced to run on | ||
163 | * a "CPU" with an FPU because of a previous high level of FP computation, | ||
164 | * but did not actually use the FPU during the most recent time-slice (CU1 | ||
165 | * isn't set), we undo the restriction on cpus_allowed. | ||
166 | * | ||
167 | * We're not calling set_cpus_allowed() here, because we have no need to | ||
168 | * force prompt migration - we're already switching the current CPU to a | ||
169 | * different thread. | ||
170 | */ | ||
171 | |||
172 | #define switch_to(prev,next,last) \ | ||
173 | do { \ | ||
174 | if (cpu_has_fpu && \ | ||
175 | (prev->thread.mflags & MF_FPUBOUND) && \ | ||
176 | (!(KSTK_STATUS(prev) & ST0_CU1))) { \ | ||
177 | prev->thread.mflags &= ~MF_FPUBOUND; \ | ||
178 | prev->cpus_allowed = prev->thread.user_cpus_allowed; \ | ||
179 | } \ | ||
180 | if (cpu_has_dsp) \ | ||
181 | __save_dsp(prev); \ | ||
182 | next->thread.emulated_fp = 0; \ | ||
183 | (last) = resume(prev, next, next->thread_info); \ | ||
184 | if (cpu_has_dsp) \ | ||
185 | __restore_dsp(current); \ | ||
186 | } while(0) | ||
187 | |||
188 | #else | ||
158 | #define switch_to(prev,next,last) \ | 189 | #define switch_to(prev,next,last) \ |
159 | do { \ | 190 | do { \ |
160 | if (cpu_has_dsp) \ | 191 | if (cpu_has_dsp) \ |
@@ -163,6 +194,7 @@ do { \ | |||
163 | if (cpu_has_dsp) \ | 194 | if (cpu_has_dsp) \ |
164 | __restore_dsp(current); \ | 195 | __restore_dsp(current); \ |
165 | } while(0) | 196 | } while(0) |
197 | #endif | ||
166 | 198 | ||
167 | /* | 199 | /* |
168 | * On SMP systems, when the scheduler does migration-cost autodetection, | 200 | * On SMP systems, when the scheduler does migration-cost autodetection, |
@@ -440,8 +472,8 @@ static inline unsigned long __cmpxchg(volatile void * ptr, unsigned long old, | |||
440 | extern void set_handler (unsigned long offset, void *addr, unsigned long len); | 472 | extern void set_handler (unsigned long offset, void *addr, unsigned long len); |
441 | extern void set_uncached_handler (unsigned long offset, void *addr, unsigned long len); | 473 | extern void set_uncached_handler (unsigned long offset, void *addr, unsigned long len); |
442 | extern void *set_vi_handler (int n, void *addr); | 474 | extern void *set_vi_handler (int n, void *addr); |
443 | extern void *set_vi_srs_handler (int n, void *addr, int regset); | ||
444 | extern void *set_except_vector(int n, void *addr); | 475 | extern void *set_except_vector(int n, void *addr); |
476 | extern unsigned long ebase; | ||
445 | extern void per_cpu_trap_init(void); | 477 | extern void per_cpu_trap_init(void); |
446 | 478 | ||
447 | extern NORET_TYPE void die(const char *, struct pt_regs *); | 479 | extern NORET_TYPE void die(const char *, struct pt_regs *); |
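The FPU-affinity switch_to() earlier in this hunk only undoes a restriction; the binding side is not part of this diff. A heavily hedged sketch of what that other half might look like, using only the fields the switch_to() above touches (the helper name and call site are assumptions):

static inline void example_bind_task_to_fpu(struct task_struct *t,
                                            cpumask_t fpu_cpus)
{
        /* Remember the user's scheduling policy so switch_to() can
         * restore it once the FPU stops being used (CU1 clear). */
        t->thread.user_cpus_allowed = t->cpus_allowed;
        t->cpus_allowed = fpu_cpus;     /* run only where an FPU exists */
        t->thread.mflags |= MF_FPUBOUND;
}
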
diff --git a/include/asm-mips/unistd.h b/include/asm-mips/unistd.h index b5c78a4a0192..1068fe9a0a58 100644 --- a/include/asm-mips/unistd.h +++ b/include/asm-mips/unistd.h | |||
@@ -324,16 +324,18 @@ | |||
324 | #define __NR_pselect6 (__NR_Linux + 301) | 324 | #define __NR_pselect6 (__NR_Linux + 301) |
325 | #define __NR_ppoll (__NR_Linux + 302) | 325 | #define __NR_ppoll (__NR_Linux + 302) |
326 | #define __NR_unshare (__NR_Linux + 303) | 326 | #define __NR_unshare (__NR_Linux + 303) |
327 | #define __NR_splice (__NR_Linux + 304) | ||
328 | #define __NR_sync_file_range (__NR_Linux + 305) | ||
327 | 329 | ||
328 | /* | 330 | /* |
329 | * Offset of the last Linux o32 flavoured syscall | 331 | * Offset of the last Linux o32 flavoured syscall |
330 | */ | 332 | */ |
331 | #define __NR_Linux_syscalls 303 | 333 | #define __NR_Linux_syscalls 305 |
332 | 334 | ||
333 | #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ | 335 | #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ |
334 | 336 | ||
335 | #define __NR_O32_Linux 4000 | 337 | #define __NR_O32_Linux 4000 |
336 | #define __NR_O32_Linux_syscalls 303 | 338 | #define __NR_O32_Linux_syscalls 305 |
337 | 339 | ||
338 | #if _MIPS_SIM == _MIPS_SIM_ABI64 | 340 | #if _MIPS_SIM == _MIPS_SIM_ABI64 |
339 | 341 | ||
@@ -604,16 +606,18 @@ | |||
604 | #define __NR_pselect6 (__NR_Linux + 260) | 606 | #define __NR_pselect6 (__NR_Linux + 260) |
605 | #define __NR_ppoll (__NR_Linux + 261) | 607 | #define __NR_ppoll (__NR_Linux + 261) |
606 | #define __NR_unshare (__NR_Linux + 262) | 608 | #define __NR_unshare (__NR_Linux + 262) |
609 | #define __NR_splice (__NR_Linux + 263) | ||
610 | #define __NR_sync_file_range (__NR_Linux + 264) | ||
607 | 611 | ||
608 | /* | 612 | /* |
609 | * Offset of the last Linux 64-bit flavoured syscall | 613 | * Offset of the last Linux 64-bit flavoured syscall |
610 | */ | 614 | */ |
611 | #define __NR_Linux_syscalls 262 | 615 | #define __NR_Linux_syscalls 264 |
612 | 616 | ||
613 | #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ | 617 | #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ |
614 | 618 | ||
615 | #define __NR_64_Linux 5000 | 619 | #define __NR_64_Linux 5000 |
616 | #define __NR_64_Linux_syscalls 262 | 620 | #define __NR_64_Linux_syscalls 264 |
617 | 621 | ||
618 | #if _MIPS_SIM == _MIPS_SIM_NABI32 | 622 | #if _MIPS_SIM == _MIPS_SIM_NABI32 |
619 | 623 | ||
@@ -888,16 +892,18 @@ | |||
888 | #define __NR_pselect6 (__NR_Linux + 264) | 892 | #define __NR_pselect6 (__NR_Linux + 264) |
889 | #define __NR_ppoll (__NR_Linux + 265) | 893 | #define __NR_ppoll (__NR_Linux + 265) |
890 | #define __NR_unshare (__NR_Linux + 266) | 894 | #define __NR_unshare (__NR_Linux + 266) |
895 | #define __NR_splice (__NR_Linux + 267) | ||
896 | #define __NR_sync_file_range (__NR_Linux + 268) | ||
891 | 897 | ||
892 | /* | 898 | /* |
893 | * Offset of the last N32 flavoured syscall | 899 | * Offset of the last N32 flavoured syscall |
894 | */ | 900 | */ |
895 | #define __NR_Linux_syscalls 266 | 901 | #define __NR_Linux_syscalls 268 |
896 | 902 | ||
897 | #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ | 903 | #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ |
898 | 904 | ||
899 | #define __NR_N32_Linux 6000 | 905 | #define __NR_N32_Linux 6000 |
900 | #define __NR_N32_Linux_syscalls 266 | 906 | #define __NR_N32_Linux_syscalls 268 |
901 | 907 | ||
902 | #ifndef __ASSEMBLY__ | 908 | #ifndef __ASSEMBLY__ |
903 | 909 | ||
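For reference, a userspace sketch (o32 ABI) that reaches the newly numbered splice syscall directly; on that ABI __NR_splice is 4000 + 304 = 4304. The source path and the lack of error handling are purely illustrative:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        int pipefd[2];
        int src = open("/proc/version", O_RDONLY);
        long n;

        if (src < 0 || pipe(pipefd) < 0)
                return 1;

        /* splice(fd_in, off_in, fd_out, off_out, len, flags) */
        n = syscall(4304, src, NULL, pipefd[1], NULL, 4096, 0);
        printf("spliced %ld bytes\n", n);
        return n < 0;
}
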
diff --git a/include/asm-mips/vpe.h b/include/asm-mips/vpe.h new file mode 100644 index 000000000000..c6e1b961537d --- /dev/null +++ b/include/asm-mips/vpe.h | |||
@@ -0,0 +1,37 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved. | ||
3 | * | ||
4 | * This program is free software; you can distribute it and/or modify it | ||
5 | * under the terms of the GNU General Public License (Version 2) as | ||
6 | * published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | ||
11 | * for more details. | ||
12 | * | ||
13 | * You should have received a copy of the GNU General Public License along | ||
14 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
15 | * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. | ||
16 | * | ||
17 | */ | ||
18 | |||
19 | #ifndef _ASM_VPE_H | ||
20 | #define _ASM_VPE_H | ||
21 | |||
22 | struct vpe_notifications { | ||
23 | void (*start)(int vpe); | ||
24 | void (*stop)(int vpe); | ||
25 | |||
26 | struct list_head list; | ||
27 | }; | ||
28 | |||
29 | |||
30 | extern int vpe_notify(int index, struct vpe_notifications *notify); | ||
31 | |||
32 | extern void *vpe_get_shared(int index); | ||
33 | extern int vpe_getuid(int index); | ||
34 | extern int vpe_getgid(int index); | ||
35 | extern char *vpe_getcwd(int index); | ||
36 | |||
37 | #endif /* _ASM_VPE_H */ | ||
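Finally, a hedged sketch of how a kernel-side client might use the interface declared above to learn when a program starts or stops on a VPE; the VPE index, the function names, and the assumption that the VPE loader initialises the embedded list_head are illustrative:

static void example_vpe_start(int vpe)
{
        /* e.g. locate the shared area with vpe_get_shared(vpe) */
}

static void example_vpe_stop(int vpe)
{
}

static struct vpe_notifications example_vpe_hooks = {
        .start = example_vpe_start,
        .stop  = example_vpe_stop,
};

static int example_register(void)
{
        return vpe_notify(1, &example_vpe_hooks);       /* watch VPE/TC index 1 */
}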