diff options
author | Paul Mundt <lethal@linux-sh.org> | 2008-07-28 19:09:44 -0400 |
---|---|---|
committer | Paul Mundt <lethal@linux-sh.org> | 2008-07-28 19:09:44 -0400 |
commit | f15cbe6f1a4b4d9df59142fc8e4abb973302cf44 (patch) | |
tree | 774d7b11abaaf33561ab8268bf51ddd9ceb79025 /include/asm-sh/mmu_context.h | |
parent | 25326277d8d1393d1c66240e6255aca780f9e3eb (diff) |
sh: migrate to arch/sh/include/
This follows the sparc changes a439fe51a1f8eb087c22dd24d69cebae4a3addac.
Most of the moving about was done with Sam's directions at:
http://marc.info/?l=linux-sh&m=121724823706062&w=2
with subsequent hacking and fixups entirely my fault.
Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'include/asm-sh/mmu_context.h')
-rw-r--r-- | include/asm-sh/mmu_context.h | 185 |
1 file changed, 0 insertions, 185 deletions
diff --git a/include/asm-sh/mmu_context.h b/include/asm-sh/mmu_context.h deleted file mode 100644 index 8589a50febd0..000000000000 --- a/include/asm-sh/mmu_context.h +++ /dev/null | |||
@@ -1,185 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 1999 Niibe Yutaka | ||
3 | * Copyright (C) 2003 - 2007 Paul Mundt | ||
4 | * | ||
5 | * ASID handling idea taken from MIPS implementation. | ||
6 | */ | ||
7 | #ifndef __ASM_SH_MMU_CONTEXT_H | ||
8 | #define __ASM_SH_MMU_CONTEXT_H | ||
9 | |||
10 | #ifdef __KERNEL__ | ||
11 | #include <asm/cpu/mmu_context.h> | ||
12 | #include <asm/tlbflush.h> | ||
13 | #include <asm/uaccess.h> | ||
14 | #include <asm/io.h> | ||
15 | #include <asm-generic/mm_hooks.h> | ||
16 | |||
/*
 * The MMU "context" consists of two things:
 * (a) TLB cache version (or round, cycle whatever expression you like)
 * (b) ASID (Address Space IDentifier)
 *
 * Both are packed into a single unsigned long: the low 8 bits hold the
 * hardware ASID, the upper 24 bits the generation ("version") number.
 */
#define MMU_CONTEXT_ASID_MASK		0x000000ff
#define MMU_CONTEXT_VERSION_MASK	0xffffff00
#define MMU_CONTEXT_FIRST_VERSION	0x00000100
#define NO_CONTEXT			0	/* mm has no context allocated yet */

/* ASID is 8-bit value, so it can't be 0x100 */
#define MMU_NO_ASID			0x100

/* Per-cpu ASID/version allocation counter, kept in cpu_data. */
#define asid_cache(cpu)		(cpu_data[cpu].asid_cache)
31 | |||
#ifdef CONFIG_MMU
/* Per-cpu context (version | ASID) stored in the mm_struct. */
#define cpu_context(cpu, mm)	((mm)->context.id[cpu])

/* Low 8 bits of the context: the hardware ASID for this mm on @cpu. */
#define cpu_asid(cpu, mm)	\
	(cpu_context((cpu), (mm)) & MMU_CONTEXT_ASID_MASK)

/*
 * Virtual Page Number mask
 */
#define MMU_VPN_MASK	0xfffff000

/* Pull in the 32-bit or 64-bit (SH-5) implementation specifics. */
#if defined(CONFIG_SUPERH32)
#include "mmu_context_32.h"
#else
#include "mmu_context_64.h"
#endif
48 | |||
49 | /* | ||
50 | * Get MMU context if needed. | ||
51 | */ | ||
52 | static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu) | ||
53 | { | ||
54 | unsigned long asid = asid_cache(cpu); | ||
55 | |||
56 | /* Check if we have old version of context. */ | ||
57 | if (((cpu_context(cpu, mm) ^ asid) & MMU_CONTEXT_VERSION_MASK) == 0) | ||
58 | /* It's up to date, do nothing */ | ||
59 | return; | ||
60 | |||
61 | /* It's old, we need to get new context with new version. */ | ||
62 | if (!(++asid & MMU_CONTEXT_ASID_MASK)) { | ||
63 | /* | ||
64 | * We exhaust ASID of this version. | ||
65 | * Flush all TLB and start new cycle. | ||
66 | */ | ||
67 | flush_tlb_all(); | ||
68 | |||
69 | #ifdef CONFIG_SUPERH64 | ||
70 | /* | ||
71 | * The SH-5 cache uses the ASIDs, requiring both the I and D | ||
72 | * cache to be flushed when the ASID is exhausted. Weak. | ||
73 | */ | ||
74 | flush_cache_all(); | ||
75 | #endif | ||
76 | |||
77 | /* | ||
78 | * Fix version; Note that we avoid version #0 | ||
79 | * to distingush NO_CONTEXT. | ||
80 | */ | ||
81 | if (!asid) | ||
82 | asid = MMU_CONTEXT_FIRST_VERSION; | ||
83 | } | ||
84 | |||
85 | cpu_context(cpu, mm) = asid_cache(cpu) = asid; | ||
86 | } | ||
87 | |||
88 | /* | ||
89 | * Initialize the context related info for a new mm_struct | ||
90 | * instance. | ||
91 | */ | ||
92 | static inline int init_new_context(struct task_struct *tsk, | ||
93 | struct mm_struct *mm) | ||
94 | { | ||
95 | int i; | ||
96 | |||
97 | for (i = 0; i < num_online_cpus(); i++) | ||
98 | cpu_context(i, mm) = NO_CONTEXT; | ||
99 | |||
100 | return 0; | ||
101 | } | ||
102 | |||
/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static inline void activate_context(struct mm_struct *mm, unsigned int cpu)
{
	/* Allocate/refresh the context first, then load its ASID register. */
	get_mmu_context(mm, cpu);
	set_asid(cpu_asid(cpu, mm));
}
112 | |||
113 | static inline void switch_mm(struct mm_struct *prev, | ||
114 | struct mm_struct *next, | ||
115 | struct task_struct *tsk) | ||
116 | { | ||
117 | unsigned int cpu = smp_processor_id(); | ||
118 | |||
119 | if (likely(prev != next)) { | ||
120 | cpu_set(cpu, next->cpu_vm_mask); | ||
121 | set_TTB(next->pgd); | ||
122 | activate_context(next, cpu); | ||
123 | } else | ||
124 | if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) | ||
125 | activate_context(next, cpu); | ||
126 | } | ||
#else
/*
 * No MMU configured: context handling collapses to no-ops / constant
 * values so generic code can call these unconditionally.
 */
#define get_mmu_context(mm)		do { } while (0)
#define init_new_context(tsk,mm)	(0)
#define destroy_context(mm)		do { } while (0)
#define set_asid(asid)			do { } while (0)
#define get_asid()			(0)
#define cpu_asid(cpu, mm)		({ (void)cpu; 0; })
#define switch_and_save_asid(asid)	(0)
#define set_TTB(pgd)			do { } while (0)
#define get_TTB()			(0)
#define activate_context(mm,cpu)	do { } while (0)
#define switch_mm(prev,next,tsk)	do { } while (0)
#endif /* CONFIG_MMU */
140 | |||
/* Common wrappers, identical with or without an MMU. */
#define activate_mm(prev, next)		switch_mm((prev),(next),NULL)
#define deactivate_mm(tsk,mm)		do { } while (0)
#define enter_lazy_tlb(mm,tsk)		do { } while (0)
144 | |||
#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4)
/*
 * If this processor has an MMU, we need methods to turn it off/on ..
 * paging_init() will also have to be updated for the processor in
 * question.
 */

/*
 * Enable the MMU: write the initial control value to MMUCR and, if no
 * ASID generation has been started yet on this CPU, seed asid_cache
 * with the first version before loading the ASID register.
 */
static inline void enable_mmu(void)
{
	unsigned int cpu = smp_processor_id();

	/* Enable MMU */
	ctrl_outl(MMU_CONTROL_INIT, MMUCR);
	ctrl_barrier();	/* make the MMUCR write take effect before continuing */

	/* First use on this CPU: begin the first ASID version cycle. */
	if (asid_cache(cpu) == NO_CONTEXT)
		asid_cache(cpu) = MMU_CONTEXT_FIRST_VERSION;

	set_asid(asid_cache(cpu) & MMU_CONTEXT_ASID_MASK);
}
164 | |||
165 | static inline void disable_mmu(void) | ||
166 | { | ||
167 | unsigned long cr; | ||
168 | |||
169 | cr = ctrl_inl(MMUCR); | ||
170 | cr &= ~MMU_CONTROL_INIT; | ||
171 | ctrl_outl(cr, MMUCR); | ||
172 | |||
173 | ctrl_barrier(); | ||
174 | } | ||
#else
/*
 * MMU control handlers for processors lacking memory
 * management hardware.
 */
#define enable_mmu()	do { } while (0)
#define disable_mmu()	do { } while (0)
#endif
183 | |||
184 | #endif /* __KERNEL__ */ | ||
185 | #endif /* __ASM_SH_MMU_CONTEXT_H */ | ||