Diffstat (limited to 'arch/m32r/include/asm/mmu_context.h')
-rw-r--r-- | arch/m32r/include/asm/mmu_context.h | 164
1 file changed, 164 insertions, 0 deletions
diff --git a/arch/m32r/include/asm/mmu_context.h b/arch/m32r/include/asm/mmu_context.h
new file mode 100644
index 000000000000..91909e5dd9d0
--- /dev/null
+++ b/arch/m32r/include/asm/mmu_context.h
@@ -0,0 +1,164 @@
#ifndef _ASM_M32R_MMU_CONTEXT_H
#define _ASM_M32R_MMU_CONTEXT_H
#ifdef __KERNEL__

#include <asm/m32r.h>

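/*
 * An mm's MMU context is a single word: the low 8 bits hold the
 * hardware ASID that gets programmed into the MMU, and the upper
 * 24 bits hold a "version" (generation) number that is bumped each
 * time the ASID space wraps.  NO_CONTEXT (version 0) marks an mm
 * that has not yet been assigned a context.
 */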
#define MMU_CONTEXT_ASID_MASK      (0x000000FF)
#define MMU_CONTEXT_VERSION_MASK   (0xFFFFFF00)
#define MMU_CONTEXT_FIRST_VERSION  (0x00000100)
#define NO_CONTEXT                 (0x00000000)

#ifndef __ASSEMBLY__

#include <asm/atomic.h>
#include <asm/pgalloc.h>
#include <asm/mmu.h>
#include <asm/tlbflush.h>
#include <asm-generic/mm_hooks.h>

/*
 * Cache of the MMU context last used.
 */
#ifndef CONFIG_SMP
extern unsigned long mmu_context_cache_dat;
#define mmu_context_cache       mmu_context_cache_dat
#define mm_context(mm)          mm->context
#else /* not CONFIG_SMP */
extern unsigned long mmu_context_cache_dat[];
#define mmu_context_cache       mmu_context_cache_dat[smp_processor_id()]
#define mm_context(mm)          mm->context[smp_processor_id()]
#endif /* not CONFIG_SMP */

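/*
 * Helpers for loading a software-managed TLB entry: the tag word is
 * the virtual page number combined with the current ASID, and the
 * data word is the physical page/protection bits with _PAGE_PRESENT
 * forced on.
 */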
#define set_tlb_tag(entry, tag)         (*entry = (tag & PAGE_MASK)|get_asid())
#define set_tlb_data(entry, data)       (*entry = (data | _PAGE_PRESENT))

#ifdef CONFIG_MMU
#define enter_lazy_tlb(mm, tsk)         do { } while (0)

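/*
 * Allocate a fresh context by bumping the (per-CPU on SMP) context
 * cache.  When the 8-bit ASID field wraps, every ASID may be stale,
 * so the local TLB is flushed entirely and the version part of the
 * context advances; mms still carrying an old version are then
 * re-allocated lazily by get_mmu_context().
 */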
static inline void get_new_mmu_context(struct mm_struct *mm)
{
        unsigned long mc = ++mmu_context_cache;

        if (!(mc & MMU_CONTEXT_ASID_MASK)) {
                /* We have exhausted the ASIDs of this version.
                   Flush the whole TLB and start a new cycle. */
                local_flush_tlb_all();
                /* Fix the version if needed.
                   Note that version #0 is avoided to distinguish NO_CONTEXT. */
                if (!mc)
                        mmu_context_cache = mc = MMU_CONTEXT_FIRST_VERSION;
        }
        mm_context(mm) = mc;
}

/*
 * Get an MMU context if needed.
 */
static inline void get_mmu_context(struct mm_struct *mm)
{
        if (mm) {
                unsigned long mc = mmu_context_cache;

                /* Check whether this mm still carries a context from an
                   old version; if so, allocate a new context with the
                   current version. */
                if ((mm_context(mm) ^ mc) & MMU_CONTEXT_VERSION_MASK)
                        get_new_mmu_context(mm);
        }
}

/*
 * Initialize the context-related info for a new mm_struct
 * instance.
 */
static inline int init_new_context(struct task_struct *tsk,
                                   struct mm_struct *mm)
{
#ifndef CONFIG_SMP
        mm->context = NO_CONTEXT;
#else /* CONFIG_SMP */
        int num_cpus = num_online_cpus();
        int i;

        for (i = 0 ; i < num_cpus ; i++)
                mm->context[i] = NO_CONTEXT;
#endif /* CONFIG_SMP */

        return 0;
}

/*
 * Destroy the context-related info for an mm_struct that is about
 * to be put to rest.
 */
#define destroy_context(mm)             do { } while (0)

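/*
 * MASID is the address of the MMU's ASID register (expected to be
 * provided by <asm/m32r.h>, included above); the accessors below
 * simply read and write its low 8-bit ASID field.
 */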
static inline void set_asid(unsigned long asid)
{
        *(volatile unsigned long *)MASID = (asid & MMU_CONTEXT_ASID_MASK);
}

static inline unsigned long get_asid(void)
{
        unsigned long asid;

        asid = *(volatile long *)MASID;
        asid &= MMU_CONTEXT_ASID_MASK;

        return asid;
}

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static inline void activate_context(struct mm_struct *mm)
{
        get_mmu_context(mm);
        set_asid(mm_context(mm) & MMU_CONTEXT_ASID_MASK);
}

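/*
 * On a real mm switch, point the page-table base register (MPTB) at
 * next->pgd and program an ASID for next.  On SMP, cpu_vm_mask tracks
 * which CPUs have this mm loaded: a CPU whose bit was not yet set must
 * still activate a context even when prev == next.
 */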
static inline void switch_mm(struct mm_struct *prev,
                             struct mm_struct *next, struct task_struct *tsk)
{
#ifdef CONFIG_SMP
        int cpu = smp_processor_id();
#endif /* CONFIG_SMP */

        if (prev != next) {
#ifdef CONFIG_SMP
                cpu_set(cpu, next->cpu_vm_mask);
#endif /* CONFIG_SMP */
                /* Set MPTB = next->pgd */
                *(volatile unsigned long *)MPTB = (unsigned long)next->pgd;
                activate_context(next);
        }
#ifdef CONFIG_SMP
        else if (!cpu_test_and_set(cpu, next->cpu_vm_mask))
                activate_context(next);
#endif /* CONFIG_SMP */
}

#define deactivate_mm(tsk, mm)          do { } while (0)

#define activate_mm(prev, next)         \
        switch_mm((prev), (next), NULL)

#else /* not CONFIG_MMU */
#define get_mmu_context(mm)             do { } while (0)
#define init_new_context(tsk, mm)       (0)
#define destroy_context(mm)             do { } while (0)
#define set_asid(asid)                  do { } while (0)
#define get_asid()                      (0)
#define activate_context(mm)            do { } while (0)
#define switch_mm(prev, next, tsk)      do { } while (0)
#define deactivate_mm(mm, tsk)          do { } while (0)
#define activate_mm(prev, next)         do { } while (0)
#define enter_lazy_tlb(mm, tsk)         do { } while (0)
#endif /* not CONFIG_MMU */

#endif /* not __ASSEMBLY__ */

#endif /* __KERNEL__ */
#endif /* _ASM_M32R_MMU_CONTEXT_H */