Diffstat (limited to 'arch/powerpc/include/asm/mmu_context.h')
-rw-r--r--	arch/powerpc/include/asm/mmu_context.h	257
1 file changed, 34 insertions, 223 deletions
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index 6b993ef452f..ab4f19263c4 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -2,237 +2,26 @@
 #define __ASM_POWERPC_MMU_CONTEXT_H
 #ifdef __KERNEL__
 
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
 #include <asm/mmu.h>
 #include <asm/cputable.h>
 #include <asm-generic/mm_hooks.h>
-
-#ifndef CONFIG_PPC64
-#include <asm/atomic.h>
-#include <linux/bitops.h>
-
-/*
- * On 32-bit PowerPC 6xx/7xx/7xxx CPUs, we use a set of 16 VSIDs
- * (virtual segment identifiers) for each context.  Although the
- * hardware supports 24-bit VSIDs, and thus >1 million contexts,
- * we only use 32,768 of them.  That is ample, since there can be
- * at most around 30,000 tasks in the system anyway, and it means
- * that we can use a bitmap to indicate which contexts are in use.
- * Using a bitmap means that we entirely avoid all of the problems
- * that we used to have when the context number overflowed,
- * particularly on SMP systems.
- *  -- paulus.
- */
-
-/*
- * This function defines the mapping from contexts to VSIDs (virtual
- * segment IDs).  We use a skew on both the context and the high 4 bits
- * of the 32-bit virtual address (the "effective segment ID") in order
- * to spread out the entries in the MMU hash table.  Note, if this
- * function is changed then arch/ppc/mm/hashtable.S will have to be
- * changed to correspond.
- */
-#define CTX_TO_VSID(ctx, va)	(((ctx) * (897 * 16) + ((va) >> 28) * 0x111) \
-				 & 0xffffff)
-
-/*
-   The MPC8xx has only 16 contexts.  We rotate through them on each
-   task switch.  A better way would be to keep track of tasks that
-   own contexts, and implement an LRU usage.  That way very active
-   tasks don't always have to pay the TLB reload overhead.  The
-   kernel pages are mapped shared, so the kernel can run on behalf
-   of any task that makes a kernel entry.  Shared does not mean they
-   are not protected, just that the ASID comparison is not performed.
-        -- Dan
-
-   The IBM4xx has 256 contexts, so we can just rotate through these
-   as a way of "switching" contexts.  If the TID of the TLB is zero,
-   the PID/TID comparison is disabled, so we can use a TID of zero
-   to represent all kernel pages as shared among all contexts.
-	-- Dan
- */
-
-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-{
-}
-
-#ifdef CONFIG_8xx
-#define NO_CONTEXT      	16
-#define LAST_CONTEXT    	15
-#define FIRST_CONTEXT    	0
-
-#elif defined(CONFIG_4xx)
-#define NO_CONTEXT      	256
-#define LAST_CONTEXT    	255
-#define FIRST_CONTEXT    	1
-
-#elif defined(CONFIG_E200) || defined(CONFIG_E500)
-#define NO_CONTEXT      	256
-#define LAST_CONTEXT    	255
-#define FIRST_CONTEXT    	1
-
-#else
-
-/* PPC 6xx, 7xx CPUs */
-#define NO_CONTEXT      	((unsigned long) -1)
-#define LAST_CONTEXT    	32767
-#define FIRST_CONTEXT    	1
-#endif
-
-/*
- * Set the current MMU context.
- * On 32-bit PowerPCs (other than the 8xx embedded chips), this is done by
- * loading up the segment registers for the user part of the address space.
- *
- * Since the PGD is immediately available, it is much faster to simply
- * pass this along as a second parameter, which is required for 8xx and
- * can be used for debugging on all processors (if you happen to have
- * an Abatron).
- */
-extern void set_context(unsigned long contextid, pgd_t *pgd);
-
-/*
- * Bitmap of contexts in use.
- * The size of this bitmap is LAST_CONTEXT + 1 bits.
- */
-extern unsigned long context_map[];
-
-/*
- * This caches the next context number that we expect to be free.
- * Its use is an optimization only, we can't rely on this context
- * number to be free, but it usually will be.
- */
-extern unsigned long next_mmu_context;
-
-/*
- * If we don't have sufficient contexts to give one to every task
- * that could be in the system, we need to be able to steal contexts.
- * These variables support that.
- */
-#if LAST_CONTEXT < 30000
-#define FEW_CONTEXTS	1
-extern atomic_t nr_free_contexts;
-extern struct mm_struct *context_mm[LAST_CONTEXT+1];
-extern void steal_context(void);
-#endif
-
-/*
- * Get a new mmu context for the address space described by `mm'.
- */
-static inline void get_mmu_context(struct mm_struct *mm)
-{
-	unsigned long ctx;
-
-	if (mm->context.id != NO_CONTEXT)
-		return;
-#ifdef FEW_CONTEXTS
-	while (atomic_dec_if_positive(&nr_free_contexts) < 0)
-		steal_context();
-#endif
-	ctx = next_mmu_context;
-	while (test_and_set_bit(ctx, context_map)) {
-		ctx = find_next_zero_bit(context_map, LAST_CONTEXT+1, ctx);
-		if (ctx > LAST_CONTEXT)
-			ctx = 0;
-	}
-	next_mmu_context = (ctx + 1) & LAST_CONTEXT;
-	mm->context.id = ctx;
-#ifdef FEW_CONTEXTS
-	context_mm[ctx] = mm;
-#endif
-}
-
-/*
- * Set up the context for a new address space.
- */
-static inline int init_new_context(struct task_struct *t, struct mm_struct *mm)
-{
-	mm->context.id = NO_CONTEXT;
-	return 0;
-}
-
-/*
- * We're finished using the context for an address space.
- */
-static inline void destroy_context(struct mm_struct *mm)
-{
-	preempt_disable();
-	if (mm->context.id != NO_CONTEXT) {
-		clear_bit(mm->context.id, context_map);
-		mm->context.id = NO_CONTEXT;
-#ifdef FEW_CONTEXTS
-		atomic_inc(&nr_free_contexts);
-#endif
-	}
-	preempt_enable();
-}
-
-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
-			     struct task_struct *tsk)
-{
-#ifdef CONFIG_ALTIVEC
-	if (cpu_has_feature(CPU_FTR_ALTIVEC))
-		asm volatile ("dssall;\n"
-#ifndef CONFIG_POWER4
-	     "sync;\n" /* G4 needs a sync here, G5 apparently not */
-#endif
-	     : : );
-#endif /* CONFIG_ALTIVEC */
-
-	tsk->thread.pgdir = next->pgd;
-
-	/* No need to flush userspace segments if the mm doesnt change */
-	if (prev == next)
-		return;
-
-	/* Setup new userspace context */
-	get_mmu_context(next);
-	set_context(next->context.id, next->pgd);
-}
-
-#define deactivate_mm(tsk,mm)	do { } while (0)
+#include <asm/cputhreads.h>
 
 /*
- * After we have set current->mm to a new value, this activates
- * the context for the new mm so we see the new mappings.
+ * Most if the context management is out of line
  */
-#define activate_mm(active_mm, mm) switch_mm(active_mm, mm, current)
-
 extern void mmu_context_init(void);
-
-
-#else
-
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/sched.h>
-
-/*
- * Copyright (C) 2001 PPC 64 Team, IBM Corp
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-static inline void enter_lazy_tlb(struct mm_struct *mm,
-				  struct task_struct *tsk)
-{
-}
-
-/*
- * The proto-VSID space has 2^35 - 1 segments available for user mappings.
- * Each segment contains 2^28 bytes.  Each context maps 2^44 bytes,
- * so we can support 2^19-1 contexts (19 == 35 + 28 - 44).
- */
-#define NO_CONTEXT	0
-#define MAX_CONTEXT	((1UL << 19) - 1)
-
 extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
 extern void destroy_context(struct mm_struct *mm);
 
+extern void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next);
 extern void switch_stab(struct task_struct *tsk, struct mm_struct *mm);
 extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
+extern void set_context(unsigned long id, pgd_t *pgd);
 
 /*
  * switch_mm is the entry point called from the architecture independent
@@ -241,22 +30,39 @@ extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 			     struct task_struct *tsk)
 {
-	if (!cpu_isset(smp_processor_id(), next->cpu_vm_mask))
-		cpu_set(smp_processor_id(), next->cpu_vm_mask);
+	/* Mark this context has been used on the new CPU */
+	cpu_set(smp_processor_id(), next->cpu_vm_mask);
+
+	/* 32-bit keeps track of the current PGDIR in the thread struct */
+#ifdef CONFIG_PPC32
+	tsk->thread.pgdir = next->pgd;
+#endif /* CONFIG_PPC32 */
 
-	/* No need to flush userspace segments if the mm doesnt change */
+	/* Nothing else to do if we aren't actually switching */
 	if (prev == next)
 		return;
 
+	/* We must stop all altivec streams before changing the HW
+	 * context
+	 */
 #ifdef CONFIG_ALTIVEC
 	if (cpu_has_feature(CPU_FTR_ALTIVEC))
 		asm volatile ("dssall");
 #endif /* CONFIG_ALTIVEC */
 
+	/* The actual HW switching method differs between the various
+	 * sub architectures.
+	 */
+#ifdef CONFIG_PPC_STD_MMU_64
 	if (cpu_has_feature(CPU_FTR_SLB))
 		switch_slb(tsk, next);
 	else
 		switch_stab(tsk, next);
+#else
+	/* Out of line for now */
+	switch_mmu_context(prev, next);
+#endif
+
 }
 
 #define deactivate_mm(tsk,mm)	do { } while (0)
@@ -274,6 +80,11 @@ static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
 	local_irq_restore(flags);
 }
 
-#endif /* CONFIG_PPC64 */
+/* We don't currently use enter_lazy_tlb() for anything */
+static inline void enter_lazy_tlb(struct mm_struct *mm,
+				  struct task_struct *tsk)
+{
+}
+
 #endif /* __KERNEL__ */
 #endif /* __ASM_POWERPC_MMU_CONTEXT_H */
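
For illustration only: the deleted 32-bit code above allocates context numbers from a bitmap (context_map, next_mmu_context) and spreads the resulting VSIDs across the hash table with the CTX_TO_VSID skew. The standalone user-space sketch below mimics that removed logic so it can be run and inspected outside the kernel. It is a minimal sketch, not the out-of-line replacement this commit introduces; the bit helpers are simplified, non-atomic stand-ins for the kernel's test_and_set_bit()/find_next_zero_bit(), and the FEW_CONTEXTS stealing path is omitted.

/* ctx_sketch.c -- user-space illustration of the removed 32-bit
 * context allocator and VSID skew.  Values mirror the old PPC 6xx/7xx
 * definitions (FIRST_CONTEXT 1, LAST_CONTEXT 32767).
 */
#include <stdio.h>

#define LAST_CONTEXT	32767
#define BITS_PER_LONG	(8 * sizeof(unsigned long))

static unsigned long context_map[(LAST_CONTEXT + 1) / BITS_PER_LONG];
static unsigned long next_mmu_context = 1;	/* FIRST_CONTEXT */

/* Simplified, non-atomic stand-in for the kernel helper */
static int test_and_set_bit(unsigned long nr, unsigned long *map)
{
	unsigned long mask = 1UL << (nr % BITS_PER_LONG);
	unsigned long *word = map + nr / BITS_PER_LONG;
	int old = (*word & mask) != 0;

	*word |= mask;
	return old;
}

/* Simplified, non-atomic stand-in for the kernel helper */
static unsigned long find_next_zero_bit(const unsigned long *map,
					unsigned long size, unsigned long start)
{
	unsigned long nr;

	for (nr = start; nr < size; nr++)
		if (!(map[nr / BITS_PER_LONG] & (1UL << (nr % BITS_PER_LONG))))
			return nr;
	return size;	/* nothing free at or above 'start' */
}

/* Same structure as the deleted get_mmu_context(), minus FEW_CONTEXTS */
static unsigned long get_context(void)
{
	unsigned long ctx = next_mmu_context;

	while (test_and_set_bit(ctx, context_map)) {
		ctx = find_next_zero_bit(context_map, LAST_CONTEXT + 1, ctx);
		if (ctx > LAST_CONTEXT)
			ctx = 0;
	}
	next_mmu_context = (ctx + 1) & LAST_CONTEXT;
	return ctx;
}

/* The skew the old header used to spread entries in the hash table */
#define CTX_TO_VSID(ctx, va)	(((ctx) * (897 * 16) + ((va) >> 28) * 0x111) \
				 & 0xffffff)

int main(void)
{
	unsigned long ctx = get_context();
	unsigned long ea;

	printf("allocated context %lu\n", ctx);
	for (ea = 0; ea < 0x40000000UL; ea += 0x10000000UL)
		printf("  ea 0x%08lx -> VSID 0x%06lx\n", ea,
		       (unsigned long)CTX_TO_VSID(ctx, ea));
	return 0;
}

Note that the "(ctx + 1) & LAST_CONTEXT" wrap in the old code only works because LAST_CONTEXT + 1 is a power of two, which holds for every variant the deleted header defined (16, 256 and 32768 contexts).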