Diffstat (limited to 'arch/mn10300/include/asm/mmu_context.h')
-rw-r--r--   arch/mn10300/include/asm/mmu_context.h | 73
1 file changed, 48 insertions(+), 25 deletions(-)
diff --git a/arch/mn10300/include/asm/mmu_context.h b/arch/mn10300/include/asm/mmu_context.h
index cb294c244de3..c8f6c82672ad 100644
--- a/arch/mn10300/include/asm/mmu_context.h
+++ b/arch/mn10300/include/asm/mmu_context.h
@@ -27,28 +27,38 @@
 #include <asm/tlbflush.h>
 #include <asm-generic/mm_hooks.h>
 
+#define MMU_CONTEXT_TLBPID_NR	256
 #define MMU_CONTEXT_TLBPID_MASK	0x000000ffUL
 #define MMU_CONTEXT_VERSION_MASK	0xffffff00UL
 #define MMU_CONTEXT_FIRST_VERSION	0x00000100UL
 #define MMU_NO_CONTEXT	0x00000000UL
-
-extern unsigned long mmu_context_cache[NR_CPUS];
-#define mm_context(mm)	(mm->context.tlbpid[smp_processor_id()])
+#define MMU_CONTEXT_TLBPID_LOCK_NR	0
 
 #define enter_lazy_tlb(mm, tsk)	do {} while (0)
 
+static inline void cpu_ran_vm(int cpu, struct mm_struct *mm)
+{
 #ifdef CONFIG_SMP
-#define cpu_ran_vm(cpu, mm) \
-	cpumask_set_cpu((cpu), mm_cpumask(mm))
-#define cpu_maybe_ran_vm(cpu, mm) \
-	cpumask_test_and_set_cpu((cpu), mm_cpumask(mm))
+	cpumask_set_cpu(cpu, mm_cpumask(mm));
+#endif
+}
+
+static inline bool cpu_maybe_ran_vm(int cpu, struct mm_struct *mm)
+{
+#ifdef CONFIG_SMP
+	return cpumask_test_and_set_cpu(cpu, mm_cpumask(mm));
 #else
-#define cpu_ran_vm(cpu, mm)	do {} while (0)
-#define cpu_maybe_ran_vm(cpu, mm)	true
-#endif /* CONFIG_SMP */
+	return true;
+#endif
+}
 
-/*
- * allocate an MMU context
+#ifdef CONFIG_MN10300_TLB_USE_PIDR
+extern unsigned long mmu_context_cache[NR_CPUS];
+#define mm_context(mm)	(mm->context.tlbpid[smp_processor_id()])
+
+/**
+ * allocate_mmu_context - Allocate storage for the arch-specific MMU data
+ * @mm: The userspace VM context being set up
  */
 static inline unsigned long allocate_mmu_context(struct mm_struct *mm)
 {
@@ -58,7 +68,7 @@ static inline unsigned long allocate_mmu_context(struct mm_struct *mm)
 	if (!(mc & MMU_CONTEXT_TLBPID_MASK)) {
 		/* we exhausted the TLB PIDs of this version on this CPU, so we
 		 * flush this CPU's TLB in its entirety and start new cycle */
-		flush_tlb_all();
+		local_flush_tlb_all();
 
 		/* fix the TLB version if needed (we avoid version #0 so as to
 		 * distingush MMU_NO_CONTEXT) */
@@ -101,22 +111,34 @@ static inline int init_new_context(struct task_struct *tsk,
 }
 
 /*
- * destroy context related info for an mm_struct that is about to be put to
- * rest
- */
-#define destroy_context(mm) do { } while (0)
-
-/*
  * after we have set current->mm to a new value, this activates the context for
  * the new mm so we see the new mappings.
  */
-static inline void activate_context(struct mm_struct *mm, int cpu)
+static inline void activate_context(struct mm_struct *mm)
 {
 	PIDR = get_mmu_context(mm) & MMU_CONTEXT_TLBPID_MASK;
 }
+#else /* CONFIG_MN10300_TLB_USE_PIDR */
 
-/*
- * change between virtual memory sets
+#define init_new_context(tsk, mm)	(0)
+#define activate_context(mm)	local_flush_tlb()
+
+#endif /* CONFIG_MN10300_TLB_USE_PIDR */
+
+/**
+ * destroy_context - Destroy mm context information
+ * @mm: The MM being destroyed.
+ *
+ * Destroy context related info for an mm_struct that is about to be put to
+ * rest
+ */
+#define destroy_context(mm) do {} while (0)
+
+/**
+ * switch_mm - Change between userspace virtual memory contexts
+ * @prev: The outgoing MM context.
+ * @next: The incoming MM context.
+ * @tsk: The incoming task.
  */
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 			     struct task_struct *tsk)
@@ -124,11 +146,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	int cpu = smp_processor_id();
 
 	if (prev != next) {
+#ifdef CONFIG_SMP
+		per_cpu(cpu_tlbstate, cpu).active_mm = next;
+#endif
 		cpu_ran_vm(cpu, next);
-		activate_context(next, cpu);
 		PTBR = (unsigned long) next->pgd;
-	} else if (!cpu_maybe_ran_vm(cpu, next)) {
-		activate_context(next, cpu);
+		activate_context(next);
 	}
 }
 