-rw-r--r--   arch/powerpc/mm/mmu_context_32.c               2
-rw-r--r--   arch/powerpc/mm/ppc_mmu_32.c                   2
-rw-r--r--   arch/powerpc/mm/tlb_32.c                       6
-rw-r--r--   arch/powerpc/platforms/powermac/cpufreq_32.c   2
-rw-r--r--   arch/powerpc/platforms/powermac/setup.c        2
-rw-r--r--   arch/ppc/mm/mmu_context.c                      2
-rw-r--r--   drivers/macintosh/via-pmu.c                    4
-rw-r--r--   include/asm-ppc/mmu.h                          6
-rw-r--r--   include/asm-ppc/mmu_context.h                  27
-rw-r--r--   include/asm-ppc/pgtable.h                      2
10 files changed, 31 insertions, 24 deletions
diff --git a/arch/powerpc/mm/mmu_context_32.c b/arch/powerpc/mm/mmu_context_32.c
index a8816e0f6a86..e326e4249e1a 100644
--- a/arch/powerpc/mm/mmu_context_32.c
+++ b/arch/powerpc/mm/mmu_context_32.c
@@ -30,7 +30,7 @@
 #include <asm/mmu_context.h>
 #include <asm/tlbflush.h>
 
-mm_context_t next_mmu_context;
+unsigned long next_mmu_context;
 unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
 #ifdef FEW_CONTEXTS
 atomic_t nr_free_contexts;
diff --git a/arch/powerpc/mm/ppc_mmu_32.c b/arch/powerpc/mm/ppc_mmu_32.c
index ed7fcfe5fd37..1df731e42b50 100644
--- a/arch/powerpc/mm/ppc_mmu_32.c
+++ b/arch/powerpc/mm/ppc_mmu_32.c
@@ -190,7 +190,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
 		return;
 	pmd = pmd_offset(pgd_offset(mm, ea), ea);
 	if (!pmd_none(*pmd))
-		add_hash_page(mm->context, ea, pmd_val(*pmd));
+		add_hash_page(mm->context.id, ea, pmd_val(*pmd));
 }
 
 /*
diff --git a/arch/powerpc/mm/tlb_32.c b/arch/powerpc/mm/tlb_32.c
index ad580f3742e5..02eb23e036d5 100644
--- a/arch/powerpc/mm/tlb_32.c
+++ b/arch/powerpc/mm/tlb_32.c
@@ -42,7 +42,7 @@ void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, unsigned long addr)
 
 	if (Hash != 0) {
 		ptephys = __pa(ptep) & PAGE_MASK;
-		flush_hash_pages(mm->context, addr, ptephys, 1);
+		flush_hash_pages(mm->context.id, addr, ptephys, 1);
 	}
 }
 
@@ -102,7 +102,7 @@ static void flush_range(struct mm_struct *mm, unsigned long start,
 	pmd_t *pmd;
 	unsigned long pmd_end;
 	int count;
-	unsigned int ctx = mm->context;
+	unsigned int ctx = mm->context.id;
 
 	if (Hash == 0) {
 		_tlbia();
@@ -172,7 +172,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
 	mm = (vmaddr < TASK_SIZE)? vma->vm_mm: &init_mm;
 	pmd = pmd_offset(pgd_offset(mm, vmaddr), vmaddr);
 	if (!pmd_none(*pmd))
-		flush_hash_pages(mm->context, vmaddr, pmd_val(*pmd), 1);
+		flush_hash_pages(mm->context.id, vmaddr, pmd_val(*pmd), 1);
 	FINISH_FLUSH;
 }
 
diff --git a/arch/powerpc/platforms/powermac/cpufreq_32.c b/arch/powerpc/platforms/powermac/cpufreq_32.c
index cfd6527a0d7e..af2a8f9f1222 100644
--- a/arch/powerpc/platforms/powermac/cpufreq_32.c
+++ b/arch/powerpc/platforms/powermac/cpufreq_32.c
@@ -314,7 +314,7 @@ static int pmu_set_cpu_speed(int low_speed)
 		_set_L3CR(save_l3cr);
 
 	/* Restore userland MMU context */
-	set_context(current->active_mm->context, current->active_mm->pgd);
+	set_context(current->active_mm->context.id, current->active_mm->pgd);
 
 #ifdef DEBUG_FREQ
 	printk(KERN_DEBUG "HID1, after: %x\n", mfspr(SPRN_HID1));
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
index b9200fb07815..9cc7db7a8bdc 100644
--- a/arch/powerpc/platforms/powermac/setup.c
+++ b/arch/powerpc/platforms/powermac/setup.c
@@ -458,7 +458,7 @@ static int pmac_pm_finish(suspend_state_t state)
 	printk(KERN_DEBUG "%s(%d)\n", __FUNCTION__, state);
 
 	/* Restore userland MMU context */
-	set_context(current->active_mm->context, current->active_mm->pgd);
+	set_context(current->active_mm->context.id, current->active_mm->pgd);
 
 	return 0;
 }
diff --git a/arch/ppc/mm/mmu_context.c b/arch/ppc/mm/mmu_context.c
index b4a4b3f02a1c..8784f3715032 100644
--- a/arch/ppc/mm/mmu_context.c
+++ b/arch/ppc/mm/mmu_context.c
@@ -30,7 +30,7 @@
 #include <asm/mmu_context.h>
 #include <asm/tlbflush.h>
 
-mm_context_t next_mmu_context;
+unsigned long next_mmu_context;
 unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
 #ifdef FEW_CONTEXTS
 atomic_t nr_free_contexts;
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index 0b5ff553e39a..c63d4e7984be 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -2268,7 +2268,7 @@ static int powerbook_sleep_grackle(void)
 		_set_L2CR(save_l2cr);
 
 	/* Restore userland MMU context */
-	set_context(current->active_mm->context, current->active_mm->pgd);
+	set_context(current->active_mm->context.id, current->active_mm->pgd);
 
 	/* Power things up */
 	pmu_unlock();
@@ -2366,7 +2366,7 @@ powerbook_sleep_Core99(void)
 		_set_L3CR(save_l3cr);
 
 	/* Restore userland MMU context */
-	set_context(current->active_mm->context, current->active_mm->pgd);
+	set_context(current->active_mm->context.id, current->active_mm->pgd);
 
 	/* Tell PMU we are ready */
 	pmu_unlock();
diff --git a/include/asm-ppc/mmu.h b/include/asm-ppc/mmu.h
index 9205db404c7a..80ae60481fb7 100644
--- a/include/asm-ppc/mmu.h
+++ b/include/asm-ppc/mmu.h
@@ -24,8 +24,10 @@ extern phys_addr_t fixup_bigphys_addr(phys_addr_t, phys_addr_t);
 #define PHYS_FMT	"%16Lx"
 #endif
 
-/* Default "unsigned long" context */
-typedef unsigned long mm_context_t;
+typedef struct {
+	unsigned long id;
+	unsigned long vdso_base;
+} mm_context_t;
 
 /* Hardware Page Table Entry */
 typedef struct _PTE {
diff --git a/include/asm-ppc/mmu_context.h b/include/asm-ppc/mmu_context.h
index 4f152cca13c1..4454ecf1aed5 100644
--- a/include/asm-ppc/mmu_context.h
+++ b/include/asm-ppc/mmu_context.h
@@ -71,7 +71,7 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 #else
 
 /* PPC 6xx, 7xx CPUs */
-#define NO_CONTEXT	((mm_context_t) -1)
+#define NO_CONTEXT	((unsigned long) -1)
 #define LAST_CONTEXT	32767
 #define FIRST_CONTEXT	1
 #endif
@@ -86,7 +86,7 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
  * can be used for debugging on all processors (if you happen to have
  * an Abatron).
  */
-extern void set_context(mm_context_t context, pgd_t *pgd);
+extern void set_context(unsigned long contextid, pgd_t *pgd);
 
 /*
  * Bitmap of contexts in use.
@@ -99,7 +99,7 @@ extern unsigned long context_map[];
  * Its use is an optimization only, we can't rely on this context
  * number to be free, but it usually will be.
  */
-extern mm_context_t next_mmu_context;
+extern unsigned long next_mmu_context;
 
 /*
  * If we don't have sufficient contexts to give one to every task
@@ -118,9 +118,9 @@ extern void steal_context(void);
  */
 static inline void get_mmu_context(struct mm_struct *mm)
 {
-	mm_context_t ctx;
+	unsigned long ctx;
 
-	if (mm->context != NO_CONTEXT)
+	if (mm->context.id != NO_CONTEXT)
 		return;
 #ifdef FEW_CONTEXTS
 	while (atomic_dec_if_positive(&nr_free_contexts) < 0)
@@ -133,7 +133,7 @@ static inline void get_mmu_context(struct mm_struct *mm)
 			ctx = 0;
 	}
 	next_mmu_context = (ctx + 1) & LAST_CONTEXT;
-	mm->context = ctx;
+	mm->context.id = ctx;
 #ifdef FEW_CONTEXTS
 	context_mm[ctx] = mm;
 #endif
@@ -142,7 +142,12 @@ static inline void get_mmu_context(struct mm_struct *mm)
 /*
  * Set up the context for a new address space.
  */
-#define init_new_context(tsk,mm)	(((mm)->context = NO_CONTEXT), 0)
+static inline int init_new_context(struct task_struct *t, struct mm_struct *mm)
+{
+	mm->context.id = NO_CONTEXT;
+	mm->context.vdso_base = 0;
+	return 0;
+}
 
 /*
  * We're finished using the context for an address space.
@@ -150,9 +155,9 @@ static inline void get_mmu_context(struct mm_struct *mm)
 static inline void destroy_context(struct mm_struct *mm)
 {
 	preempt_disable();
-	if (mm->context != NO_CONTEXT) {
-		clear_bit(mm->context, context_map);
-		mm->context = NO_CONTEXT;
+	if (mm->context.id != NO_CONTEXT) {
+		clear_bit(mm->context.id, context_map);
+		mm->context.id = NO_CONTEXT;
 #ifdef FEW_CONTEXTS
 		atomic_inc(&nr_free_contexts);
 #endif
@@ -180,7 +185,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 
 	/* Setup new userspace context */
 	get_mmu_context(next);
-	set_context(next->context, next->pgd);
+	set_context(next->context.id, next->pgd);
 }
 
 #define deactivate_mm(tsk,mm)	do { } while (0)
diff --git a/include/asm-ppc/pgtable.h b/include/asm-ppc/pgtable.h
index 570b355162fa..f886066bd15c 100644
--- a/include/asm-ppc/pgtable.h
+++ b/include/asm-ppc/pgtable.h
@@ -663,7 +663,7 @@ static inline int __ptep_test_and_clear_young(unsigned int context, unsigned lon
 	return (old & _PAGE_ACCESSED) != 0;
 }
 #define ptep_test_and_clear_young(__vma, __addr, __ptep) \
-	__ptep_test_and_clear_young((__vma)->vm_mm->context, __addr, __ptep)
+	__ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)
 
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
 static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma,
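Taken together, the patch turns ppc32's mm_context_t from a bare unsigned long into a two-field struct, so every consumer switches from mm->context to mm->context.id. Below is a minimal standalone sketch of the resulting access pattern, compilable outside the kernel: the mm_context_t layout and the NO_CONTEXT value mirror the hunks above, while struct mm_struct and the init helper here are simplified stand-ins for illustration, not the kernel's own definitions.

#include <stdio.h>

/* Mirrors the new ppc32 mm_context_t: an MMU context number plus a
 * slot for the user VDSO base address. */
typedef struct {
	unsigned long id;
	unsigned long vdso_base;
} mm_context_t;

#define NO_CONTEXT	((unsigned long) -1)

/* Toy stand-in for struct mm_struct, just enough to show the pattern. */
struct mm_struct {
	mm_context_t context;
};

/* With the struct, initialization clears both fields instead of
 * assigning a single scalar, as init_new_context() now does. */
static int init_new_context(struct mm_struct *mm)
{
	mm->context.id = NO_CONTEXT;
	mm->context.vdso_base = 0;
	return 0;
}

int main(void)
{
	struct mm_struct mm;

	init_new_context(&mm);

	/* Callers that used to pass mm->context now pass mm->context.id. */
	printf("context id: %lx, vdso_base: %lx\n",
	       mm.context.id, mm.context.vdso_base);
	return 0;
}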