Diffstat (limited to 'arch/blackfin/include/asm/mmu_context.h')
-rw-r--r--  arch/blackfin/include/asm/mmu_context.h | 27 +++++++++++++++++++--------
1 file changed, 19 insertions(+), 8 deletions(-)
diff --git a/arch/blackfin/include/asm/mmu_context.h b/arch/blackfin/include/asm/mmu_context.h
index 35593dda2a4d..944e29faae48 100644
--- a/arch/blackfin/include/asm/mmu_context.h
+++ b/arch/blackfin/include/asm/mmu_context.h
@@ -37,6 +37,10 @@
 #include <asm/pgalloc.h>
 #include <asm/cplbinit.h>
 
+/* Note: L1 stacks are CPU-private things, so we bluntly disable this
+   feature in SMP mode, and use the per-CPU scratch SRAM bank only to
+   store the PDA instead. */
+
 extern void *current_l1_stack_save;
 extern int nr_l1stack_tasks;
 extern void *l1_stack_base;
@@ -88,12 +92,15 @@ activate_l1stack(struct mm_struct *mm, unsigned long sp_base)
 static inline void switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
 			     struct task_struct *tsk)
 {
+#ifdef CONFIG_MPU
+	unsigned int cpu = smp_processor_id();
+#endif
 	if (prev_mm == next_mm)
 		return;
 #ifdef CONFIG_MPU
-	if (prev_mm->context.page_rwx_mask == current_rwx_mask) {
-		flush_switched_cplbs();
-		set_mask_dcplbs(next_mm->context.page_rwx_mask);
+	if (prev_mm->context.page_rwx_mask == current_rwx_mask[cpu]) {
+		flush_switched_cplbs(cpu);
+		set_mask_dcplbs(next_mm->context.page_rwx_mask, cpu);
 	}
 #endif
 
@@ -138,9 +145,10 @@ static inline void protect_page(struct mm_struct *mm, unsigned long addr,
 
 static inline void update_protections(struct mm_struct *mm)
 {
-	if (mm->context.page_rwx_mask == current_rwx_mask) {
-		flush_switched_cplbs();
-		set_mask_dcplbs(mm->context.page_rwx_mask);
+	unsigned int cpu = smp_processor_id();
+	if (mm->context.page_rwx_mask == current_rwx_mask[cpu]) {
+		flush_switched_cplbs(cpu);
+		set_mask_dcplbs(mm->context.page_rwx_mask, cpu);
 	}
 }
 #endif
@@ -165,6 +173,9 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 static inline void destroy_context(struct mm_struct *mm)
 {
 	struct sram_list_struct *tmp;
+#ifdef CONFIG_MPU
+	unsigned int cpu = smp_processor_id();
+#endif
 
 #ifdef CONFIG_APP_STACK_L1
 	if (current_l1_stack_save == mm->context.l1_stack_save)
@@ -179,8 +190,8 @@ static inline void destroy_context(struct mm_struct *mm)
 		kfree(tmp);
 	}
 #ifdef CONFIG_MPU
-	if (current_rwx_mask == mm->context.page_rwx_mask)
-		current_rwx_mask = NULL;
+	if (current_rwx_mask[cpu] == mm->context.page_rwx_mask)
+		current_rwx_mask[cpu] = NULL;
 	free_pages((unsigned long)mm->context.page_rwx_mask, page_mask_order);
 #endif
 }
