Diffstat (limited to 'include/asm-blackfin/mmu_context.h')
 include/asm-blackfin/mmu_context.h | 62 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 57 insertions(+), 5 deletions(-)
diff --git a/include/asm-blackfin/mmu_context.h b/include/asm-blackfin/mmu_context.h
index c5c71a6aaf19..b5eb67596ad5 100644
--- a/include/asm-blackfin/mmu_context.h
+++ b/include/asm-blackfin/mmu_context.h
@@ -30,9 +30,12 @@
 #ifndef __BLACKFIN_MMU_CONTEXT_H__
 #define __BLACKFIN_MMU_CONTEXT_H__
 
+#include <linux/gfp.h>
+#include <linux/sched.h>
 #include <asm/setup.h>
 #include <asm/page.h>
 #include <asm/pgalloc.h>
+#include <asm/cplbinit.h>
 
 extern void *current_l1_stack_save;
 extern int nr_l1stack_tasks;
@@ -50,6 +53,12 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 static inline int
 init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
+#ifdef CONFIG_MPU
+	unsigned long p = __get_free_pages(GFP_KERNEL, page_mask_order);
+	mm->context.page_rwx_mask = (unsigned long *)p;
+	memset(mm->context.page_rwx_mask, 0,
+	       page_mask_nelts * 3 * sizeof(long));
+#endif
 	return 0;
 }
 
@@ -73,6 +82,11 @@ static inline void destroy_context(struct mm_struct *mm)
 		sram_free(tmp->addr);
 		kfree(tmp);
 	}
+#ifdef CONFIG_MPU
+	if (current_rwx_mask == mm->context.page_rwx_mask)
+		current_rwx_mask = NULL;
+	free_pages((unsigned long)mm->context.page_rwx_mask, page_mask_order);
+#endif
 }
 
 static inline unsigned long
@@ -106,9 +120,21 @@ activate_l1stack(struct mm_struct *mm, unsigned long sp_base)
 
 #define deactivate_mm(tsk,mm)	do { } while (0)
 
-static inline void activate_mm(struct mm_struct *prev_mm,
-			       struct mm_struct *next_mm)
+#define activate_mm(prev, next) switch_mm(prev, next, NULL)
+
+static inline void switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
+			     struct task_struct *tsk)
 {
+	if (prev_mm == next_mm)
+		return;
+#ifdef CONFIG_MPU
+	if (prev_mm->context.page_rwx_mask == current_rwx_mask) {
+		flush_switched_cplbs();
+		set_mask_dcplbs(next_mm->context.page_rwx_mask);
+	}
+#endif
+
+	/* L1 stack switching.  */
 	if (!next_mm->context.l1_stack_save)
 		return;
 	if (next_mm->context.l1_stack_save == current_l1_stack_save)
@@ -120,10 +146,36 @@ static inline void activate_mm(struct mm_struct *prev_mm,
 	memcpy(l1_stack_base, current_l1_stack_save, l1_stack_len);
 }
 
-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
-			     struct task_struct *tsk)
+#ifdef CONFIG_MPU
+static inline void protect_page(struct mm_struct *mm, unsigned long addr,
+				unsigned long flags)
+{
+	unsigned long *mask = mm->context.page_rwx_mask;
+	unsigned long page = addr >> 12;
+	unsigned long idx = page >> 5;
+	unsigned long bit = 1 << (page & 31);
+
+	if (flags & VM_MAYREAD)
+		mask[idx] |= bit;
+	else
+		mask[idx] &= ~bit;
+	mask += page_mask_nelts;
+	if (flags & VM_MAYWRITE)
+		mask[idx] |= bit;
+	else
+		mask[idx] &= ~bit;
+	mask += page_mask_nelts;
+	if (flags & VM_MAYEXEC)
+		mask[idx] |= bit;
+	else
+		mask[idx] &= ~bit;
+}
+
+static inline void update_protections(struct mm_struct *mm)
 {
-	activate_mm(prev, next);
+	flush_switched_cplbs();
+	set_mask_dcplbs(mm->context.page_rwx_mask);
 }
+#endif
 
 #endif
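
Notes on the change:

The CONFIG_MPU lifecycle pairs an allocation in init_new_context() with a release in destroy_context(): each mm gets one physically contiguous block holding three permission bitmaps (read, write, execute), each page_mask_nelts words long, one bit per 4 KB page. Before freeing the block, destroy_context() clears current_rwx_mask if it still points at the dying mm's bitmap, so the CPLB code is never left holding a freed pointer. A minimal sketch of the sizing invariant the memset() relies on follows; the check itself is not in the patch, and page_mask_order / page_mask_nelts come from <asm/cplbinit.h>:

/*
 * Sketch only, not part of the patch: the three permission planes
 * (R, W, X), page_mask_nelts words each, must fit in the
 * 2^page_mask_order pages returned by __get_free_pages().
 */
static inline void check_page_rwx_mask_size(void)
{
	BUG_ON(page_mask_nelts * 3 * sizeof(long) >
	       (PAGE_SIZE << page_mask_order));
}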
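
protect_page() treats page_rwx_mask as three planes laid out back to back — plane 0 for VM_MAYREAD, plane 1 for VM_MAYWRITE, plane 2 for VM_MAYEXEC — stepping between them with mask += page_mask_nelts. Within a plane, addr >> 12 selects the 4 KB page, page >> 5 the 32-bit word, and page & 31 the bit. A hypothetical read-back helper (page_rwx_test is an invented name, not in the patch) shows the same indexing:

/*
 * Hypothetical helper, not in the patch: test one permission bit
 * using the layout protect_page() writes.  plane is 0 (read),
 * 1 (write) or 2 (execute).
 */
static inline int page_rwx_test(struct mm_struct *mm, int plane,
				unsigned long addr)
{
	unsigned long page = addr >> 12;	/* 4 KB pages */
	unsigned long *mask = mm->context.page_rwx_mask
			      + plane * page_mask_nelts;

	return (mask[page >> 5] >> (page & 31)) & 1;
}

Worked numbers: for addr = 0x3456000, page = 0x3456 (13398), so the bit lives in word 13398 >> 5 = 418 of each plane, at bit position 13398 & 31 = 22.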
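
On a context switch, switch_mm() touches the MPU state only when the outgoing mm's bitmap is the one currently loaded (prev_mm->context.page_rwx_mask == current_rwx_mask): it flushes the switched CPLB entries and repoints the data-CPLB mask at the incoming mm's bitmap. activate_mm() is now just switch_mm() with a NULL task, and update_protections() exposes the same flush-and-reload pair so callers can force it after editing the bitmaps. A hypothetical caller sketch tying the two new primitives together — mpu_set_range() is an invented name, and the loop granularity assumes the 4 KB pages that protect_page() hard-codes via addr >> 12:

/*
 * Hypothetical usage, not from the patch: push new permissions for a
 * range, then make them visible to the MPU.
 */
static void mpu_set_range(struct mm_struct *mm, unsigned long start,
			  unsigned long end, unsigned long vm_flags)
{
	unsigned long addr;

	for (addr = start; addr < end; addr += PAGE_SIZE)
		protect_page(mm, addr, vm_flags);	/* update the bitmaps */

	update_protections(mm);	/* flush_switched_cplbs() + set_mask_dcplbs() */
}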