Diffstat (limited to 'include/asm-sh')
-rw-r--r--	include/asm-sh/mmu_context.h	44
1 file changed, 23 insertions(+), 21 deletions(-)
diff --git a/include/asm-sh/mmu_context.h b/include/asm-sh/mmu_context.h
index c7088efe579a..46f04e23bd45 100644
--- a/include/asm-sh/mmu_context.h
+++ b/include/asm-sh/mmu_context.h
@@ -10,7 +10,6 @@
 
 #include <asm/cpu/mmu_context.h>
 #include <asm/tlbflush.h>
-#include <asm/pgalloc.h>
 #include <asm/uaccess.h>
 #include <asm/io.h>
 
@@ -42,10 +41,8 @@ extern unsigned long mmu_context_cache;
 /*
  * Get MMU context if needed.
  */
-static __inline__ void
-get_mmu_context(struct mm_struct *mm)
+static inline void get_mmu_context(struct mm_struct *mm)
 {
-	extern void flush_tlb_all(void);
 	unsigned long mc = mmu_context_cache;
 
 	/* Check if we have old version of context. */
@@ -61,6 +58,7 @@ get_mmu_context(struct mm_struct *mm)
 		 * Flush all TLB and start new cycle.
 		 */
 		flush_tlb_all();
+
 		/*
 		 * Fix version; Note that we avoid version #0
 		 * to distingush NO_CONTEXT.
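For orientation, the allocator these hunks touch hands out an 8-bit hardware ASID tagged with a generation ("version") count in the upper 24 bits of context.id; when the 256 ASIDs of a generation run out, it flushes the whole TLB and starts a new generation, skipping version 0 so NO_CONTEXT stays unambiguous. Below is a minimal user-space model of that scheme. The MMU_CONTEXT_* names and values are assumptions reconstructed from context (only the 0xffffff00 mask and MMU_CONTEXT_ASID_MASK are visible elsewhere in this diff), not definitions quoted from the header.

#include <stdio.h>

/* Assumed layout: low 8 bits are the hardware ASID, the upper 24
 * bits count generations of ASID reuse. */
#define MMU_CONTEXT_ASID_MASK		0x000000ffUL
#define MMU_CONTEXT_VERSION_MASK	0xffffff00UL
#define MMU_CONTEXT_FIRST_VERSION	0x00000100UL
#define NO_CONTEXT			0UL

static unsigned long mmu_context_cache = MMU_CONTEXT_FIRST_VERSION;

/* Model of get_mmu_context(): keep an id that matches the current
 * generation, otherwise hand out the next ASID; when the ASIDs of a
 * generation are exhausted, "flush the TLB" and bump the generation
 * so stale ids can no longer match. */
static unsigned long model_get_context(unsigned long id)
{
	unsigned long mc = mmu_context_cache;

	/* Same generation as the allocator: id is still valid. */
	if (((id ^ mc) & MMU_CONTEXT_VERSION_MASK) == 0)
		return id;

	mc = ++mmu_context_cache;
	if (!(mc & MMU_CONTEXT_ASID_MASK)) {
		printf("ASIDs exhausted: flush_tlb_all(), new generation\n");
		if (!mc)	/* 32-bit wrap: skip version 0 == NO_CONTEXT */
			mmu_context_cache = mc = MMU_CONTEXT_FIRST_VERSION;
	}
	return mc;
}

int main(void)
{
	unsigned long id = NO_CONTEXT;

	id = model_get_context(id);	/* first allocation */
	printf("asid=%lu version=%#lx\n",
	       id & MMU_CONTEXT_ASID_MASK, id & MMU_CONTEXT_VERSION_MASK);
	return 0;
}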
@@ -75,11 +73,10 @@ get_mmu_context(struct mm_struct *mm)
  * Initialize the context related info for a new mm_struct
  * instance.
  */
-static __inline__ int init_new_context(struct task_struct *tsk,
+static inline int init_new_context(struct task_struct *tsk,
 				       struct mm_struct *mm)
 {
 	mm->context.id = NO_CONTEXT;
-
 	return 0;
 }
 
@@ -87,12 +84,12 @@ static __inline__ int init_new_context(struct task_struct *tsk,
  * Destroy context related info for an mm_struct that is about
  * to be put to rest.
  */
-static __inline__ void destroy_context(struct mm_struct *mm)
+static inline void destroy_context(struct mm_struct *mm)
 {
 	/* Do nothing */
 }
 
-static __inline__ void set_asid(unsigned long asid)
+static inline void set_asid(unsigned long asid)
 {
 	unsigned long __dummy;
 
@@ -105,7 +102,7 @@ static __inline__ void set_asid(unsigned long asid)
 		      "r" (0xffffff00));
 }
 
-static __inline__ unsigned long get_asid(void)
+static inline unsigned long get_asid(void)
 {
 	unsigned long asid;
 
@@ -120,24 +117,29 @@ static __inline__ unsigned long get_asid(void)
  * After we have set current->mm to a new value, this activates
  * the context for the new mm so we see the new mappings.
  */
-static __inline__ void activate_context(struct mm_struct *mm)
+static inline void activate_context(struct mm_struct *mm)
 {
 	get_mmu_context(mm);
 	set_asid(mm->context.id & MMU_CONTEXT_ASID_MASK);
 }
 
-/* MMU_TTB can be used for optimizing the fault handling.
-   (Currently not used) */
-static __inline__ void switch_mm(struct mm_struct *prev,
-				 struct mm_struct *next,
-				 struct task_struct *tsk)
+/* MMU_TTB is used for optimizing the fault handling. */
+static inline void set_TTB(pgd_t *pgd)
 {
-	if (likely(prev != next)) {
-		unsigned long __pgdir = (unsigned long)next->pgd;
+	ctrl_outl((unsigned long)pgd, MMU_TTB);
+}
 
-		__asm__ __volatile__("mov.l	%0, %1"
-				     : /* no output */
-				     : "r" (__pgdir), "m" (__m(MMU_TTB)));
+static inline pgd_t *get_TTB(void)
+{
+	return (pgd_t *)ctrl_inl(MMU_TTB);
+}
+
+static inline void switch_mm(struct mm_struct *prev,
+			     struct mm_struct *next,
+			     struct task_struct *tsk)
+{
+	if (likely(prev != next)) {
+		set_TTB(next->pgd);
 		activate_context(next);
 	}
 }
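This hunk is the substance of the patch: the open-coded mov.l that wrote next->pgd into MMU_TTB is replaced by set_TTB()/get_TTB() accessors built on the SH ctrl_outl()/ctrl_inl() I/O helpers, and the comment stops calling the register unused. The payoff is that later code, such as a fault handler, can read the active pgd straight back out of the hardware register instead of dereferencing current->mm->pgd (that caller is motivation, not code in this diff). A minimal user-space sketch of the round trip, with an ordinary variable standing in for the memory-mapped register:

#include <stdio.h>

typedef unsigned long pgd_t;

static unsigned long fake_mmu_ttb;	/* stands in for the MMU_TTB register */

static inline void set_TTB(pgd_t *pgd)
{
	fake_mmu_ttb = (unsigned long)pgd;	/* models ctrl_outl(..., MMU_TTB) */
}

static inline pgd_t *get_TTB(void)
{
	return (pgd_t *)fake_mmu_ttb;		/* models ctrl_inl(MMU_TTB) */
}

struct mm { pgd_t *pgd; };

/* Model of switch_mm(): only the page-table base changes hands here;
 * the ASID side (activate_context) is a separate step. */
static void model_switch_mm(struct mm *prev, struct mm *next)
{
	if (prev != next)
		set_TTB(next->pgd);
}

int main(void)
{
	pgd_t pgd_a, pgd_b;
	struct mm a = { &pgd_a }, b = { &pgd_b };

	model_switch_mm(&a, &b);
	/* A fault handler could now recover the active pgd from the
	 * hardware register rather than chasing current->mm->pgd. */
	printf("TTB now points at %p (pgd of next)\n", (void *)get_TTB());
	return 0;
}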
@@ -147,7 +149,7 @@ static __inline__ void switch_mm(struct mm_struct *prev,
 #define activate_mm(prev, next) \
 	switch_mm((prev),(next),NULL)
 
-static __inline__ void
+static inline void
 enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
 }