author    Stuart Menefy <stuart.menefy@st.com>  2006-11-20 23:53:44 -0500
committer Paul Mundt <lethal@linux-sh.org>      2006-12-05 20:45:38 -0500
commit    6e4662ff49c6b94e16a47bfddb920576963b5a20 (patch)
tree      b53bcf136cc8df9e09f63f06321cad3622694c70
parent    b5a1bcbee434b843c8850a968d9a6c7541f1be9d (diff)
sh: Use MMU.TTB register as pointer to current pgd.
Add TTB accessor functions and give it a sensible default value. We
will use this later for optimizing the fault path.

Signed-off-by: Stuart Menefy <stuart.menefy@st.com>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
-rw-r--r--  arch/sh/mm/init.c            | 18
-rw-r--r--  include/asm-sh/mmu_context.h | 44
2 files changed, 31 insertions(+), 31 deletions(-)
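
For context on where this is headed: the commit message says the TTB value
will be used later to optimize the fault path. Below is a minimal sketch of
the kind of lookup a valid TTB enables; lookup_pte() is a hypothetical
helper, not part of this patch, and the walk assumes the kernel's usual
folded pgd/pud/pmd helpers.

/* Hypothetical helper (not in this patch): with MMU.TTB always holding
 * a valid pgd pointer, the fault path can walk the page table straight
 * from the register instead of going through current->mm, which may be
 * NULL for kernel threads.
 */
static pte_t *lookup_pte(unsigned long address)
{
	pgd_t *pgd = get_TTB() + pgd_index(address);
	pud_t *pud;
	pmd_t *pmd;

	if (!pgd_present(*pgd))
		return NULL;
	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return NULL;
	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, address);
}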
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 8b275166f40..8c8d3911838 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -155,9 +155,6 @@ extern char __init_begin, __init_end;
 
 /*
  * paging_init() sets up the page tables
- *
- * This routines also unmaps the page at virtual kernel address 0, so
- * that we can trap those pesky NULL-reference errors in the kernel.
  */
 void __init paging_init(void)
 {
@@ -180,14 +177,11 @@ void __init paging_init(void)
 	 */
 	{
 		unsigned long max_dma, low, start_pfn;
-		pgd_t *pg_dir;
-		int i;
-
-		/* We don't need kernel mapping as hardware support that. */
-		pg_dir = swapper_pg_dir;
 
-		for (i = 0; i < PTRS_PER_PGD; i++)
-			pgd_val(pg_dir[i]) = 0;
+		/* We don't need to map the kernel through the TLB, as
+		 * it is permanatly mapped using P1. So clear the
+		 * entire pgd. */
+		memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
 
 		/* Turn on the MMU */
 		enable_mmu();
@@ -206,6 +200,10 @@ void __init paging_init(void)
 		}
 	}
 
+	/* Set an initial value for the MMU.TTB so we don't have to
+	 * check for a null value. */
+	set_TTB(swapper_pg_dir);
+
 #elif defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4)
 	/*
 	 * If we don't have CONFIG_MMU set and the processor in question
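
The memset() introduced above is behaviourally identical to the per-entry
loop it replaces. A stand-alone sketch of the equivalence follows, using a
simplified stand-in for pgd_t and an illustrative PTRS_PER_PGD (the real
definitions come from the sh page-table headers):

#include <string.h>

#define PTRS_PER_PGD 1024			/* illustrative value */
typedef struct { unsigned long pgd; } pgd_t;	/* simplified stand-in */

static pgd_t swapper_pg_dir[PTRS_PER_PGD];

static void clear_pgd_loop(void)		/* old style: entry by entry */
{
	int i;

	for (i = 0; i < PTRS_PER_PGD; i++)
		swapper_pg_dir[i].pgd = 0;
}

static void clear_pgd_memset(void)		/* new style: one call */
{
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
}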
diff --git a/include/asm-sh/mmu_context.h b/include/asm-sh/mmu_context.h
index c7088efe579..46f04e23bd4 100644
--- a/include/asm-sh/mmu_context.h
+++ b/include/asm-sh/mmu_context.h
@@ -10,7 +10,6 @@
 
 #include <asm/cpu/mmu_context.h>
 #include <asm/tlbflush.h>
-#include <asm/pgalloc.h>
 #include <asm/uaccess.h>
 #include <asm/io.h>
 
@@ -42,10 +41,8 @@ extern unsigned long mmu_context_cache;
 /*
  * Get MMU context if needed.
  */
-static __inline__ void
-get_mmu_context(struct mm_struct *mm)
+static inline void get_mmu_context(struct mm_struct *mm)
 {
-	extern void flush_tlb_all(void);
 	unsigned long mc = mmu_context_cache;
 
 	/* Check if we have old version of context. */
@@ -61,6 +58,7 @@ get_mmu_context(struct mm_struct *mm)
 		 * Flush all TLB and start new cycle.
 		 */
 		flush_tlb_all();
+
 		/*
 		 * Fix version; Note that we avoid version #0
 		 * to distingush NO_CONTEXT.
@@ -75,11 +73,10 @@ get_mmu_context(struct mm_struct *mm)
  * Initialize the context related info for a new mm_struct
  * instance.
  */
-static __inline__ int init_new_context(struct task_struct *tsk,
-				       struct mm_struct *mm)
+static inline int init_new_context(struct task_struct *tsk,
+				   struct mm_struct *mm)
 {
 	mm->context.id = NO_CONTEXT;
-
 	return 0;
 }
 
@@ -87,12 +84,12 @@ static __inline__ int init_new_context(struct task_struct *tsk,
  * Destroy context related info for an mm_struct that is about
  * to be put to rest.
  */
-static __inline__ void destroy_context(struct mm_struct *mm)
+static inline void destroy_context(struct mm_struct *mm)
 {
 	/* Do nothing */
 }
 
-static __inline__ void set_asid(unsigned long asid)
+static inline void set_asid(unsigned long asid)
 {
 	unsigned long __dummy;
 
@@ -105,7 +102,7 @@ static __inline__ void set_asid(unsigned long asid)
 			      "r" (0xffffff00));
 }
 
-static __inline__ unsigned long get_asid(void)
+static inline unsigned long get_asid(void)
 {
 	unsigned long asid;
 
@@ -120,24 +117,29 @@ static __inline__ unsigned long get_asid(void)
  * After we have set current->mm to a new value, this activates
  * the context for the new mm so we see the new mappings.
  */
-static __inline__ void activate_context(struct mm_struct *mm)
+static inline void activate_context(struct mm_struct *mm)
 {
 	get_mmu_context(mm);
 	set_asid(mm->context.id & MMU_CONTEXT_ASID_MASK);
 }
 
-/* MMU_TTB can be used for optimizing the fault handling.
-   (Currently not used) */
-static __inline__ void switch_mm(struct mm_struct *prev,
-				 struct mm_struct *next,
-				 struct task_struct *tsk)
+/* MMU_TTB is used for optimizing the fault handling. */
+static inline void set_TTB(pgd_t *pgd)
 {
-	if (likely(prev != next)) {
-		unsigned long __pgdir = (unsigned long)next->pgd;
+	ctrl_outl((unsigned long)pgd, MMU_TTB);
+}
 
-		__asm__ __volatile__("mov.l	%0, %1"
-				     : /* no output */
-				     : "r" (__pgdir), "m" (__m(MMU_TTB)));
+static inline pgd_t *get_TTB(void)
+{
+	return (pgd_t *)ctrl_inl(MMU_TTB);
+}
+
+static inline void switch_mm(struct mm_struct *prev,
+			     struct mm_struct *next,
+			     struct task_struct *tsk)
+{
+	if (likely(prev != next)) {
+		set_TTB(next->pgd);
 		activate_context(next);
 	}
 }
@@ -147,7 +149,7 @@ static __inline__ void switch_mm(struct mm_struct *prev,
 #define activate_mm(prev, next) \
 	switch_mm((prev),(next),NULL)
 
-static __inline__ void
+static inline void
 enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
 }
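
To make the new invariant concrete: after every switch_mm(), MMU.TTB and
the live pgd agree, which is what the fault path will later rely on. A
stand-alone model follows, with the register replaced by a plain variable;
the real accessors use the kernel's ctrl_outl()/ctrl_inl() on the MMU_TTB
address, and model_set_TTB()/model_get_TTB() are illustrative names only.

#include <assert.h>
#include <stdint.h>

typedef struct { unsigned long pgd; } pgd_t;	/* simplified stand-in */

static volatile uintptr_t mmu_ttb;	/* models the MMU.TTB register */

static void model_set_TTB(pgd_t *pgd)
{
	mmu_ttb = (uintptr_t)pgd;
}

static pgd_t *model_get_TTB(void)
{
	return (pgd_t *)mmu_ttb;
}

int main(void)
{
	static pgd_t pgd_a[8], pgd_b[8];

	model_set_TTB(pgd_a);			/* switch_mm() -> set_TTB(next->pgd) */
	assert(model_get_TTB() == pgd_a);

	model_set_TTB(pgd_b);			/* next context switch */
	assert(model_get_TTB() == pgd_b);	/* TTB tracks the live pgd */
	return 0;
}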