author		Stuart Menefy <stuart.menefy@st.com>	2007-11-30 03:06:36 -0500
committer	Paul Mundt <lethal@linux-sh.org>	2008-01-27 23:18:59 -0500
commit		cbaa118ecfd99fc5ed7adbd9c34a30e1c05e3c93
tree		e60db5c0f3573558c97f39cfab78732220a72e6d /arch/sh/mm/pmb.c
parent		325df7f20467da07901c4f2b006d3457bba0adec
sh: Preparation for uncached jumps through PMB.
Presently most of the 29-bit physical parts do P1/P2 segmentation
with a 1:1 cached/uncached mapping, jumping between the two to
control the caching behaviour. This provides the basic infrastructure
to maintain this behaviour on 32-bit physical parts that don't map
P1/P2 at all, using a shiny new linker section and corresponding
fixmap entry.

Signed-off-by: Stuart Menefy <stuart.menefy@st.com>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
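The conversion pattern is uniform across the hunks below: each PMB
register sequence is bracketed by a jump to the uncached mapping and a
jump back, and the containing function gains the __uses_jump_to_uncached
annotation so it can be placed in the new linker section. As a sketch,
this is the shape set_pmb_entry() takes after the patch (comments added
here for illustration, not part of the diff):

	int __uses_jump_to_uncached set_pmb_entry(struct pmb_entry *pmbe)
	{
		int ret;

		jump_to_uncached();	/* execute from the uncached alias */
		ret = __set_pmb_entry(pmbe->vpn, pmbe->ppn,
				      pmbe->flags, &pmbe->entry);
		back_to_cached();	/* resume cached execution */

		return ret;
	}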
Diffstat (limited to 'arch/sh/mm/pmb.c')
-rw-r--r--	arch/sh/mm/pmb.c	18
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index ef6ab39eaf65..ab81c602295f 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -163,18 +163,18 @@ repeat:
 	return 0;
 }
 
-int set_pmb_entry(struct pmb_entry *pmbe)
+int __uses_jump_to_uncached set_pmb_entry(struct pmb_entry *pmbe)
 {
 	int ret;
 
-	jump_to_P2();
+	jump_to_uncached();
 	ret = __set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, &pmbe->entry);
-	back_to_P1();
+	back_to_cached();
 
 	return ret;
 }
 
-void clear_pmb_entry(struct pmb_entry *pmbe)
+void __uses_jump_to_uncached clear_pmb_entry(struct pmb_entry *pmbe)
 {
 	unsigned int entry = pmbe->entry;
 	unsigned long addr;
@@ -188,7 +188,7 @@ void clear_pmb_entry(struct pmb_entry *pmbe)
 		    entry >= NR_PMB_ENTRIES))
 		return;
 
-	jump_to_P2();
+	jump_to_uncached();
 
 	/* Clear V-bit */
 	addr = mk_pmb_addr(entry);
@@ -197,7 +197,7 @@ void clear_pmb_entry(struct pmb_entry *pmbe)
 	addr = mk_pmb_data(entry);
 	ctrl_outl(ctrl_inl(addr) & ~PMB_V, addr);
 
-	back_to_P1();
+	back_to_cached();
 
 	clear_bit(entry, &pmb_map);
 }
@@ -302,7 +302,7 @@ static void pmb_cache_ctor(struct kmem_cache *cachep, void *pmb)
 	pmbe->entry = PMB_NO_ENTRY;
 }
 
-static int __init pmb_init(void)
+static int __uses_jump_to_uncached pmb_init(void)
 {
 	unsigned int nr_entries = ARRAY_SIZE(pmb_init_map);
 	unsigned int entry, i;
@@ -312,7 +312,7 @@ static int __init pmb_init(void)
 	pmb_cache = kmem_cache_create("pmb", sizeof(struct pmb_entry), 0,
 				      SLAB_PANIC, pmb_cache_ctor);
 
-	jump_to_P2();
+	jump_to_uncached();
 
 	/*
 	 * Ordering is important, P2 must be mapped in the PMB before we
@@ -335,7 +335,7 @@ static int __init pmb_init(void)
 		i |= MMUCR_TI;
 	ctrl_outl(i, MMUCR);
 
-	back_to_P1();
+	back_to_cached();
 
 	return 0;
 }
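What __uses_jump_to_uncached expands to is not part of this diff; going
by the commit message, a plausible definition (an assumption here, taken
to live elsewhere in this series rather than shown above) is a section
attribute placing the annotated function in the new uncached text
section, which the fixmap entry then aliases uncached on parts without
P1/P2:

	/* Assumed definition from the arch/sh headers; on 29-bit
	 * parts it would expand to nothing. */
	#ifdef CONFIG_32BIT
	#define __uses_jump_to_uncached \
		__attribute__ ((__section__ (".uncached.text")))
	#else
	#define __uses_jump_to_uncached
	#endif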