aboutsummaryrefslogtreecommitdiffstats
path: root/arch
diff options
context:
space:
mode:
authorPaul Mundt <lethal@linux-sh.org>2010-02-17 23:26:05 -0500
committerPaul Mundt <lethal@linux-sh.org>2010-02-17 23:26:05 -0500
commit2e450643d70b62e0192577681b227d7d5d2efa45 (patch)
tree4ecb8fdb3177d2429e2f72202ad88cdccdaed6ae /arch
parentb8f7918f332873a79e4c820e90e7a245ce4d3042 (diff)
sh: Use uncached I/O helpers in PMB setup.
The PMB code is an example of something that spends an absurd amount of time running uncached when only a couple of operations really need to be. This switches over to the shiny new uncached helpers, permitting us to spend far more time running cached. Additionally, MMUCR twiddling is perfectly safe from cached space given that it's paired with a control register barrier, so fix that up, too. Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'arch')
-rw-r--r--arch/sh/mm/pmb.c46
1 file changed, 19 insertions, 27 deletions
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index e65e8b8e2a5e..b9d5476e1284 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -161,32 +161,28 @@ static __always_inline unsigned long pmb_cache_flags(void)
161 */ 161 */
162static void __set_pmb_entry(struct pmb_entry *pmbe) 162static void __set_pmb_entry(struct pmb_entry *pmbe)
163{ 163{
164 jump_to_uncached();
165
166 pmbe->flags &= ~PMB_CACHE_MASK; 164 pmbe->flags &= ~PMB_CACHE_MASK;
167 pmbe->flags |= pmb_cache_flags(); 165 pmbe->flags |= pmb_cache_flags();
168 166
169 __raw_writel(pmbe->vpn | PMB_V, mk_pmb_addr(pmbe->entry)); 167 writel_uncached(pmbe->vpn | PMB_V, mk_pmb_addr(pmbe->entry));
170 __raw_writel(pmbe->ppn | pmbe->flags | PMB_V, mk_pmb_data(pmbe->entry)); 168 writel_uncached(pmbe->ppn | pmbe->flags | PMB_V,
171 169 mk_pmb_data(pmbe->entry));
172 back_to_cached();
173} 170}
174 171
175static void __clear_pmb_entry(struct pmb_entry *pmbe) 172static void __clear_pmb_entry(struct pmb_entry *pmbe)
176{ 173{
177 unsigned int entry = pmbe->entry; 174 unsigned long addr, data;
178 unsigned long addr; 175 unsigned long addr_val, data_val;
179 176
180 jump_to_uncached(); 177 addr = mk_pmb_addr(pmbe->entry);
178 data = mk_pmb_data(pmbe->entry);
181 179
182 /* Clear V-bit */ 180 addr_val = __raw_readl(addr);
183 addr = mk_pmb_addr(entry); 181 data_val = __raw_readl(data);
184 __raw_writel(__raw_readl(addr) & ~PMB_V, addr);
185 182
186 addr = mk_pmb_data(entry); 183 /* Clear V-bit */
187 __raw_writel(__raw_readl(addr) & ~PMB_V, addr); 184 writel_uncached(addr_val & ~PMB_V, addr);
188 185 writel_uncached(data_val & ~PMB_V, data);
189 back_to_cached();
190} 186}
191 187
192static void set_pmb_entry(struct pmb_entry *pmbe) 188static void set_pmb_entry(struct pmb_entry *pmbe)
@@ -400,8 +396,8 @@ static int pmb_synchronize_mappings(void)
400 /* 396 /*
401 * Invalidate anything out of bounds. 397 * Invalidate anything out of bounds.
402 */ 398 */
403 __raw_writel(addr_val & ~PMB_V, addr); 399 writel_uncached(addr_val & ~PMB_V, addr);
404 __raw_writel(data_val & ~PMB_V, data); 400 writel_uncached(data_val & ~PMB_V, data);
405 continue; 401 continue;
406 } 402 }
407 403
@@ -411,7 +407,8 @@ static int pmb_synchronize_mappings(void)
411 if (data_val & PMB_C) { 407 if (data_val & PMB_C) {
412 data_val &= ~PMB_CACHE_MASK; 408 data_val &= ~PMB_CACHE_MASK;
413 data_val |= pmb_cache_flags(); 409 data_val |= pmb_cache_flags();
414 __raw_writel(data_val, data); 410
411 writel_uncached(data_val, data);
415 } 412 }
416 413
417 size = data_val & PMB_SZ_MASK; 414 size = data_val & PMB_SZ_MASK;
@@ -462,25 +459,20 @@ int pmb_init(void)
462{ 459{
463 int ret; 460 int ret;
464 461
465 jump_to_uncached();
466
467 /* 462 /*
468 * Sync our software copy of the PMB mappings with those in 463 * Sync our software copy of the PMB mappings with those in
469 * hardware. The mappings in the hardware PMB were either set up 464 * hardware. The mappings in the hardware PMB were either set up
470 * by the bootloader or very early on by the kernel. 465 * by the bootloader or very early on by the kernel.
471 */ 466 */
472 ret = pmb_synchronize_mappings(); 467 ret = pmb_synchronize_mappings();
473 if (unlikely(ret == 0)) { 468 if (unlikely(ret == 0))
474 back_to_cached();
475 return 0; 469 return 0;
476 }
477 470
478 __raw_writel(0, PMB_IRMCR); 471 writel_uncached(0, PMB_IRMCR);
479 472
480 /* Flush out the TLB */ 473 /* Flush out the TLB */
481 __raw_writel(__raw_readl(MMUCR) | MMUCR_TI, MMUCR); 474 __raw_writel(__raw_readl(MMUCR) | MMUCR_TI, MMUCR);
482 475 ctrl_barrier();
483 back_to_cached();
484 476
485 return 0; 477 return 0;
486} 478}