author    Paul Mundt <lethal@linux-sh.org>    2010-02-17 04:05:23 -0500
committer Paul Mundt <lethal@linux-sh.org>    2010-02-17 04:05:23 -0500
commit    0065b96775f1eff167a2c3343a41582e8fab4c6c (patch)
tree      21772c942c64488442c20695f135faec1738c6bb /arch/sh/mm/pmb.c
parent    d7813bc9e8e384f5a293b05c095c799d41af3668 (diff)
sh: Fix up dynamically created write-through PMB mappings.
Write-through PMB mappings still require the cache bit to be set, even if
they're to be flagged with a different cache policy and bufferability
bit. To reduce some of the confusion surrounding the flag encoding we
centralize the cache mask based on the system cache policy while we're
at it.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'arch/sh/mm/pmb.c')
 arch/sh/mm/pmb.c | 56 ++++++++++++++++++++++++++++++++------------------------
 1 file changed, 32 insertions(+), 24 deletions(-)
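The "centralized cache mask" in the commit message refers to the new
pmb_cache_flags() helper added below, whose bits are cleared via
PMB_CACHE_MASK from the parent commit. As a reference for reading the
diff, here is a sketch of the presumed relationship; PMB_CACHE_MASK's
actual definition lives in the headers touched by parent d7813bc9e8e3,
so treat this as an assumption rather than the in-tree code:

/*
 * Presumed definition from the parent commit; the individual PMB_*
 * bit values come from the sh PMB headers and are not reproduced here.
 */
#define PMB_CACHE_MASK	(PMB_C | PMB_WT | PMB_UB)

/*
 * What pmb_cache_flags() then yields per kernel configuration
 * (taken directly from the first hunk below):
 *
 *   CONFIG_CACHE_WRITETHROUGH: PMB_C | PMB_WT | PMB_UB
 *   CONFIG_CACHE_WRITEBACK:    PMB_C
 *   otherwise:                 0 (uncached)
 */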
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index f2ad6e374b64..cb808a8aaffc 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -102,24 +102,36 @@ static void pmb_free(struct pmb_entry *pmbe)
 }
 
 /*
+ * Ensure that the PMB entries match our cache configuration.
+ *
+ * When we are in 32-bit address extended mode, CCR.CB becomes
+ * invalid, so care must be taken to manually adjust cacheable
+ * translations.
+ */
+static __always_inline unsigned long pmb_cache_flags(void)
+{
+	unsigned long flags = 0;
+
+#if defined(CONFIG_CACHE_WRITETHROUGH)
+	flags |= PMB_C | PMB_WT | PMB_UB;
+#elif defined(CONFIG_CACHE_WRITEBACK)
+	flags |= PMB_C;
+#endif
+
+	return flags;
+}
+
+/*
  * Must be run uncached.
  */
 static void set_pmb_entry(struct pmb_entry *pmbe)
 {
 	jump_to_uncached();
 
-	__raw_writel(pmbe->vpn | PMB_V, mk_pmb_addr(pmbe->entry));
-
-#ifdef CONFIG_CACHE_WRITETHROUGH
-	/*
-	 * When we are in 32-bit address extended mode, CCR.CB becomes
-	 * invalid, so care must be taken to manually adjust cacheable
-	 * translations.
-	 */
-	if (likely(pmbe->flags & PMB_C))
-		pmbe->flags |= PMB_WT;
-#endif
+	pmbe->flags &= ~PMB_CACHE_MASK;
+	pmbe->flags |= pmb_cache_flags();
 
+	__raw_writel(pmbe->vpn | PMB_V, mk_pmb_addr(pmbe->entry));
 	__raw_writel(pmbe->ppn | pmbe->flags | PMB_V, mk_pmb_data(pmbe->entry));
 
 	back_to_cached();
@@ -163,14 +175,15 @@ long pmb_remap(unsigned long vaddr, unsigned long phys,
 
 	flags = pgprot_val(prot);
 
+	pmb_flags = PMB_WT | PMB_UB;
+
 	/* Convert typical pgprot value to the PMB equivalent */
 	if (flags & _PAGE_CACHABLE) {
-		if (flags & _PAGE_WT)
-			pmb_flags = PMB_WT;
-		else
-			pmb_flags = PMB_C;
-	} else
-		pmb_flags = PMB_WT | PMB_UB;
+		pmb_flags |= PMB_C;
+
+		if ((flags & _PAGE_WT) == 0)
+			pmb_flags &= ~(PMB_WT | PMB_UB);
+	}
 
 	pmbp = NULL;
 	wanted = size;
@@ -337,13 +350,8 @@ static int pmb_synchronize_mappings(void)
 		 * Update the caching attributes if necessary
 		 */
 		if (data_val & PMB_C) {
-#if defined(CONFIG_CACHE_WRITETHROUGH)
-			data_val |= PMB_WT;
-#elif defined(CONFIG_CACHE_WRITEBACK)
-			data_val &= ~PMB_WT;
-#else
-			data_val &= ~(PMB_C | PMB_WT);
-#endif
+			data_val &= ~PMB_CACHE_MASK;
+			data_val |= pmb_cache_flags();
 			__raw_writel(data_val, data);
 		}
 
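As a cross-check of the pmb_remap() hunk above, here is a standalone
restatement of the new pgprot-to-PMB conversion. This is an illustrative
sketch, not the in-tree function; the helper name pgprot_to_pmb_flags()
is invented for this example, while the PMB_* and _PAGE_* flags are the
kernel's own:

/* Illustrative restatement of the patched conversion in pmb_remap(). */
static unsigned long pgprot_to_pmb_flags(unsigned long flags)
{
	/* Start from the uncached, unbuffered default. */
	unsigned long pmb_flags = PMB_WT | PMB_UB;

	if (flags & _PAGE_CACHABLE) {
		/* Cacheable mappings always carry the cache bit... */
		pmb_flags |= PMB_C;

		/* ...and shed WT/UB only when write-back is wanted. */
		if ((flags & _PAGE_WT) == 0)
			pmb_flags &= ~(PMB_WT | PMB_UB);
	}

	return pmb_flags;
}

So a cacheable write-through pgprot now yields PMB_C | PMB_WT | PMB_UB
(the cache bit stays set, which is the fix the commit message describes),
while write-back yields plain PMB_C and uncached stays PMB_WT | PMB_UB.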