author		Paul Mundt <lethal@linux-sh.org>	2010-02-17 01:33:30 -0500
committer	Paul Mundt <lethal@linux-sh.org>	2010-02-17 01:33:30 -0500
commit		51becfd96287b3913b13075699433730984e2f4f (patch)
tree		2105a0a34e99ee872637ab2f15a8e5c8d890715a /arch
parent		7bdda6209f224aa784a036df54b22cb338d2e859 (diff)
sh: PMB tidying.

Some overdue cleanup of the PMB code, killing off unused functionality
and duplication sprinkled about the tree.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'arch')
-rw-r--r--	arch/sh/include/asm/mmu.h	4
-rw-r--r--	arch/sh/kernel/head_32.S	2
-rw-r--r--	arch/sh/mm/pmb.c	83
3 files changed, 42 insertions(+), 47 deletions(-)
diff --git a/arch/sh/include/asm/mmu.h b/arch/sh/include/asm/mmu.h
index 151bc922701b..44c904341414 100644
--- a/arch/sh/include/asm/mmu.h
+++ b/arch/sh/include/asm/mmu.h
@@ -11,7 +11,9 @@
 
 #define PMB_ADDR		0xf6100000
 #define PMB_DATA		0xf7100000
-#define PMB_ENTRY_MAX		16
+
+#define NR_PMB_ENTRIES		16
+
 #define PMB_E_MASK		0x0000000f
 #define PMB_E_SHIFT		8
 
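Beyond the rename, this hunk consolidates the entry count: pmb.c carried its own private copy of the same value (removed in the pmb.c hunk below), while mmu.h exported it as PMB_ENTRY_MAX for head_32.S. After the patch a single shared definition remains, roughly:

	/* <asm/mmu.h>: one definition, used by both C and assembly */
	#define NR_PMB_ENTRIES	16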
diff --git a/arch/sh/kernel/head_32.S b/arch/sh/kernel/head_32.S
index 83f2b84b58da..91ae76277d8f 100644
--- a/arch/sh/kernel/head_32.S
+++ b/arch/sh/kernel/head_32.S
@@ -236,7 +236,7 @@ ENTRY(_stext)
 	 * r10 = number of entries we've setup so far
 	 */
 	mov	#0, r1
-	mov	#PMB_ENTRY_MAX, r0
+	mov	#NR_PMB_ENTRIES, r0
 
.Lagain:
 	mov.l	r1, @r3		/* Clear PMB_ADDR entry */
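For readers who don't speak SH assembly: the loop this hunk touches (only partially visible in the context) walks every PMB slot at boot and zeroes its entry registers, dropping the valid bit. A rough, self-contained C sketch of that behaviour; pmb_store() is an illustrative stand-in for the raw mov.l store, not a kernel API:

#include <stdio.h>

#define PMB_ADDR	0xf6100000UL
#define PMB_DATA	0xf7100000UL
#define PMB_E_SHIFT	8
#define NR_PMB_ENTRIES	16

static void pmb_store(unsigned long reg, unsigned long val)
{
	/* On hardware this is a raw store; print for illustration. */
	printf("write 0x%08lx -> 0x%08lx\n", val, reg);
}

static void pmb_clear_all(void)
{
	int i;

	/* Zeroing each slot clears its valid (V) bit. */
	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		pmb_store(PMB_ADDR + ((unsigned long)i << PMB_E_SHIFT), 0);
		pmb_store(PMB_DATA + ((unsigned long)i << PMB_E_SHIFT), 0);
	}
}

int main(void)
{
	pmb_clear_all();
	return 0;
}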
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index 509a444a30ab..924f3e4b3a82 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -21,32 +21,31 @@
 #include <linux/fs.h>
 #include <linux/seq_file.h>
 #include <linux/err.h>
+#include <linux/io.h>
+#include <asm/sizes.h>
 #include <asm/system.h>
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
 #include <asm/page.h>
 #include <asm/mmu.h>
-#include <asm/io.h>
 #include <asm/mmu_context.h>
 
-#define NR_PMB_ENTRIES	16
-
-static void __pmb_unmap(struct pmb_entry *);
+static void pmb_unmap_entry(struct pmb_entry *);
 
 static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
-static unsigned long pmb_map;
+static DECLARE_BITMAP(pmb_map, NR_PMB_ENTRIES);
 
-static inline unsigned long mk_pmb_entry(unsigned int entry)
+static __always_inline unsigned long mk_pmb_entry(unsigned int entry)
 {
 	return (entry & PMB_E_MASK) << PMB_E_SHIFT;
 }
 
-static inline unsigned long mk_pmb_addr(unsigned int entry)
+static __always_inline unsigned long mk_pmb_addr(unsigned int entry)
 {
 	return mk_pmb_entry(entry) | PMB_ADDR;
 }
 
-static inline unsigned long mk_pmb_data(unsigned int entry)
+static __always_inline unsigned long mk_pmb_data(unsigned int entry)
 {
 	return mk_pmb_entry(entry) | PMB_DATA;
 }
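Two mechanical changes meet in this hunk. DECLARE_BITMAP(pmb_map, NR_PMB_ENTRIES) expands to an unsigned long array large enough to hold 16 bits, which is why the bitops calls in the later hunks take pmb_map directly rather than &pmb_map. And the mk_pmb_* helpers (now forced inline) compute each slot's register addresses from the defines in mmu.h; a standalone sketch of that math:

#include <stdio.h>

#define PMB_ADDR	0xf6100000UL
#define PMB_DATA	0xf7100000UL
#define PMB_E_MASK	0x0000000fUL
#define PMB_E_SHIFT	8

/* Same computation as the helpers in pmb.c: slot number shifted into
 * the entry field, OR'd with the address- or data-array base. */
static unsigned long mk_pmb_entry(unsigned int entry)
{
	return (entry & PMB_E_MASK) << PMB_E_SHIFT;
}

int main(void)
{
	unsigned int e = 3;	/* arbitrary slot */

	printf("addr reg: %#lx\n", mk_pmb_entry(e) | PMB_ADDR);	/* 0xf6100300 */
	printf("data reg: %#lx\n", mk_pmb_entry(e) | PMB_DATA);	/* 0xf7100300 */
	return 0;
}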
@@ -56,12 +55,12 @@ static int pmb_alloc_entry(void)
 	unsigned int pos;
 
repeat:
-	pos = find_first_zero_bit(&pmb_map, NR_PMB_ENTRIES);
+	pos = find_first_zero_bit(pmb_map, NR_PMB_ENTRIES);
 
 	if (unlikely(pos > NR_PMB_ENTRIES))
 		return -ENOSPC;
 
-	if (test_and_set_bit(pos, &pmb_map))
+	if (test_and_set_bit(pos, pmb_map))
 		goto repeat;
 
 	return pos;
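The allocation pattern left intact here (find a clear bit, claim it with an atomic test-and-set, retry on a race) is the stock kernel bitmap idiom. A minimal user-space model for illustration; the bitmap primitives below are simplified, non-atomic stand-ins, and note that find_first_zero_bit() returns the bit count when nothing is free, so the >= test catches a full map:

#include <stdio.h>
#include <limits.h>

#define NR_PMB_ENTRIES	16
#define BITS_PER_LONG	(sizeof(unsigned long) * CHAR_BIT)

/* What DECLARE_BITMAP(pmb_map, NR_PMB_ENTRIES) boils down to:
 * an unsigned long array with at least NR_PMB_ENTRIES bits. */
static unsigned long pmb_map[(NR_PMB_ENTRIES + BITS_PER_LONG - 1) / BITS_PER_LONG];

static int test_and_set_bit(unsigned int nr, unsigned long *map)
{
	unsigned long mask = 1UL << (nr % BITS_PER_LONG);
	unsigned long *p = &map[nr / BITS_PER_LONG];
	int old = (*p & mask) != 0;

	*p |= mask;
	return old;
}

static unsigned int find_first_zero_bit(const unsigned long *map, unsigned int bits)
{
	unsigned int i;

	for (i = 0; i < bits; i++)
		if (!(map[i / BITS_PER_LONG] & (1UL << (i % BITS_PER_LONG))))
			return i;
	return bits;	/* kernel convention: 'bits' means nothing free */
}

static int pmb_alloc_entry(void)
{
	unsigned int pos;

repeat:
	pos = find_first_zero_bit(pmb_map, NR_PMB_ENTRIES);
	if (pos >= NR_PMB_ENTRIES)	/* map is full */
		return -1;		/* -ENOSPC in the kernel */
	if (test_and_set_bit(pos, pmb_map))
		goto repeat;		/* lost a race; scan again */
	return pos;
}

int main(void)
{
	int i;

	for (i = 0; i < NR_PMB_ENTRIES + 1; i++)
		printf("alloc -> %d\n", pmb_alloc_entry());	/* last one fails */
	return 0;
}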
@@ -78,7 +77,7 @@ static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
 		if (pos < 0)
 			return ERR_PTR(pos);
 	} else {
-		if (test_and_set_bit(entry, &pmb_map))
+		if (test_and_set_bit(entry, pmb_map))
 			return ERR_PTR(-ENOSPC);
 		pos = entry;
 	}
@@ -104,16 +103,17 @@ static void pmb_free(struct pmb_entry *pmbe)
 	pmbe->flags	= 0;
 	pmbe->entry	= 0;
 
-	clear_bit(pos, &pmb_map);
+	clear_bit(pos, pmb_map);
 }
 
 /*
- * Must be in P2 for __set_pmb_entry()
+ * Must be run uncached.
  */
-static void __set_pmb_entry(unsigned long vpn, unsigned long ppn,
-			    unsigned long flags, int pos)
+static void set_pmb_entry(struct pmb_entry *pmbe)
 {
-	__raw_writel(vpn | PMB_V, mk_pmb_addr(pos));
+	jump_to_uncached();
+
+	__raw_writel(pmbe->vpn | PMB_V, mk_pmb_addr(pmbe->entry));
 
 #ifdef CONFIG_CACHE_WRITETHROUGH
 	/*
@@ -121,17 +121,12 @@ static void __set_pmb_entry(unsigned long vpn, unsigned long ppn,
 	 * invalid, so care must be taken to manually adjust cacheable
 	 * translations.
 	 */
-	if (likely(flags & PMB_C))
-		flags |= PMB_WT;
+	if (likely(pmbe->flags & PMB_C))
+		pmbe->flags |= PMB_WT;
 #endif
 
-	__raw_writel(ppn | flags | PMB_V, mk_pmb_data(pos));
-}
+	__raw_writel(pmbe->ppn | pmbe->flags | PMB_V, mk_pmb_data(pmbe->entry));
 
-static void set_pmb_entry(struct pmb_entry *pmbe)
-{
-	jump_to_uncached();
-	__set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, pmbe->entry);
 	back_to_cached();
 }
 
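Taken together with the previous hunk: the old pair (a cached set_pmb_entry() wrapper jumping to an uncached __set_pmb_entry()) collapses into one function that brackets the register writes itself. A condensed, user-space shape of the result, with the uncached-jump helpers and MMIO writes stubbed to prints and PMB_V given an illustrative value:

#include <stdio.h>

struct pmb_entry {
	unsigned long vpn, ppn, flags;
	unsigned int entry;
};

#define PMB_V	0x00000100UL	/* illustrative valid-bit value */

static void jump_to_uncached(void) { printf("-> uncached\n"); }
static void back_to_cached(void)   { printf("-> cached\n");   }

static void __raw_writel(unsigned long val, unsigned long reg)
{
	printf("writel 0x%08lx -> 0x%08lx\n", val, reg);
}

static unsigned long mk_pmb_addr(unsigned int e) { return 0xf6100000UL | (e << 8); }
static unsigned long mk_pmb_data(unsigned int e) { return 0xf7100000UL | (e << 8); }

/* The folded function: no cached wrapper around a separate helper. */
static void set_pmb_entry(struct pmb_entry *pmbe)
{
	jump_to_uncached();
	__raw_writel(pmbe->vpn | PMB_V, mk_pmb_addr(pmbe->entry));
	/* (the CONFIG_CACHE_WRITETHROUGH flag fixup sits here in the kernel) */
	__raw_writel(pmbe->ppn | pmbe->flags | PMB_V, mk_pmb_data(pmbe->entry));
	back_to_cached();
}

int main(void)
{
	struct pmb_entry e = { .vpn = 0x80000000, .ppn = 0x0c000000,
			       .flags = 0, .entry = 0 };

	set_pmb_entry(&e);
	return 0;
}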
@@ -140,9 +135,6 @@ static void clear_pmb_entry(struct pmb_entry *pmbe)
 	unsigned int entry = pmbe->entry;
 	unsigned long addr;
 
-	if (unlikely(entry >= NR_PMB_ENTRIES))
-		return;
-
 	jump_to_uncached();
 
 	/* Clear V-bit */
@@ -155,15 +147,14 @@ static void clear_pmb_entry(struct pmb_entry *pmbe)
 	back_to_cached();
 }
 
-
 static struct {
 	unsigned long size;
 	int flag;
 } pmb_sizes[] = {
-	{ .size = 0x20000000, .flag = PMB_SZ_512M, },
-	{ .size = 0x08000000, .flag = PMB_SZ_128M, },
-	{ .size = 0x04000000, .flag = PMB_SZ_64M,  },
-	{ .size = 0x01000000, .flag = PMB_SZ_16M,  },
+	{ .size = SZ_512M, .flag = PMB_SZ_512M, },
+	{ .size = SZ_128M, .flag = PMB_SZ_128M, },
+	{ .size = SZ_64M,  .flag = PMB_SZ_64M,  },
+	{ .size = SZ_16M,  .flag = PMB_SZ_16M,  },
 };
 
 long pmb_remap(unsigned long vaddr, unsigned long phys,
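The SZ_* names come from <asm/sizes.h>, newly included in the first pmb.c hunk, and are plain constants; the table's values are unchanged. From the removed literals:

	#define SZ_16M	0x01000000	/*  16 MiB */
	#define SZ_64M	0x04000000	/*  64 MiB */
	#define SZ_128M	0x08000000	/* 128 MiB */
	#define SZ_512M	0x20000000	/* 512 MiB */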
@@ -230,34 +221,36 @@ again:
 	return wanted - size;
 
out:
-	if (pmbp)
-		__pmb_unmap(pmbp);
+	pmb_unmap_entry(pmbp);
 
 	return err;
 }
 
 void pmb_unmap(unsigned long addr)
 {
-	struct pmb_entry *pmbe = NULL;
+	struct pmb_entry *pmbe;
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
-		if (test_bit(i, &pmb_map)) {
+		if (test_bit(i, pmb_map)) {
 			pmbe = &pmb_entry_list[i];
-			if (pmbe->vpn == addr)
+			if (pmbe->vpn == addr) {
+				pmb_unmap_entry(pmbe);
 				break;
+			}
 		}
 	}
+}
 
+static void pmb_unmap_entry(struct pmb_entry *pmbe)
+{
 	if (unlikely(!pmbe))
 		return;
 
-	__pmb_unmap(pmbe);
-}
-
-static void __pmb_unmap(struct pmb_entry *pmbe)
-{
-	BUG_ON(!test_bit(pmbe->entry, &pmb_map));
+	if (!test_bit(pmbe->entry, pmb_map)) {
+		WARN_ON(1);
+		return;
+	}
 
 	do {
 		struct pmb_entry *pmblink = pmbe;
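Net effect of this hunk: the NULL guard moves into the renamed pmb_unmap_entry(), so pmb_remap()'s error path can call it unconditionally, and an entry whose bitmap bit is already clear now draws a recoverable WARN_ON instead of a fatal BUG_ON. A small self-contained sketch of the NULL-tolerant-teardown idiom (types and output are illustrative, not from the patch):

#include <stdio.h>

struct pmb_entry {
	unsigned int entry;
};

/* Push the guard into the callee so error paths need no caller-side check. */
static void pmb_unmap_entry(struct pmb_entry *pmbe)
{
	if (!pmbe)
		return;		/* error path may never have allocated one */
	printf("tearing down entry %u\n", pmbe->entry);
}

int main(void)
{
	struct pmb_entry e = { .entry = 5 };

	pmb_unmap_entry(NULL);	/* harmless no-op */
	pmb_unmap_entry(&e);
	return 0;
}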
@@ -326,7 +319,7 @@ static int pmb_synchronize_mappings(void)
 	 * jumping between the cached and uncached mappings and tearing
 	 * down alternating mappings while executing from the other.
 	 */
-	for (i = 0; i < PMB_ENTRY_MAX; i++) {
+	for (i = 0; i < NR_PMB_ENTRIES; i++) {
 		unsigned long addr, data;
 		unsigned long addr_val, data_val;
 		unsigned long ppn, vpn, flags;
@@ -494,7 +487,7 @@ static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state)
 	    prev_state.event == PM_EVENT_FREEZE) {
 		struct pmb_entry *pmbe;
 		for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
-			if (test_bit(i, &pmb_map)) {
+			if (test_bit(i, pmb_map)) {
 				pmbe = &pmb_entry_list[i];
 				set_pmb_entry(pmbe);
 			}