Diffstat (limited to 'arch/sh/mm/pmb.c')
-rw-r--r--	arch/sh/mm/pmb.c	268
1 file changed, 134 insertions(+), 134 deletions(-)
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index aade3110211..280f6a16603 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -35,29 +35,9 @@
 
 static void __pmb_unmap(struct pmb_entry *);
 
-static struct kmem_cache *pmb_cache;
+static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
 static unsigned long pmb_map;
 
-static struct pmb_entry pmb_init_map[] = {
-	/* vpn ppn flags (ub/sz/c/wt) */
-
-	/* P1 Section Mappings */
-	{ 0x80000000, 0x00000000, PMB_SZ_64M | PMB_C, },
-	{ 0x84000000, 0x04000000, PMB_SZ_64M | PMB_C, },
-	{ 0x88000000, 0x08000000, PMB_SZ_128M | PMB_C, },
-	{ 0x90000000, 0x10000000, PMB_SZ_64M | PMB_C, },
-	{ 0x94000000, 0x14000000, PMB_SZ_64M | PMB_C, },
-	{ 0x98000000, 0x18000000, PMB_SZ_64M | PMB_C, },
-
-	/* P2 Section Mappings */
-	{ 0xa0000000, 0x00000000, PMB_UB | PMB_SZ_64M | PMB_WT, },
-	{ 0xa4000000, 0x04000000, PMB_UB | PMB_SZ_64M | PMB_WT, },
-	{ 0xa8000000, 0x08000000, PMB_UB | PMB_SZ_128M | PMB_WT, },
-	{ 0xb0000000, 0x10000000, PMB_UB | PMB_SZ_64M | PMB_WT, },
-	{ 0xb4000000, 0x14000000, PMB_UB | PMB_SZ_64M | PMB_WT, },
-	{ 0xb8000000, 0x18000000, PMB_UB | PMB_SZ_64M | PMB_WT, },
-};
-
 static inline unsigned long mk_pmb_entry(unsigned int entry)
 {
 	return (entry & PMB_E_MASK) << PMB_E_SHIFT;
@@ -73,81 +53,68 @@ static inline unsigned long mk_pmb_data(unsigned int entry)
 	return mk_pmb_entry(entry) | PMB_DATA;
 }
 
-static DEFINE_SPINLOCK(pmb_list_lock);
-static struct pmb_entry *pmb_list;
-
-static inline void pmb_list_add(struct pmb_entry *pmbe)
+static int pmb_alloc_entry(void)
 {
-	struct pmb_entry **p, *tmp;
+	unsigned int pos;
 
-	p = &pmb_list;
-	while ((tmp = *p) != NULL)
-		p = &tmp->next;
+repeat:
+	pos = find_first_zero_bit(&pmb_map, NR_PMB_ENTRIES);
 
-	pmbe->next = tmp;
-	*p = pmbe;
-}
+	if (unlikely(pos > NR_PMB_ENTRIES))
+		return -ENOSPC;
 
-static inline void pmb_list_del(struct pmb_entry *pmbe)
-{
-	struct pmb_entry **p, *tmp;
+	if (test_and_set_bit(pos, &pmb_map))
+		goto repeat;
 
-	for (p = &pmb_list; (tmp = *p); p = &tmp->next)
-		if (tmp == pmbe) {
-			*p = tmp->next;
-			return;
-		}
+	return pos;
 }
 
-struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
-			    unsigned long flags)
+static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
+				   unsigned long flags, int entry)
 {
 	struct pmb_entry *pmbe;
+	int pos;
+
+	if (entry == PMB_NO_ENTRY) {
+		pos = pmb_alloc_entry();
+		if (pos < 0)
+			return ERR_PTR(pos);
+	} else {
+		if (test_bit(entry, &pmb_map))
+			return ERR_PTR(-ENOSPC);
+		pos = entry;
+	}
 
-	pmbe = kmem_cache_alloc(pmb_cache, GFP_KERNEL);
+	pmbe = &pmb_entry_list[pos];
 	if (!pmbe)
 		return ERR_PTR(-ENOMEM);
 
 	pmbe->vpn = vpn;
 	pmbe->ppn = ppn;
 	pmbe->flags = flags;
-
-	spin_lock_irq(&pmb_list_lock);
-	pmb_list_add(pmbe);
-	spin_unlock_irq(&pmb_list_lock);
+	pmbe->entry = pos;
 
 	return pmbe;
 }
 
-void pmb_free(struct pmb_entry *pmbe)
+static void pmb_free(struct pmb_entry *pmbe)
 {
-	spin_lock_irq(&pmb_list_lock);
-	pmb_list_del(pmbe);
-	spin_unlock_irq(&pmb_list_lock);
+	int pos = pmbe->entry;
 
-	kmem_cache_free(pmb_cache, pmbe);
+	pmbe->vpn = 0;
+	pmbe->ppn = 0;
+	pmbe->flags = 0;
+	pmbe->entry = 0;
+
+	clear_bit(pos, &pmb_map);
 }
 
 /*
  * Must be in P2 for __set_pmb_entry()
  */
-int __set_pmb_entry(unsigned long vpn, unsigned long ppn,
-		    unsigned long flags, int *entry)
+static void __set_pmb_entry(unsigned long vpn, unsigned long ppn,
+			    unsigned long flags, int pos)
 {
-	unsigned int pos = *entry;
-
-	if (unlikely(pos == PMB_NO_ENTRY))
-		pos = find_first_zero_bit(&pmb_map, NR_PMB_ENTRIES);
-
-repeat:
-	if (unlikely(pos > NR_PMB_ENTRIES))
-		return -ENOSPC;
-
-	if (test_and_set_bit(pos, &pmb_map)) {
-		pos = find_first_zero_bit(&pmb_map, NR_PMB_ENTRIES);
-		goto repeat;
-	}
-
 	ctrl_outl(vpn | PMB_V, mk_pmb_addr(pos));
 
 #ifdef CONFIG_CACHE_WRITETHROUGH
@@ -161,35 +128,21 @@ repeat:
 #endif
 
 	ctrl_outl(ppn | flags | PMB_V, mk_pmb_data(pos));
-
-	*entry = pos;
-
-	return 0;
 }
 
-int __uses_jump_to_uncached set_pmb_entry(struct pmb_entry *pmbe)
+static void __uses_jump_to_uncached set_pmb_entry(struct pmb_entry *pmbe)
 {
-	int ret;
-
 	jump_to_uncached();
-	ret = __set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, &pmbe->entry);
+	__set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, pmbe->entry);
 	back_to_cached();
-
-	return ret;
 }
 
-void __uses_jump_to_uncached clear_pmb_entry(struct pmb_entry *pmbe)
+static void __uses_jump_to_uncached clear_pmb_entry(struct pmb_entry *pmbe)
 {
 	unsigned int entry = pmbe->entry;
 	unsigned long addr;
 
-	/*
-	 * Don't allow clearing of wired init entries, P1 or P2 access
-	 * without a corresponding mapping in the PMB will lead to reset
-	 * by the TLB.
-	 */
-	if (unlikely(entry < ARRAY_SIZE(pmb_init_map) ||
-		     entry >= NR_PMB_ENTRIES))
+	if (unlikely(entry >= NR_PMB_ENTRIES))
 		return;
 
 	jump_to_uncached();
@@ -202,8 +155,6 @@ void __uses_jump_to_uncached clear_pmb_entry(struct pmb_entry *pmbe)
 	ctrl_outl(ctrl_inl(addr) & ~PMB_V, addr);
 
 	back_to_cached();
-
-	clear_bit(entry, &pmb_map);
 }
 
 
@@ -239,23 +190,17 @@ long pmb_remap(unsigned long vaddr, unsigned long phys,
 
 again:
	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
-		int ret;
-
 		if (size < pmb_sizes[i].size)
 			continue;
 
-		pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag);
+		pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag,
+				 PMB_NO_ENTRY);
 		if (IS_ERR(pmbe)) {
 			err = PTR_ERR(pmbe);
 			goto out;
 		}
 
-		ret = set_pmb_entry(pmbe);
-		if (ret != 0) {
-			pmb_free(pmbe);
-			err = -EBUSY;
-			goto out;
-		}
+		set_pmb_entry(pmbe);
 
 		phys += pmb_sizes[i].size;
 		vaddr += pmb_sizes[i].size;
@@ -292,11 +237,16 @@ out:
 
 void pmb_unmap(unsigned long addr)
 {
-	struct pmb_entry **p, *pmbe;
+	struct pmb_entry *pmbe = NULL;
+	int i;
 
-	for (p = &pmb_list; (pmbe = *p); p = &pmbe->next)
-		if (pmbe->vpn == addr)
-			break;
+	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
+		if (test_bit(i, &pmb_map)) {
+			pmbe = &pmb_entry_list[i];
+			if (pmbe->vpn == addr)
+				break;
+		}
+	}
 
 	if (unlikely(!pmbe))
 		return;
@@ -306,13 +256,22 @@ void pmb_unmap(unsigned long addr)
 
 static void __pmb_unmap(struct pmb_entry *pmbe)
 {
-	WARN_ON(!test_bit(pmbe->entry, &pmb_map));
+	BUG_ON(!test_bit(pmbe->entry, &pmb_map));
 
 	do {
 		struct pmb_entry *pmblink = pmbe;
 
-		if (pmbe->entry != PMB_NO_ENTRY)
-			clear_pmb_entry(pmbe);
+		/*
+		 * We may be called before this pmb_entry has been
+		 * entered into the PMB table via set_pmb_entry(), but
+		 * that's OK because we've allocated a unique slot for
+		 * this entry in pmb_alloc() (even if we haven't filled
+		 * it yet).
+		 *
+		 * Therefore, calling clear_pmb_entry() is safe as no
+		 * other mapping can be using that slot.
+		 */
+		clear_pmb_entry(pmbe);
 
 		pmbe = pmblink->link;
 
@@ -320,42 +279,34 @@ static void __pmb_unmap(struct pmb_entry *pmbe)
 	} while (pmbe);
 }
 
-static void pmb_cache_ctor(void *pmb)
+#ifdef CONFIG_PMB
+int __uses_jump_to_uncached pmb_init(void)
 {
-	struct pmb_entry *pmbe = pmb;
-
-	memset(pmb, 0, sizeof(struct pmb_entry));
-
-	pmbe->entry = PMB_NO_ENTRY;
-}
-
-static int __uses_jump_to_uncached pmb_init(void)
-{
-	unsigned int nr_entries = ARRAY_SIZE(pmb_init_map);
-	unsigned int entry, i;
-
-	BUG_ON(unlikely(nr_entries >= NR_PMB_ENTRIES));
-
-	pmb_cache = kmem_cache_create("pmb", sizeof(struct pmb_entry), 0,
-				      SLAB_PANIC, pmb_cache_ctor);
+	unsigned int i;
+	long size, ret;
 
 	jump_to_uncached();
 
 	/*
-	 * Ordering is important, P2 must be mapped in the PMB before we
-	 * can set PMB.SE, and P1 must be mapped before we jump back to
-	 * P1 space.
+	 * Insert PMB entries for the P1 and P2 areas so that, after
+	 * we've switched the MMU to 32-bit mode, the semantics of P1
+	 * and P2 are the same as in 29-bit mode, e.g.
+	 *
+	 * P1 - provides a cached window onto physical memory
+	 * P2 - provides an uncached window onto physical memory
 	 */
-	for (entry = 0; entry < nr_entries; entry++) {
-		struct pmb_entry *pmbe = pmb_init_map + entry;
+	size = __MEMORY_START + __MEMORY_SIZE;
 
-		__set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, &entry);
-	}
+	ret = pmb_remap(P1SEG, 0x00000000, size, PMB_C);
+	BUG_ON(ret != size);
+
+	ret = pmb_remap(P2SEG, 0x00000000, size, PMB_WT | PMB_UB);
+	BUG_ON(ret != size);
 
 	ctrl_outl(0, PMB_IRMCR);
 
 	/* PMB.SE and UB[7] */
-	ctrl_outl((1 << 31) | (1 << 7), PMB_PASCR);
+	ctrl_outl(PASCR_SE | (1 << 7), PMB_PASCR);
 
 	/* Flush out the TLB */
 	i = ctrl_inl(MMUCR);
@@ -366,7 +317,53 @@ static int __uses_jump_to_uncached pmb_init(void)
 
 	return 0;
 }
-arch_initcall(pmb_init);
+#else
+int __uses_jump_to_uncached pmb_init(void)
+{
+	int i;
+	unsigned long addr, data;
+
+	jump_to_uncached();
+
+	for (i = 0; i < PMB_ENTRY_MAX; i++) {
+		struct pmb_entry *pmbe;
+		unsigned long vpn, ppn, flags;
+
+		addr = PMB_DATA + (i << PMB_E_SHIFT);
+		data = ctrl_inl(addr);
+		if (!(data & PMB_V))
+			continue;
+
+		if (data & PMB_C) {
+#if defined(CONFIG_CACHE_WRITETHROUGH)
+			data |= PMB_WT;
+#elif defined(CONFIG_CACHE_WRITEBACK)
+			data &= ~PMB_WT;
+#else
+			data &= ~(PMB_C | PMB_WT);
+#endif
+		}
+		ctrl_outl(data, addr);
+
+		ppn = data & PMB_PFN_MASK;
+
+		flags = data & (PMB_C | PMB_WT | PMB_UB);
+		flags |= data & PMB_SZ_MASK;
+
+		addr = PMB_ADDR + (i << PMB_E_SHIFT);
+		data = ctrl_inl(addr);
+
+		vpn = data & PMB_PFN_MASK;
+
+		pmbe = pmb_alloc(vpn, ppn, flags, i);
+		WARN_ON(IS_ERR(pmbe));
+	}
+
+	back_to_cached();
+
+	return 0;
+}
+#endif /* CONFIG_PMB */
 
 static int pmb_seq_show(struct seq_file *file, void *iter)
 {
@@ -434,15 +431,18 @@ postcore_initcall(pmb_debugfs_init);
 static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state)
 {
 	static pm_message_t prev_state;
+	int i;
 
 	/* Restore the PMB after a resume from hibernation */
 	if (state.event == PM_EVENT_ON &&
 	    prev_state.event == PM_EVENT_FREEZE) {
 		struct pmb_entry *pmbe;
-		spin_lock_irq(&pmb_list_lock);
-		for (pmbe = pmb_list; pmbe; pmbe = pmbe->next)
-			set_pmb_entry(pmbe);
-		spin_unlock_irq(&pmb_list_lock);
+		for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
+			if (test_bit(i, &pmb_map)) {
+				pmbe = &pmb_entry_list[i];
+				set_pmb_entry(pmbe);
+			}
+		}
 	}
 	prev_state = state;
 	return 0;