Diffstat (limited to 'arch/sh/mm/pmb.c')
-rw-r--r--	arch/sh/mm/pmb.c	| 256
1 file changed, 158 insertions(+), 98 deletions(-)
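For reference, a caller-side sketch of the interface on the b/ side: pmb_remap_caller() hands back a void __iomem * cookie (NULL when no vmalloc area is available, an ERR_PTR() on invalid size, protection, or address), and pmb_unmap() takes that cookie back, returning 0 or -EINVAL. Everything in the sketch that does not appear in the diff is an assumption made for illustration only: the pmb_remap() wrapper, the __builtin_return_address(0) caller argument, the physical address, and the PAGE_KERNEL_NOCACHE protection.

#include <linux/err.h>		/* IS_ERR(), PTR_ERR() */
#include <asm/sizes.h>		/* SZ_64M */
#include <asm/pgtable.h>	/* pgprot_t, PAGE_KERNEL_NOCACHE */

/* Hypothetical wrapper: record the call site as the caller cookie. */
static inline void __iomem *pmb_remap(phys_addr_t phys, unsigned long size,
				      pgprot_t prot)
{
	return pmb_remap_caller(phys, size, prot,
				__builtin_return_address(0));
}

static int __init example_map_region(void)
{
	void __iomem *vaddr;

	/* Mappings smaller than 16MB are rejected with ERR_PTR(-EINVAL). */
	vaddr = pmb_remap(0x10000000, SZ_64M, PAGE_KERNEL_NOCACHE);
	if (vaddr == NULL)
		return -ENOMEM;		/* no vmalloc area available */
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);	/* bad size, prot, or address */

	/* ... access the region through vaddr ... */

	return pmb_unmap(vaddr);	/* 0 on success, -EINVAL if no entry matched */
}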
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index 35b364f931ea..9a516b89839a 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -23,6 +23,7 @@
 #include <linux/err.h>
 #include <linux/io.h>
 #include <linux/spinlock.h>
+#include <linux/vmalloc.h>
 #include <asm/sizes.h>
 #include <asm/system.h>
 #include <asm/uaccess.h>
@@ -51,6 +52,16 @@ struct pmb_entry {
 	struct pmb_entry *link;
 };
 
+static struct {
+	unsigned long size;
+	int flag;
+} pmb_sizes[] = {
+	{ .size = SZ_512M, .flag = PMB_SZ_512M, },
+	{ .size = SZ_128M, .flag = PMB_SZ_128M, },
+	{ .size = SZ_64M, .flag = PMB_SZ_64M, },
+	{ .size = SZ_16M, .flag = PMB_SZ_16M, },
+};
+
 static void pmb_unmap_entry(struct pmb_entry *, int depth);
 
 static DEFINE_RWLOCK(pmb_rwlock);
@@ -72,6 +83,88 @@ static __always_inline unsigned long mk_pmb_data(unsigned int entry)
 	return mk_pmb_entry(entry) | PMB_DATA;
 }
 
+static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn)
+{
+	return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
+}
+
+/*
+ * Ensure that the PMB entries match our cache configuration.
+ *
+ * When we are in 32-bit address extended mode, CCR.CB becomes
+ * invalid, so care must be taken to manually adjust cacheable
+ * translations.
+ */
+static __always_inline unsigned long pmb_cache_flags(void)
+{
+	unsigned long flags = 0;
+
+#if defined(CONFIG_CACHE_OFF)
+	flags |= PMB_WT | PMB_UB;
+#elif defined(CONFIG_CACHE_WRITETHROUGH)
+	flags |= PMB_C | PMB_WT | PMB_UB;
+#elif defined(CONFIG_CACHE_WRITEBACK)
+	flags |= PMB_C;
+#endif
+
+	return flags;
+}
+
+/*
+ * Convert typical pgprot value to the PMB equivalent
+ */
+static inline unsigned long pgprot_to_pmb_flags(pgprot_t prot)
+{
+	unsigned long pmb_flags = 0;
+	u64 flags = pgprot_val(prot);
+
+	if (flags & _PAGE_CACHABLE)
+		pmb_flags |= PMB_C;
+	if (flags & _PAGE_WT)
+		pmb_flags |= PMB_WT | PMB_UB;
+
+	return pmb_flags;
+}
+
+static bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b)
+{
+	return (b->vpn == (a->vpn + a->size)) &&
+	       (b->ppn == (a->ppn + a->size)) &&
+	       (b->flags == a->flags);
+}
+
+static bool pmb_size_valid(unsigned long size)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
+		if (pmb_sizes[i].size == size)
+			return true;
+
+	return false;
+}
+
+static inline bool pmb_addr_valid(unsigned long addr, unsigned long size)
+{
+	return (addr >= P1SEG && (addr + size - 1) < P3SEG);
+}
+
+static inline bool pmb_prot_valid(pgprot_t prot)
+{
+	return (pgprot_val(prot) & _PAGE_USER) == 0;
+}
+
+static int pmb_size_to_flags(unsigned long size)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
+		if (pmb_sizes[i].size == size)
+			return pmb_sizes[i].flag;
+
+	return 0;
+}
+
 static int pmb_alloc_entry(void)
 {
 	int pos;
@@ -139,33 +232,13 @@ static void pmb_free(struct pmb_entry *pmbe)
 }
 
 /*
- * Ensure that the PMB entries match our cache configuration.
- *
- * When we are in 32-bit address extended mode, CCR.CB becomes
- * invalid, so care must be taken to manually adjust cacheable
- * translations.
- */
-static __always_inline unsigned long pmb_cache_flags(void)
-{
-	unsigned long flags = 0;
-
-#if defined(CONFIG_CACHE_WRITETHROUGH)
-	flags |= PMB_C | PMB_WT | PMB_UB;
-#elif defined(CONFIG_CACHE_WRITEBACK)
-	flags |= PMB_C;
-#endif
-
-	return flags;
-}
-
-/*
  * Must be run uncached.
  */
 static void __set_pmb_entry(struct pmb_entry *pmbe)
 {
-	writel_uncached(pmbe->vpn | PMB_V, mk_pmb_addr(pmbe->entry));
-	writel_uncached(pmbe->ppn | pmbe->flags | PMB_V,
-			mk_pmb_data(pmbe->entry));
+	/* Set V-bit */
+	__raw_writel(pmbe->ppn | pmbe->flags | PMB_V, mk_pmb_data(pmbe->entry));
+	__raw_writel(pmbe->vpn | PMB_V, mk_pmb_addr(pmbe->entry));
 }
 
 static void __clear_pmb_entry(struct pmb_entry *pmbe)
@@ -193,39 +266,56 @@ static void set_pmb_entry(struct pmb_entry *pmbe)
 	spin_unlock_irqrestore(&pmbe->lock, flags);
 }
 
-static struct {
-	unsigned long size;
-	int flag;
-} pmb_sizes[] = {
-	{ .size = SZ_512M, .flag = PMB_SZ_512M, },
-	{ .size = SZ_128M, .flag = PMB_SZ_128M, },
-	{ .size = SZ_64M, .flag = PMB_SZ_64M, },
-	{ .size = SZ_16M, .flag = PMB_SZ_16M, },
-};
+int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
+		     unsigned long size, pgprot_t prot)
+{
+	return 0;
+}
 
-long pmb_remap(unsigned long vaddr, unsigned long phys,
-	       unsigned long size, pgprot_t prot)
+void __iomem *pmb_remap_caller(phys_addr_t phys, unsigned long size,
+			       pgprot_t prot, void *caller)
 {
 	struct pmb_entry *pmbp, *pmbe;
-	unsigned long wanted;
-	int pmb_flags, i;
-	long err;
-	u64 flags;
+	unsigned long pmb_flags;
+	int i, mapped;
+	unsigned long orig_addr, vaddr;
+	phys_addr_t offset, last_addr;
+	phys_addr_t align_mask;
+	unsigned long aligned;
+	struct vm_struct *area;
 
-	flags = pgprot_val(prot);
+	/*
+	 * Small mappings need to go through the TLB.
+	 */
+	if (size < SZ_16M)
+		return ERR_PTR(-EINVAL);
+	if (!pmb_prot_valid(prot))
+		return ERR_PTR(-EINVAL);
 
-	pmb_flags = PMB_WT | PMB_UB;
+	pmbp = NULL;
+	pmb_flags = pgprot_to_pmb_flags(prot);
+	mapped = 0;
 
-	/* Convert typical pgprot value to the PMB equivalent */
-	if (flags & _PAGE_CACHABLE) {
-		pmb_flags |= PMB_C;
+	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
+		if (size >= pmb_sizes[i].size)
+			break;
 
-		if ((flags & _PAGE_WT) == 0)
-			pmb_flags &= ~(PMB_WT | PMB_UB);
-	}
+	last_addr = phys + size;
+	align_mask = ~(pmb_sizes[i].size - 1);
+	offset = phys & ~align_mask;
+	phys &= align_mask;
+	aligned = ALIGN(last_addr, pmb_sizes[i].size) - phys;
 
-	pmbp = NULL;
-	wanted = size;
+	area = __get_vm_area_caller(aligned, VM_IOREMAP, uncached_end,
+				    P3SEG, caller);
+	if (!area)
+		return NULL;
+
+	area->phys_addr = phys;
+	orig_addr = vaddr = (unsigned long)area->addr;
+
+	if (!pmb_addr_valid(vaddr, aligned))
+		return ERR_PTR(-EFAULT);
 
 again:
 	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
@@ -237,19 +327,19 @@ again:
 		pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag,
 				 PMB_NO_ENTRY);
 		if (IS_ERR(pmbe)) {
-			err = PTR_ERR(pmbe);
-			goto out;
+			pmb_unmap_entry(pmbp, mapped);
+			return pmbe;
 		}
 
 		spin_lock_irqsave(&pmbe->lock, flags);
 
+		pmbe->size = pmb_sizes[i].size;
+
 		__set_pmb_entry(pmbe);
 
-		phys += pmb_sizes[i].size;
-		vaddr += pmb_sizes[i].size;
-		size -= pmb_sizes[i].size;
-
-		pmbe->size = pmb_sizes[i].size;
+		phys += pmbe->size;
+		vaddr += pmbe->size;
+		size -= pmbe->size;
 
 		/*
 		 * Link adjacent entries that span multiple PMB entries
@@ -269,6 +359,7 @@ again:
 		 * pmb_sizes[i].size again.
 		 */
 		i--;
+		mapped++;
 
 		spin_unlock_irqrestore(&pmbe->lock, flags);
 	}
@@ -276,61 +367,35 @@ again:
 	if (size >= SZ_16M)
 		goto again;
 
-	return wanted - size;
-
-out:
-	pmb_unmap_entry(pmbp, NR_PMB_ENTRIES);
-
-	return err;
+	return (void __iomem *)(offset + (char *)orig_addr);
 }
 
-void pmb_unmap(unsigned long addr)
+int pmb_unmap(void __iomem *addr)
 {
 	struct pmb_entry *pmbe = NULL;
-	int i;
+	unsigned long vaddr = (unsigned long __force)addr;
+	int i, found = 0;
 
 	read_lock(&pmb_rwlock);
 
 	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
 		if (test_bit(i, pmb_map)) {
 			pmbe = &pmb_entry_list[i];
-			if (pmbe->vpn == addr)
+			if (pmbe->vpn == vaddr) {
+				found = 1;
 				break;
+			}
 		}
 	}
 
 	read_unlock(&pmb_rwlock);
 
-	pmb_unmap_entry(pmbe, NR_PMB_ENTRIES);
-}
-
-static bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b)
-{
-	return (b->vpn == (a->vpn + a->size)) &&
-	       (b->ppn == (a->ppn + a->size)) &&
-	       (b->flags == a->flags);
-}
-
-static bool pmb_size_valid(unsigned long size)
-{
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
-		if (pmb_sizes[i].size == size)
-			return true;
-
-	return false;
-}
-
-static int pmb_size_to_flags(unsigned long size)
-{
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
-		if (pmb_sizes[i].size == size)
-			return pmb_sizes[i].flag;
+	if (found) {
+		pmb_unmap_entry(pmbe, NR_PMB_ENTRIES);
+		return 0;
+	}
 
-	return 0;
+	return -EINVAL;
 }
 
 static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
@@ -368,11 +433,6 @@ static void pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
 	write_unlock_irqrestore(&pmb_rwlock, flags);
 }
 
-static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn)
-{
-	return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
-}
-
 static void __init pmb_notify(void)
 {
 	int i;