Diffstat (limited to 'arch/sh/mm/pmb.c')
 -rw-r--r--  arch/sh/mm/pmb.c | 152
 1 file changed, 114 insertions(+), 38 deletions(-)
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index cb808a8aaffc..e65e8b8e2a5e 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -22,6 +22,8 @@
 #include <linux/seq_file.h>
 #include <linux/err.h>
 #include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/rwlock.h>
 #include <asm/sizes.h>
 #include <asm/system.h>
 #include <asm/uaccess.h>
@@ -30,8 +32,29 @@
 #include <asm/mmu.h>
 #include <asm/mmu_context.h>
 
+struct pmb_entry;
+
+struct pmb_entry {
+	unsigned long vpn;
+	unsigned long ppn;
+	unsigned long flags;
+	unsigned long size;
+
+	spinlock_t lock;
+
+	/*
+	 * 0 .. NR_PMB_ENTRIES for specific entry selection, or
+	 * PMB_NO_ENTRY to search for a free one
+	 */
+	int entry;
+
+	/* Adjacent entry link for contiguous multi-entry mappings */
+	struct pmb_entry *link;
+};
+
 static void pmb_unmap_entry(struct pmb_entry *);
 
+static DEFINE_RWLOCK(pmb_rwlock);
 static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
 static DECLARE_BITMAP(pmb_map, NR_PMB_ENTRIES);
 
@@ -52,16 +75,13 @@ static __always_inline unsigned long mk_pmb_data(unsigned int entry)
 
 static int pmb_alloc_entry(void)
 {
-	unsigned int pos;
+	int pos;
 
-repeat:
 	pos = find_first_zero_bit(pmb_map, NR_PMB_ENTRIES);
-
-	if (unlikely(pos > NR_PMB_ENTRIES))
-		return -ENOSPC;
-
-	if (test_and_set_bit(pos, pmb_map))
-		goto repeat;
+	if (pos >= 0 && pos < NR_PMB_ENTRIES)
+		__set_bit(pos, pmb_map);
+	else
+		pos = -ENOSPC;
 
 	return pos;
 }
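
pmb_alloc() (next hunk) now takes pmb_rwlock for writing around the whole allocation, so the bitmap cannot change between the scan and the claim; the old test_and_set_bit()/goto-repeat loop collapses into find_first_zero_bit() plus a non-atomic __set_bit(). The bounds test also becomes a proper range check: find_first_zero_bit() returns the bitmap size itself when every bit is set, so the old "pos > NR_PMB_ENTRIES" comparison let a full map index one slot past the end. A minimal userspace sketch of the same pattern (plain C, not kernel code; the single-word bitmap and the _sketch names are illustrative assumptions):

	#include <errno.h>

	#define NR_PMB_ENTRIES 16

	static unsigned long pmb_map_sketch;	/* one bit per PMB slot */

	/* Caller is assumed to hold the equivalent of pmb_rwlock for writing. */
	static int pmb_alloc_entry_sketch(void)
	{
		int pos;

		/* find_first_zero_bit() analogue: ends at NR_PMB_ENTRIES when full */
		for (pos = 0; pos < NR_PMB_ENTRIES; pos++)
			if (!(pmb_map_sketch & (1UL << pos)))
				break;

		if (pos >= 0 && pos < NR_PMB_ENTRIES)
			pmb_map_sketch |= 1UL << pos;	/* non-atomic __set_bit() */
		else
			pos = -ENOSPC;

		return pos;
	}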
@@ -70,21 +90,32 @@ static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
 			    unsigned long flags, int entry)
 {
 	struct pmb_entry *pmbe;
+	unsigned long irqflags;
+	void *ret = NULL;
 	int pos;
 
+	write_lock_irqsave(&pmb_rwlock, irqflags);
+
 	if (entry == PMB_NO_ENTRY) {
 		pos = pmb_alloc_entry();
-		if (pos < 0)
-			return ERR_PTR(pos);
+		if (unlikely(pos < 0)) {
+			ret = ERR_PTR(pos);
+			goto out;
+		}
 	} else {
-		if (test_and_set_bit(entry, pmb_map))
-			return ERR_PTR(-ENOSPC);
+		if (__test_and_set_bit(entry, pmb_map)) {
+			ret = ERR_PTR(-ENOSPC);
+			goto out;
+		}
+
 		pos = entry;
 	}
 
+	write_unlock_irqrestore(&pmb_rwlock, irqflags);
+
 	pmbe = &pmb_entry_list[pos];
-	if (!pmbe)
-		return ERR_PTR(-ENOMEM);
+
+	spin_lock_init(&pmbe->lock);
 
 	pmbe->vpn = vpn;
 	pmbe->ppn = ppn;
@@ -93,11 +124,15 @@ static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
 	pmbe->size = 0;
 
 	return pmbe;
+
+out:
+	write_unlock_irqrestore(&pmb_rwlock, irqflags);
+	return ret;
 }
 
 static void pmb_free(struct pmb_entry *pmbe)
 {
-	clear_bit(pmbe->entry, pmb_map);
+	__clear_bit(pmbe->entry, pmb_map);
 	pmbe->entry = PMB_NO_ENTRY;
 }
 
@@ -124,7 +159,7 @@ static __always_inline unsigned long pmb_cache_flags(void)
 /*
  * Must be run uncached.
  */
-static void set_pmb_entry(struct pmb_entry *pmbe)
+static void __set_pmb_entry(struct pmb_entry *pmbe)
 {
 	jump_to_uncached();
 
@@ -137,7 +172,7 @@ static void set_pmb_entry(struct pmb_entry *pmbe)
 	back_to_cached();
 }
 
-static void clear_pmb_entry(struct pmb_entry *pmbe)
+static void __clear_pmb_entry(struct pmb_entry *pmbe)
 {
 	unsigned int entry = pmbe->entry;
 	unsigned long addr;
@@ -154,6 +189,15 @@ static void clear_pmb_entry(struct pmb_entry *pmbe)
 	back_to_cached();
 }
 
+static void set_pmb_entry(struct pmb_entry *pmbe)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&pmbe->lock, flags);
+	__set_pmb_entry(pmbe);
+	spin_unlock_irqrestore(&pmbe->lock, flags);
+}
+
 static struct {
 	unsigned long size;
 	int flag;
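
The renames above follow the usual kernel convention for lock-free worker functions: the double-underscore variants (__set_pmb_entry(), __clear_pmb_entry()) assume the caller already holds pmbe->lock, while the plain set_pmb_entry() wrapper takes and releases the lock itself. Paths that hold the lock across several steps, such as pmb_remap() below, call the __ variants directly. A pthread sketch of the convention (illustrative only; struct entry and the *_program() names are hypothetical):

	#include <pthread.h>

	struct entry {
		pthread_mutex_t lock;
		unsigned long data;
	};

	/* __entry_program(): caller must hold e->lock, cf. __set_pmb_entry() */
	static void __entry_program(struct entry *e)
	{
		e->data |= 1UL;		/* stand-in for the uncached MMIO writes */
	}

	/* entry_program(): locking wrapper, cf. set_pmb_entry() */
	static void entry_program(struct entry *e)
	{
		pthread_mutex_lock(&e->lock);
		__entry_program(e);
		pthread_mutex_unlock(&e->lock);
	}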
@@ -190,6 +234,8 @@ long pmb_remap(unsigned long vaddr, unsigned long phys,
 
 again:
 	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
+		unsigned long flags;
+
 		if (size < pmb_sizes[i].size)
 			continue;
 
@@ -200,7 +246,9 @@ again:
 			goto out;
 		}
 
-		set_pmb_entry(pmbe);
+		spin_lock_irqsave(&pmbe->lock, flags);
+
+		__set_pmb_entry(pmbe);
 
 		phys	+= pmb_sizes[i].size;
 		vaddr	+= pmb_sizes[i].size;
@@ -212,8 +260,11 @@ again:
 		 * Link adjacent entries that span multiple PMB entries
 		 * for easier tear-down.
 		 */
-		if (likely(pmbp))
+		if (likely(pmbp)) {
+			spin_lock(&pmbp->lock);
 			pmbp->link = pmbe;
+			spin_unlock(&pmbp->lock);
+		}
 
 		pmbp = pmbe;
 
@@ -223,9 +274,11 @@ again:
 		 * pmb_sizes[i].size again.
 		 */
 		i--;
+
+		spin_unlock_irqrestore(&pmbe->lock, flags);
 	}
 
-	if (size >= 0x1000000)
+	if (size >= SZ_16M)
 		goto again;
 
 	return wanted - size;
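
Inside the pmb_remap() loop, each new entry's lock is taken with spin_lock_irqsave() and held to the end of the iteration; the previous entry's lock is nested inside it with a plain spin_lock() (interrupts are already disabled) just long enough to set pmbp->link. The acquisition order is therefore always current entry first, previous entry second, so the nesting stays consistent from one iteration to the next. The 0x1000000 change is cosmetic: SZ_16M from asm/sizes.h is the same constant, spelled readably. A condensed pthread sketch of the lock ordering (hypothetical names, not the kernel API):

	#include <pthread.h>
	#include <stddef.h>

	struct ent {
		pthread_mutex_t lock;
		struct ent *link;
	};

	/* Mirror of pmb_remap()'s ordering: cur->lock outermost, prev->lock nested. */
	static void link_entries(struct ent *prev, struct ent *cur)
	{
		pthread_mutex_lock(&cur->lock);		/* spin_lock_irqsave(&pmbe->lock) */

		if (prev) {
			pthread_mutex_lock(&prev->lock);	/* spin_lock(&pmbp->lock) */
			prev->link = cur;
			pthread_mutex_unlock(&prev->lock);
		}

		/* ... hardware entry is programmed here, still under cur->lock ... */

		pthread_mutex_unlock(&cur->lock);	/* spin_unlock_irqrestore() */
	}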
@@ -238,29 +291,32 @@ out:
 
 void pmb_unmap(unsigned long addr)
 {
-	struct pmb_entry *pmbe;
+	struct pmb_entry *pmbe = NULL;
 	int i;
 
+	read_lock(&pmb_rwlock);
+
 	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
 		if (test_bit(i, pmb_map)) {
 			pmbe = &pmb_entry_list[i];
-			if (pmbe->vpn == addr) {
-				pmb_unmap_entry(pmbe);
+			if (pmbe->vpn == addr)
 				break;
-			}
 		}
 	}
+
+	read_unlock(&pmb_rwlock);
+
+	pmb_unmap_entry(pmbe);
 }
 
 static void pmb_unmap_entry(struct pmb_entry *pmbe)
 {
+	unsigned long flags;
+
 	if (unlikely(!pmbe))
 		return;
 
-	if (!test_bit(pmbe->entry, pmb_map)) {
-		WARN_ON(1);
-		return;
-	}
+	write_lock_irqsave(&pmb_rwlock, flags);
 
 	do {
 		struct pmb_entry *pmblink = pmbe;
@@ -272,15 +328,17 @@ static void pmb_unmap_entry(struct pmb_entry *pmbe)
 		 * this entry in pmb_alloc() (even if we haven't filled
 		 * it yet).
 		 *
-		 * Therefore, calling clear_pmb_entry() is safe as no
+		 * Therefore, calling __clear_pmb_entry() is safe as no
 		 * other mapping can be using that slot.
 		 */
-		clear_pmb_entry(pmbe);
+		__clear_pmb_entry(pmbe);
 
 		pmbe = pmblink->link;
 
 		pmb_free(pmblink);
 	} while (pmbe);
+
+	write_unlock_irqrestore(&pmb_rwlock, flags);
 }
 
 static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn)
@@ -316,6 +374,7 @@ static int pmb_synchronize_mappings(void)
 	unsigned long addr, data;
 	unsigned long addr_val, data_val;
 	unsigned long ppn, vpn, flags;
+	unsigned long irqflags;
 	unsigned int size;
 	struct pmb_entry *pmbe;
 
@@ -364,21 +423,31 @@ static int pmb_synchronize_mappings(void)
 			continue;
 		}
 
+		spin_lock_irqsave(&pmbe->lock, irqflags);
+
 		for (j = 0; j < ARRAY_SIZE(pmb_sizes); j++)
 			if (pmb_sizes[j].flag == size)
 				pmbe->size = pmb_sizes[j].size;
 
-		/*
-		 * Compare the previous entry against the current one to
-		 * see if the entries span a contiguous mapping. If so,
-		 * setup the entry links accordingly.
-		 */
-		if (pmbp && ((pmbe->vpn == (pmbp->vpn + pmbp->size)) &&
-			     (pmbe->ppn == (pmbp->ppn + pmbp->size))))
-			pmbp->link = pmbe;
+		if (pmbp) {
+			spin_lock(&pmbp->lock);
+
+			/*
+			 * Compare the previous entry against the current one to
+			 * see if the entries span a contiguous mapping. If so,
+			 * setup the entry links accordingly.
+			 */
+			if ((pmbe->vpn == (pmbp->vpn + pmbp->size)) &&
+			    (pmbe->ppn == (pmbp->ppn + pmbp->size)))
+				pmbp->link = pmbe;
+
+			spin_unlock(&pmbp->lock);
+		}
 
 		pmbp = pmbe;
 
+		spin_unlock_irqrestore(&pmbe->lock, irqflags);
+
 		pr_info("\t0x%08lx -> 0x%08lx [ %ldMB %scached ]\n",
 			vpn >> PAGE_SHIFT, ppn >> PAGE_SHIFT, pmbe->size >> 20,
 			(data_val & PMB_C) ? "" : "un");
@@ -493,14 +562,21 @@ static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state)
 	if (state.event == PM_EVENT_ON &&
 	    prev_state.event == PM_EVENT_FREEZE) {
 		struct pmb_entry *pmbe;
+
+		read_lock(&pmb_rwlock);
+
 		for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
 			if (test_bit(i, pmb_map)) {
 				pmbe = &pmb_entry_list[i];
 				set_pmb_entry(pmbe);
 			}
 		}
+
+		read_unlock(&pmb_rwlock);
 	}
+
 	prev_state = state;
+
 	return 0;
 }
 
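Taken together, the hunks introduce a two-level scheme: pmb_rwlock serializes the pmb_map bitmap and pmb_entry_list bookkeeping (write-held in pmb_alloc() and pmb_unmap_entry(), read-held for the scans in pmb_unmap() and the resume path), while each entry's spinlock guards its hardware slot and link pointer. A compact pthread sketch of that shape (hypothetical names; a NULL return stands in for ERR_PTR(-ENOSPC), and the slot mutexes are assumed initialized elsewhere, cf. spin_lock_init()):

	#include <pthread.h>
	#include <stddef.h>

	#define NENT 16

	static pthread_rwlock_t map_rwlock = PTHREAD_RWLOCK_INITIALIZER;
	static unsigned long map;			/* pmb_map analogue */

	struct slot {
		pthread_mutex_t lock;			/* pmbe->lock analogue */
		unsigned long state;
	};
	static struct slot slots[NENT];			/* pmb_entry_list analogue */

	static struct slot *slot_alloc(void)
	{
		struct slot *s = NULL;
		int i;

		pthread_rwlock_wrlock(&map_rwlock);	/* write_lock_irqsave() */
		for (i = 0; i < NENT; i++)
			if (!(map & (1UL << i))) {
				map |= 1UL << i;	/* __set_bit() */
				s = &slots[i];
				break;
			}
		pthread_rwlock_unlock(&map_rwlock);	/* write_unlock_irqrestore() */

		return s;
	}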