 arch/sh/mm/pmb.c | 81 +++++++++++++++----------------------------------
 1 file changed, 26 insertions(+), 55 deletions(-)
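
This patch drops the private linked list of PMB entries and the slab cache that backed it. Entries now live in a fixed pmb_entry_list[] array sized at NR_PMB_ENTRIES, with the existing pmb_map bitmap acting as the allocator: pmb_alloc() takes the array slot whose bit it claimed, and pmb_free() zeroes the slot and clears the bit. That in turn retires pmb_list_lock, pmb_list_add()/pmb_list_del(), and the pmb_cache constructor.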
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index f01c8191144c..baf365fcdb4a 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -35,7 +35,7 @@
 
 static void __pmb_unmap(struct pmb_entry *);
 
-static struct kmem_cache *pmb_cache;
+static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
 static unsigned long pmb_map;
 
 static struct pmb_entry pmb_init_map[] = {
@@ -73,32 +73,6 @@ static inline unsigned long mk_pmb_data(unsigned int entry)
 	return mk_pmb_entry(entry) | PMB_DATA;
 }
 
-static DEFINE_SPINLOCK(pmb_list_lock);
-static struct pmb_entry *pmb_list;
-
-static inline void pmb_list_add(struct pmb_entry *pmbe)
-{
-	struct pmb_entry **p, *tmp;
-
-	p = &pmb_list;
-	while ((tmp = *p) != NULL)
-		p = &tmp->next;
-
-	pmbe->next = tmp;
-	*p = pmbe;
-}
-
-static inline void pmb_list_del(struct pmb_entry *pmbe)
-{
-	struct pmb_entry **p, *tmp;
-
-	for (p = &pmb_list; (tmp = *p); p = &tmp->next)
-		if (tmp == pmbe) {
-			*p = tmp->next;
-			return;
-		}
-}
-
 static int pmb_alloc_entry(void)
 {
 	unsigned int pos;
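
The helpers removed above were the only users of pmb_list. pmb_alloc_entry(), whose top is visible here, already hands out slot numbers tracked in the pmb_map bitmap (see the clear_bit()/test_bit() calls below), so the bitmap plus a fixed array can carry all the state the list used to.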
@@ -125,7 +99,7 @@ static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
 	if (pos < 0)
 		return ERR_PTR(pos);
 
-	pmbe = kmem_cache_alloc(pmb_cache, GFP_KERNEL);
+	pmbe = &pmb_entry_list[pos];
 	if (!pmbe)
 		return ERR_PTR(-ENOMEM);
 
@@ -134,20 +108,19 @@ static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
 	pmbe->flags = flags;
 	pmbe->entry = pos;
 
-	spin_lock_irq(&pmb_list_lock);
-	pmb_list_add(pmbe);
-	spin_unlock_irq(&pmb_list_lock);
-
 	return pmbe;
 }
 
 static void pmb_free(struct pmb_entry *pmbe)
 {
-	spin_lock_irq(&pmb_list_lock);
-	pmb_list_del(pmbe);
-	spin_unlock_irq(&pmb_list_lock);
+	int pos = pmbe->entry;
 
-	kmem_cache_free(pmb_cache, pmbe);
+	pmbe->vpn   = 0;
+	pmbe->ppn   = 0;
+	pmbe->flags = 0;
+	pmbe->entry = 0;
+
+	clear_bit(pos, &pmb_map);
 }
 
 /*
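
Taken together, the two hunks above implement a classic fixed-pool allocator: a static array of entries plus a bitmap of live slots. The following standalone C sketch illustrates the pattern; it is an illustration only — the struct is cut down to the fields this patch touches, the pool size is arbitrary, and the kernel's atomic bitops (set_bit(), clear_bit(), find-first-zero scans) are modeled with plain shifts and masks.

/*
 * Minimal userspace sketch of a bitmap-backed fixed pool,
 * mirroring the scheme pmb.c adopts in this patch.
 */
#include <stdio.h>

#define NR_ENTRIES 16

struct pmb_entry {
	unsigned long vpn, ppn, flags;
	int entry;
};

static struct pmb_entry entry_list[NR_ENTRIES];
static unsigned long entry_map;		/* bit N set => slot N in use */

/* Find the first clear bit, mark it used, hand out that slot. */
static struct pmb_entry *entry_alloc(void)
{
	int pos;

	for (pos = 0; pos < NR_ENTRIES; pos++)
		if (!(entry_map & (1UL << pos)))
			break;
	if (pos == NR_ENTRIES)
		return NULL;		/* pool exhausted */

	entry_map |= 1UL << pos;
	entry_list[pos].entry = pos;
	return &entry_list[pos];
}

/* Freeing is just zeroing the slot and clearing its bit. */
static void entry_free(struct pmb_entry *pmbe)
{
	int pos = pmbe->entry;

	pmbe->vpn = pmbe->ppn = pmbe->flags = 0;
	pmbe->entry = 0;
	entry_map &= ~(1UL << pos);
}

int main(void)
{
	struct pmb_entry *e = entry_alloc();

	printf("got slot %d, map=%#lx\n", e->entry, entry_map);
	entry_free(e);
	printf("freed,     map=%#lx\n", entry_map);
	return 0;
}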
@@ -202,8 +175,6 @@ static void __uses_jump_to_uncached clear_pmb_entry(struct pmb_entry *pmbe)
 	ctrl_outl(ctrl_inl(addr) & ~PMB_V, addr);
 
 	back_to_cached();
-
-	clear_bit(entry, &pmb_map);
 }
 
 
@@ -285,11 +256,16 @@ out:
 
 void pmb_unmap(unsigned long addr)
 {
-	struct pmb_entry **p, *pmbe;
+	struct pmb_entry *pmbe = NULL;
+	int i;
 
-	for (p = &pmb_list; (pmbe = *p); p = &pmbe->next)
-		if (pmbe->vpn == addr)
-			break;
+	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
+		if (test_bit(i, &pmb_map)) {
+			pmbe = &pmb_entry_list[i];
+			if (pmbe->vpn == addr)
+				break;
+		}
+	}
 
 	if (unlikely(!pmbe))
 		return;
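
With the list gone, pmb_unmap() locates its entry by scanning the pool, and test_bit() skips unallocated slots so the zeroed fields left behind by pmb_free() cannot match. One subtlety worth noting: if some bits are set but none of the live entries matches addr, pmbe is left pointing at the last slot examined rather than NULL, so the unlikely(!pmbe) guard only covers the fully-empty case.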
@@ -299,7 +275,7 @@ void pmb_unmap(unsigned long addr)
 
 static void __pmb_unmap(struct pmb_entry *pmbe)
 {
-	WARN_ON(!test_bit(pmbe->entry, &pmb_map));
+	BUG_ON(!test_bit(pmbe->entry, &pmb_map));
 
 	do {
 		struct pmb_entry *pmblink = pmbe;
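
The WARN_ON() here is promoted to BUG_ON() because the invariant got stronger: the bitmap is now the allocator itself, so an entry reaching __pmb_unmap() with its bit clear means the slot could already have been handed out again. Where the old check merely logged a backtrace and continued, halting is presumably the safer response once continuing would mean tearing down someone else's mapping.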
@@ -322,11 +298,6 @@ static void __pmb_unmap(struct pmb_entry *pmbe)
 	} while (pmbe);
 }
 
-static void pmb_cache_ctor(void *pmb)
-{
-	memset(pmb, 0, sizeof(struct pmb_entry));
-}
-
 int __uses_jump_to_uncached pmb_init(void)
 {
 	unsigned int nr_entries = ARRAY_SIZE(pmb_init_map);
@@ -334,9 +305,6 @@ int __uses_jump_to_uncached pmb_init(void)
 
 	BUG_ON(unlikely(nr_entries >= NR_PMB_ENTRIES));
 
-	pmb_cache = kmem_cache_create("pmb", sizeof(struct pmb_entry), 0,
-				      SLAB_PANIC, pmb_cache_ctor);
-
 	jump_to_uncached();
 
 	/*
@@ -431,15 +399,18 @@ postcore_initcall(pmb_debugfs_init);
 static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state)
 {
 	static pm_message_t prev_state;
+	int i;
 
 	/* Restore the PMB after a resume from hibernation */
 	if (state.event == PM_EVENT_ON &&
 	    prev_state.event == PM_EVENT_FREEZE) {
 		struct pmb_entry *pmbe;
-		spin_lock_irq(&pmb_list_lock);
-		for (pmbe = pmb_list; pmbe; pmbe = pmbe->next)
-			set_pmb_entry(pmbe);
-		spin_unlock_irq(&pmb_list_lock);
+		for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
+			if (test_bit(i, &pmb_map)) {
+				pmbe = &pmb_entry_list[i];
+				set_pmb_entry(pmbe);
+			}
+		}
 	}
 	prev_state = state;
 	return 0;
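
The hibernation-resume path is converted the same way: walk pmb_map and re-program every slot whose bit is set via set_pmb_entry(). Note that no lock replaces pmb_list_lock here; the patch appears to lean on the atomicity of the individual bitops on pmb_map, since the list structure the lock used to protect no longer exists.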