author | Matt Fleming <matt@console-pimps.org> | 2009-10-06 17:22:23 -0400
committer | Paul Mundt <lethal@linux-sh.org> | 2009-10-10 08:49:57 -0400
commit | 067784f6239e08a084b4d8d597e14435331eae51 (patch)
tree | 0bfe60656a538d486df6bf79dc238cbf48b0ce04 /arch/sh/mm/pmb.c
parent | 5e3679c594e3a9bf819347bc59f70e03f2c6b272 (diff)
sh: Allocate PMB entry slot earlier
Simplify set_pmb_entry() by removing the possibility of not finding a
free slot in the PMB. Instead we now allocate a slot in pmb_alloc() so
that if there are no free slots we fail at allocation time, rather than
in set_pmb_entry().
Signed-off-by: Matt Fleming <matt@console-pimps.org>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
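
The pattern the commit adopts, reduced to a runnable userspace sketch: claim the scarce resource when the object is created, and report exhaustion immediately through an ERR_PTR-encoded return value, the same idiom pmb_alloc() now uses. Everything below (entry_alloc, pool, the slot count) is illustrative, not kernel code; only the ERR_PTR/IS_ERR/PTR_ERR helpers mirror include/linux/err.h.

```c
#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095

/* ERR_PTR idiom (cf. include/linux/err.h): stash a negative errno in
 * the pointer itself, so one return value is either object or error. */
static inline void *ERR_PTR(long err)      { return (void *)err; }
static inline long  PTR_ERR(const void *p) { return (long)p; }
static inline int   IS_ERR(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

struct entry { int slot; };
static struct entry pool[4];
static int used;

/* Fail here, at allocation time, when the pool is exhausted --
 * callers never get an object that cannot later be installed. */
static struct entry *entry_alloc(void)
{
	if (used == 4)
		return ERR_PTR(-ENOSPC);
	pool[used].slot = used;
	return &pool[used++];
}

int main(void)
{
	for (int i = 0; i < 5; i++) {
		struct entry *e = entry_alloc();
		if (IS_ERR(e)) {
			printf("alloc %d failed: errno %ld\n", i, -PTR_ERR(e));
			return 1;
		}
		printf("alloc %d -> slot %d\n", i, e->slot);
	}
	return 0;
}
```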
Diffstat (limited to 'arch/sh/mm/pmb.c')
-rw-r--r-- | arch/sh/mm/pmb.c | 80
1 file changed, 39 insertions, 41 deletions
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index aade31102112..b8a33949296a 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -99,10 +99,31 @@ static inline void pmb_list_del(struct pmb_entry *pmbe)
 	}
 }
 
+static int pmb_alloc_entry(void)
+{
+	unsigned int pos;
+
+repeat:
+	pos = find_first_zero_bit(&pmb_map, NR_PMB_ENTRIES);
+
+	if (unlikely(pos > NR_PMB_ENTRIES))
+		return -ENOSPC;
+
+	if (test_and_set_bit(pos, &pmb_map))
+		goto repeat;
+
+	return pos;
+}
+
 struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
 			    unsigned long flags)
 {
 	struct pmb_entry *pmbe;
+	int pos;
+
+	pos = pmb_alloc_entry();
+	if (pos < 0)
+		return ERR_PTR(pos);
 
 	pmbe = kmem_cache_alloc(pmb_cache, GFP_KERNEL);
 	if (!pmbe)
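
The new pmb_alloc_entry() above pairs find_first_zero_bit() with test_and_set_bit() so that two CPUs racing for the same free slot cannot both claim it; the loser simply rescans. A minimal C11 reduction of that retry loop, using illustrative names (alloc_entry, NR_ENTRIES, map) rather than the kernel's APIs:

```c
#include <stdatomic.h>
#include <stdio.h>
#include <errno.h>

#define NR_ENTRIES 16
static atomic_uint map;			/* bit n set => entry n taken */

static int alloc_entry(void)
{
	unsigned int snapshot, pos;
repeat:
	snapshot = atomic_load(&map);
	for (pos = 0; pos < NR_ENTRIES; pos++)	/* find_first_zero_bit() */
		if (!(snapshot & (1u << pos)))
			break;
	if (pos >= NR_ENTRIES)
		return -ENOSPC;
	/* test_and_set_bit(): claim the bit atomically; if another
	 * thread set it since our scan, rescan from the top. */
	if (atomic_fetch_or(&map, 1u << pos) & (1u << pos))
		goto repeat;
	return pos;
}

int main(void)
{
	printf("first slot: %d\n", alloc_entry());
	printf("second slot: %d\n", alloc_entry());
	return 0;
}
```

Note that a scan which finds no zero bit yields the bitmap size itself, which is why the sketch treats pos >= NR_ENTRIES as exhaustion.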
@@ -111,6 +132,7 @@ struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
 	pmbe->vpn	= vpn;
 	pmbe->ppn	= ppn;
 	pmbe->flags	= flags;
+	pmbe->entry	= pos;
 
 	spin_lock_irq(&pmb_list_lock);
 	pmb_list_add(pmbe);
@@ -131,23 +153,9 @@ void pmb_free(struct pmb_entry *pmbe)
 /*
  * Must be in P2 for __set_pmb_entry()
  */
-int __set_pmb_entry(unsigned long vpn, unsigned long ppn,
-		    unsigned long flags, int *entry)
+void __set_pmb_entry(unsigned long vpn, unsigned long ppn,
+		     unsigned long flags, int pos)
 {
-	unsigned int pos = *entry;
-
-	if (unlikely(pos == PMB_NO_ENTRY))
-		pos = find_first_zero_bit(&pmb_map, NR_PMB_ENTRIES);
-
-repeat:
-	if (unlikely(pos > NR_PMB_ENTRIES))
-		return -ENOSPC;
-
-	if (test_and_set_bit(pos, &pmb_map)) {
-		pos = find_first_zero_bit(&pmb_map, NR_PMB_ENTRIES);
-		goto repeat;
-	}
-
 	ctrl_outl(vpn | PMB_V, mk_pmb_addr(pos));
 
 #ifdef CONFIG_CACHE_WRITETHROUGH
@@ -161,21 +169,13 @@ repeat:
 #endif
 
 	ctrl_outl(ppn | flags | PMB_V, mk_pmb_data(pos));
-
-	*entry = pos;
-
-	return 0;
 }
 
-int __uses_jump_to_uncached set_pmb_entry(struct pmb_entry *pmbe)
+void __uses_jump_to_uncached set_pmb_entry(struct pmb_entry *pmbe)
 {
-	int ret;
-
 	jump_to_uncached();
-	ret = __set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, &pmbe->entry);
+	__set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, pmbe->entry);
 	back_to_cached();
-
-	return ret;
 }
 
 void __uses_jump_to_uncached clear_pmb_entry(struct pmb_entry *pmbe)
@@ -239,8 +239,6 @@ long pmb_remap(unsigned long vaddr, unsigned long phys,
 
 again:
 	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
-		int ret;
-
 		if (size < pmb_sizes[i].size)
 			continue;
 
@@ -250,12 +248,7 @@ again:
 			goto out;
 		}
 
-		ret = set_pmb_entry(pmbe);
-		if (ret != 0) {
-			pmb_free(pmbe);
-			err = -EBUSY;
-			goto out;
-		}
+		set_pmb_entry(pmbe);
 
 		phys	+= pmb_sizes[i].size;
 		vaddr	+= pmb_sizes[i].size;
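
With set_pmb_entry() now void, the only failure exit left in this loop is the allocation check just above the removed lines (its tail is the `goto out;` visible in the context); the -EBUSY rollback path is gone. A hedged reconstruction of how the caller side reads after this change; the exact flag expression is assumed from context, not shown in this diff:

```c
pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag);
if (IS_ERR(pmbe)) {
	err = PTR_ERR(pmbe);	/* e.g. -ENOSPC when the PMB is full */
	goto out;
}

set_pmb_entry(pmbe);		/* void now: programming the slot cannot fail */
```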
@@ -311,8 +304,17 @@ static void __pmb_unmap(struct pmb_entry *pmbe)
 	do {
 		struct pmb_entry *pmblink = pmbe;
 
-		if (pmbe->entry != PMB_NO_ENTRY)
-			clear_pmb_entry(pmbe);
+		/*
+		 * We may be called before this pmb_entry has been
+		 * entered into the PMB table via set_pmb_entry(), but
+		 * that's OK because we've allocated a unique slot for
+		 * this entry in pmb_alloc() (even if we haven't filled
+		 * it yet).
+		 *
+		 * Therefore, calling clear_pmb_entry() is safe as no
+		 * other mapping can be using that slot.
+		 */
+		clear_pmb_entry(pmbe);
 
 		pmbe = pmblink->link;
 
@@ -322,11 +324,7 @@ static void __pmb_unmap(struct pmb_entry *pmbe)
 
 static void pmb_cache_ctor(void *pmb)
 {
-	struct pmb_entry *pmbe = pmb;
-
 	memset(pmb, 0, sizeof(struct pmb_entry));
-
-	pmbe->entry = PMB_NO_ENTRY;
 }
 
 static int __uses_jump_to_uncached pmb_init(void)
@@ -349,7 +347,7 @@ static int __uses_jump_to_uncached pmb_init(void)
 	for (entry = 0; entry < nr_entries; entry++) {
 		struct pmb_entry *pmbe = pmb_init_map + entry;
 
-		__set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, &entry);
+		__set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, entry);
 	}
 
 	ctrl_outl(0, PMB_IRMCR);