Diffstat (limited to 'arch/sh/mm/pmb.c')
-rw-r--r--  arch/sh/mm/pmb.c | 79
1 file changed, 38 insertions(+), 41 deletions(-)
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index 02aae06527dc..b6a5a338145b 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -3,7 +3,7 @@
  *
  * Privileged Space Mapping Buffer (PMB) Support.
  *
- * Copyright (C) 2005, 2006 Paul Mundt
+ * Copyright (C) 2005, 2006, 2007 Paul Mundt
  *
  * P1/P2 Section mapping definitions from map32.h, which was:
  *
@@ -68,6 +68,32 @@ static inline unsigned long mk_pmb_data(unsigned int entry)
         return mk_pmb_entry(entry) | PMB_DATA;
 }
 
+static DEFINE_SPINLOCK(pmb_list_lock);
+static struct pmb_entry *pmb_list;
+
+static inline void pmb_list_add(struct pmb_entry *pmbe)
+{
+        struct pmb_entry **p, *tmp;
+
+        p = &pmb_list;
+        while ((tmp = *p) != NULL)
+                p = &tmp->next;
+
+        pmbe->next = tmp;
+        *p = pmbe;
+}
+
+static inline void pmb_list_del(struct pmb_entry *pmbe)
+{
+        struct pmb_entry **p, *tmp;
+
+        for (p = &pmb_list; (tmp = *p); p = &tmp->next)
+                if (tmp == pmbe) {
+                        *p = tmp->next;
+                        return;
+                }
+}
+
 struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
                             unsigned long flags)
 {
@@ -81,11 +107,19 @@ struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
         pmbe->ppn   = ppn;
         pmbe->flags = flags;
 
+        spin_lock_irq(&pmb_list_lock);
+        pmb_list_add(pmbe);
+        spin_unlock_irq(&pmb_list_lock);
+
         return pmbe;
 }
 
 void pmb_free(struct pmb_entry *pmbe)
 {
+        spin_lock_irq(&pmb_list_lock);
+        pmb_list_del(pmbe);
+        spin_unlock_irq(&pmb_list_lock);
+
         kmem_cache_free(pmb_cache, pmbe);
 }
 
@@ -167,31 +201,6 @@ void clear_pmb_entry(struct pmb_entry *pmbe)
         clear_bit(entry, &pmb_map);
 }
 
-static DEFINE_SPINLOCK(pmb_list_lock);
-static struct pmb_entry *pmb_list;
-
-static inline void pmb_list_add(struct pmb_entry *pmbe)
-{
-        struct pmb_entry **p, *tmp;
-
-        p = &pmb_list;
-        while ((tmp = *p) != NULL)
-                p = &tmp->next;
-
-        pmbe->next = tmp;
-        *p = pmbe;
-}
-
-static inline void pmb_list_del(struct pmb_entry *pmbe)
-{
-        struct pmb_entry **p, *tmp;
-
-        for (p = &pmb_list; (tmp = *p); p = &tmp->next)
-                if (tmp == pmbe) {
-                        *p = tmp->next;
-                        return;
-                }
-}
-
 static struct {
         unsigned long size;
@@ -283,25 +292,14 @@ void pmb_unmap(unsigned long addr)
         } while (pmbe);
 }
 
-static void pmb_cache_ctor(void *pmb, struct kmem_cache *cachep, unsigned long flags)
+static void pmb_cache_ctor(void *pmb, struct kmem_cache *cachep,
+                           unsigned long flags)
 {
         struct pmb_entry *pmbe = pmb;
 
         memset(pmb, 0, sizeof(struct pmb_entry));
 
-        spin_lock_irq(&pmb_list_lock);
-
         pmbe->entry = PMB_NO_ENTRY;
-        pmb_list_add(pmbe);
-
-        spin_unlock_irq(&pmb_list_lock);
-}
-
-static void pmb_cache_dtor(void *pmb, struct kmem_cache *cachep, unsigned long flags)
-{
-        spin_lock_irq(&pmb_list_lock);
-        pmb_list_del(pmb);
-        spin_unlock_irq(&pmb_list_lock);
 }
 
 static int __init pmb_init(void)
@@ -312,8 +310,7 @@ static int __init pmb_init(void)
         BUG_ON(unlikely(nr_entries >= NR_PMB_ENTRIES));
 
         pmb_cache = kmem_cache_create("pmb", sizeof(struct pmb_entry), 0,
-                                      SLAB_PANIC, pmb_cache_ctor,
-                                      pmb_cache_dtor);
+                                      SLAB_PANIC, pmb_cache_ctor, NULL);
 
         jump_to_P2();
 
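For reference, the list helpers this patch relocates use the classic pointer-to-pointer idiom for a singly linked list: insertion walks a slot pointer to the terminating NULL and stores the new node there, and deletion redirects whichever slot currently points at the victim, so the head needs no special case. Below is a minimal standalone sketch of the same idiom in plain C, outside the kernel, with a hypothetical struct node standing in for struct pmb_entry and no locking; it is an illustration of the pattern, not the kernel code itself.

    #include <stdio.h>

    /* Hypothetical stand-in for struct pmb_entry: only the link matters here. */
    struct node {
            int id;
            struct node *next;
    };

    static struct node *list_head;  /* plays the role of pmb_list */

    /* Append by walking the slot pointer 'p' until it refers to the NULL
     * tail slot, then store the new node there (cf. pmb_list_add). */
    static void list_add_tail(struct node *n)
    {
            struct node **p = &list_head;

            while (*p != NULL)
                    p = &(*p)->next;

            n->next = NULL;
            *p = n;
    }

    /* Unlink by finding the slot that points at 'n' and bypassing it
     * (cf. pmb_list_del); works identically for head, middle, and tail. */
    static void list_del(struct node *n)
    {
            struct node **p, *tmp;

            for (p = &list_head; (tmp = *p) != NULL; p = &tmp->next)
                    if (tmp == n) {
                            *p = tmp->next;
                            return;
                    }
    }

    int main(void)
    {
            struct node a = { .id = 1 }, b = { .id = 2 }, c = { .id = 3 };

            list_add_tail(&a);
            list_add_tail(&b);
            list_add_tail(&c);
            list_del(&b);

            for (struct node *n = list_head; n; n = n->next)
                    printf("%d\n", n->id);  /* prints 1 then 3 */

            return 0;
    }

In the patch itself the same add/del pair is simply moved above pmb_alloc()/pmb_free() and called there under pmb_list_lock, instead of from the slab constructor and destructor.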