author	Paul Mundt <lethal@linux-sh.org>	2007-05-10 22:26:10 -0400
committer	Paul Mundt <lethal@linux-sh.org>	2007-05-13 20:18:34 -0400
commit	38c425f69c8d949620384f917e00652eaf390ec9 (patch)
tree	0e4cbaf7e1be44c92db0587d77fc8e4f70227db1 /arch/sh
parent	0facbe3a34556bbc30333971e32c5430b087fcb1 (diff)
sh: Kill off pmb slab cache destructor.
This is the last remaining slab destructor in the kernel, which we kill off and move the resultant list tracking logic up to the pmb_alloc()/pmb_free() paths.

As Christoph Lameter pointed out, it's potentially unsafe to be taking the list lock in the destructor anyway, so this is also more fundamentally correct.

With this in place, we're all set for killing off slab destructors from the kernel entirely.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
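The shape of the change: object-list bookkeeping leaves the slab constructor/destructor pair and moves into the explicit allocation and free paths, where the calling context is known and taking a spinlock is safe. Below is a minimal sketch of the resulting pattern, condensed from the diff that follows; the trimmed struct pmb_entry, the GFP_KERNEL flag, and the NULL-on-failure return are illustrative assumptions, not the full pmb.c.

#include <linux/slab.h>
#include <linux/spinlock.h>

/* Trimmed stand-in for the real struct pmb_entry; only the fields
 * this sketch touches are kept (assumption, for illustration). */
struct pmb_entry {
	unsigned long vpn, ppn, flags;
	struct pmb_entry *next;
};

/* pmb_cache is created in pmb_init() (see the diff) with
 * kmem_cache_create(..., pmb_cache_ctor, NULL) -- no destructor. */
static struct kmem_cache *pmb_cache;
static DEFINE_SPINLOCK(pmb_list_lock);
static struct pmb_entry *pmb_list;

/* Append to the singly-linked tracking list; caller holds pmb_list_lock. */
static inline void pmb_list_add(struct pmb_entry *pmbe)
{
	struct pmb_entry **p, *tmp;

	p = &pmb_list;
	while ((tmp = *p) != NULL)	/* walk to the tail */
		p = &tmp->next;

	pmbe->next = tmp;		/* tmp is NULL here: pmbe becomes the tail */
	*p = pmbe;
}

/* The lock is now taken on the alloc path, a known process context,
 * rather than in a slab ctor/dtor that may run from arbitrary contexts. */
struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
			    unsigned long flags)
{
	struct pmb_entry *pmbe = kmem_cache_alloc(pmb_cache, GFP_KERNEL);

	if (!pmbe)		/* failure handling is assumed; not shown in the hunk */
		return NULL;

	pmbe->vpn   = vpn;
	pmbe->ppn   = ppn;
	pmbe->flags = flags;

	spin_lock_irq(&pmb_list_lock);
	pmb_list_add(pmbe);
	spin_unlock_irq(&pmb_list_lock);

	return pmbe;
}

pmb_free() mirrors this: take pmb_list_lock, unlink via pmb_list_del(), drop the lock, then kmem_cache_free(). With the tracking gone from the ctor/dtor, the destructor argument to kmem_cache_create() becomes NULL, which is what clears the way for removing the slab destructor API kernel-wide.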
Diffstat (limited to 'arch/sh')
-rw-r--r--	arch/sh/mm/pmb.c	79
1 file changed, 38 insertions, 41 deletions
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index 02aae06527dc..b6a5a338145b 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -3,7 +3,7 @@
  *
  * Privileged Space Mapping Buffer (PMB) Support.
  *
- * Copyright (C) 2005, 2006 Paul Mundt
+ * Copyright (C) 2005, 2006, 2007 Paul Mundt
  *
  * P1/P2 Section mapping definitions from map32.h, which was:
  *
@@ -68,6 +68,32 @@ static inline unsigned long mk_pmb_data(unsigned int entry)
 	return mk_pmb_entry(entry) | PMB_DATA;
 }
 
+static DEFINE_SPINLOCK(pmb_list_lock);
+static struct pmb_entry *pmb_list;
+
+static inline void pmb_list_add(struct pmb_entry *pmbe)
+{
+	struct pmb_entry **p, *tmp;
+
+	p = &pmb_list;
+	while ((tmp = *p) != NULL)
+		p = &tmp->next;
+
+	pmbe->next = tmp;
+	*p = pmbe;
+}
+
+static inline void pmb_list_del(struct pmb_entry *pmbe)
+{
+	struct pmb_entry **p, *tmp;
+
+	for (p = &pmb_list; (tmp = *p); p = &tmp->next)
+		if (tmp == pmbe) {
+			*p = tmp->next;
+			return;
+		}
+}
+
 struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
 			    unsigned long flags)
 {
@@ -81,11 +107,19 @@ struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
 	pmbe->ppn   = ppn;
 	pmbe->flags = flags;
 
+	spin_lock_irq(&pmb_list_lock);
+	pmb_list_add(pmbe);
+	spin_unlock_irq(&pmb_list_lock);
+
 	return pmbe;
 }
 
 void pmb_free(struct pmb_entry *pmbe)
 {
+	spin_lock_irq(&pmb_list_lock);
+	pmb_list_del(pmbe);
+	spin_unlock_irq(&pmb_list_lock);
+
 	kmem_cache_free(pmb_cache, pmbe);
 }
 
@@ -167,31 +201,6 @@ void clear_pmb_entry(struct pmb_entry *pmbe)
 	clear_bit(entry, &pmb_map);
 }
 
-static DEFINE_SPINLOCK(pmb_list_lock);
-static struct pmb_entry *pmb_list;
-
-static inline void pmb_list_add(struct pmb_entry *pmbe)
-{
-	struct pmb_entry **p, *tmp;
-
-	p = &pmb_list;
-	while ((tmp = *p) != NULL)
-		p = &tmp->next;
-
-	pmbe->next = tmp;
-	*p = pmbe;
-}
-
-static inline void pmb_list_del(struct pmb_entry *pmbe)
-{
-	struct pmb_entry **p, *tmp;
-
-	for (p = &pmb_list; (tmp = *p); p = &tmp->next)
-		if (tmp == pmbe) {
-			*p = tmp->next;
-			return;
-		}
-}
-
 static struct {
 	unsigned long size;
@@ -283,25 +292,14 @@ void pmb_unmap(unsigned long addr)
 	} while (pmbe);
 }
 
-static void pmb_cache_ctor(void *pmb, struct kmem_cache *cachep, unsigned long flags)
+static void pmb_cache_ctor(void *pmb, struct kmem_cache *cachep,
+			   unsigned long flags)
 {
 	struct pmb_entry *pmbe = pmb;
 
 	memset(pmb, 0, sizeof(struct pmb_entry));
 
-	spin_lock_irq(&pmb_list_lock);
-
 	pmbe->entry = PMB_NO_ENTRY;
-	pmb_list_add(pmbe);
-
-	spin_unlock_irq(&pmb_list_lock);
-}
-
-static void pmb_cache_dtor(void *pmb, struct kmem_cache *cachep, unsigned long flags)
-{
-	spin_lock_irq(&pmb_list_lock);
-	pmb_list_del(pmb);
-	spin_unlock_irq(&pmb_list_lock);
 }
 
 static int __init pmb_init(void)
@@ -312,8 +310,7 @@ static int __init pmb_init(void)
 	BUG_ON(unlikely(nr_entries >= NR_PMB_ENTRIES));
 
 	pmb_cache = kmem_cache_create("pmb", sizeof(struct pmb_entry), 0,
-				      SLAB_PANIC, pmb_cache_ctor,
-				      pmb_cache_dtor);
+				      SLAB_PANIC, pmb_cache_ctor, NULL);
 
 	jump_to_P2();
 