author		Ingo Molnar <mingo@elte.hu>		2006-07-13 08:44:38 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-07-13 15:02:44 -0400
commit		873623dfabaa6ebbdc1ce16c1766a3c0ec5d9923
tree		ea7fe80a8a5c78b1f372dfde56cd7deba8e23104
parent		0e2ffbf650bf97499c02327719383818777651e6
[PATCH] lockdep: undo mm/slab.c annotation
undo existing mm/slab.c lock-validator annotations, in preparation of a
new, less intrusive annotation patch.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--	mm/slab.c	33
1 files changed, 10 insertions, 23 deletions
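The annotations being undone here use lockdep's lock-nesting API: the slab free path can take a second spinlock of the same lock class while one is already held (the removed comment in slab_destroy() notes that it "may nest inside an already held ac->lock"), and without a hint the lock validator reports that as possible recursive locking. The workaround was to take the inner lock with spin_lock_nested() and to thread a nesting flag through __cache_free()/cache_free_alien(). A minimal, self-contained sketch of that general pattern, with made-up structure and function names rather than the real mm/slab.c ones:

/*
 * Illustrative sketch only: generic per-cache locks, not the actual
 * slab data structures touched by this patch.
 */
#include <linux/spinlock.h>

struct percpu_cache {
	spinlock_t	lock;
	/* ... objects ... */
};

static void move_objects(struct percpu_cache *dst, struct percpu_cache *src)
{
	spin_lock(&dst->lock);
	/*
	 * Both locks were initialized at the same site, so lockdep puts
	 * them in one lock class; a plain spin_lock() here would trigger
	 * a false "possible recursive locking" report.  The subclass
	 * marks this as a legitimate one-level nesting.
	 */
	spin_lock_nested(&src->lock, SINGLE_DEPTH_NESTING);
	/* ... transfer objects from src to dst ... */
	spin_unlock(&src->lock);
	spin_unlock(&dst->lock);
}

Undoing the annotation restores the plain spin_lock() calls and the original function signatures, clearing the way for the less intrusive annotation mentioned in the changelog.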
diff --git a/mm/slab.c b/mm/slab.c
index 85c2e03098a7..fd1e4c4c1397 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1021,8 +1021,7 @@ static void drain_alien_cache(struct kmem_cache *cachep,
 	}
 }
 
-static inline int cache_free_alien(struct kmem_cache *cachep, void *objp,
-				   int nesting)
+static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
 {
 	struct slab *slabp = virt_to_slab(objp);
 	int nodeid = slabp->nodeid;
@@ -1040,7 +1039,7 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp,
 	STATS_INC_NODEFREES(cachep);
 	if (l3->alien && l3->alien[nodeid]) {
 		alien = l3->alien[nodeid];
-		spin_lock_nested(&alien->lock, nesting);
+		spin_lock(&alien->lock);
 		if (unlikely(alien->avail == alien->limit)) {
 			STATS_INC_ACOVERFLOW(cachep);
 			__drain_alien_cache(cachep, alien, nodeid);
@@ -1069,8 +1068,7 @@ static inline void free_alien_cache(struct array_cache **ac_ptr)
 {
 }
 
-static inline int cache_free_alien(struct kmem_cache *cachep, void *objp,
-				   int nesting)
+static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
 {
 	return 0;
 }
@@ -1760,8 +1758,6 @@ static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
 }
 #endif
 
-static void __cache_free(struct kmem_cache *cachep, void *objp, int nesting);
-
 /**
  * slab_destroy - destroy and release all objects in a slab
  * @cachep: cache pointer being destroyed
@@ -1785,17 +1781,8 @@ static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
 		call_rcu(&slab_rcu->head, kmem_rcu_free);
 	} else {
 		kmem_freepages(cachep, addr);
-		if (OFF_SLAB(cachep)) {
-			unsigned long flags;
-
-			/*
-			 * lockdep: we may nest inside an already held
-			 * ac->lock, so pass in a nesting flag:
-			 */
-			local_irq_save(flags);
-			__cache_free(cachep->slabp_cache, slabp, 1);
-			local_irq_restore(flags);
-		}
+		if (OFF_SLAB(cachep))
+			kmem_cache_free(cachep->slabp_cache, slabp);
 	}
 }
 
@@ -3135,7 +3122,7 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
 #endif
 	check_irq_off();
 	l3 = cachep->nodelists[node];
-	spin_lock_nested(&l3->list_lock, SINGLE_DEPTH_NESTING);
+	spin_lock(&l3->list_lock);
 	if (l3->shared) {
 		struct array_cache *shared_array = l3->shared;
 		int max = shared_array->limit - shared_array->avail;
@@ -3178,14 +3165,14 @@ free_done:
  * Release an obj back to its cache. If the obj has a constructed state, it must
  * be in this state _before_ it is released.  Called with disabled ints.
  */
-static void __cache_free(struct kmem_cache *cachep, void *objp, int nesting)
+static inline void __cache_free(struct kmem_cache *cachep, void *objp)
 {
 	struct array_cache *ac = cpu_cache_get(cachep);
 
 	check_irq_off();
 	objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
 
-	if (cache_free_alien(cachep, objp, nesting))
+	if (cache_free_alien(cachep, objp))
 		return;
 
 	if (likely(ac->avail < ac->limit)) {
@@ -3424,7 +3411,7 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 	BUG_ON(virt_to_cache(objp) != cachep);
 
 	local_irq_save(flags);
-	__cache_free(cachep, objp, 0);
+	__cache_free(cachep, objp);
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL(kmem_cache_free);
@@ -3449,7 +3436,7 @@ void kfree(const void *objp)
 	kfree_debugcheck(objp);
 	c = virt_to_cache(objp);
 	debug_check_no_locks_freed(objp, obj_size(c));
-	__cache_free(c, (void *)objp, 0);
+	__cache_free(c, (void *)objp);
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL(kfree);