author	Christoph Lameter <cl@linux.com>	2011-06-01 13:25:55 -0400
committer	Pekka Enberg <penberg@kernel.org>	2011-07-02 06:26:56 -0400
commit	80f08c191f6c9563641291bea80657a3b9faabf0 (patch)
tree	9ad3d121f6a6cd30e317b5819a0d8cb729e2b296 /mm
parent	5c2e4bbbd60623f1024a753c291b666068f8a6e7 (diff)
slub: Avoid disabling interrupts in free slowpath
Disabling interrupts can be avoided now. However, list operations still
require disabling interrupts, since allocations can occur from interrupt
contexts and there is no way to perform atomic list operations. The
acquisition of the list_lock therefore has to disable interrupts as well.

Dropping the interrupt handling significantly simplifies the slowpath.

Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
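A minimal sketch of the locking pattern the commit moves to (not the actual
slub code; struct my_node, struct my_slab and the free_slowpath_* helpers
are hypothetical stand-ins): instead of disabling interrupts up front for
the whole slowpath and taking list_lock with a plain spin_lock(), interrupts
are disabled only for the window in which list_lock is held, via
spin_lock_irqsave():

#include <linux/irqflags.h>
#include <linux/list.h>
#include <linux/spinlock.h>

/* Hypothetical stand-ins for struct kmem_cache_node and a slab page. */
struct my_node {
	spinlock_t	list_lock;
	struct list_head partial;
};

struct my_slab {
	struct list_head lru;
};

/* Before: interrupts are disabled for the entire slowpath, so a plain
 * spin_lock() on list_lock is sufficient. */
static void free_slowpath_before(struct my_node *n, struct my_slab *slab)
{
	unsigned long flags;

	local_irq_save(flags);
	/* ... freelist work runs with interrupts off ... */
	spin_lock(&n->list_lock);
	list_add(&slab->lru, &n->partial);
	spin_unlock(&n->list_lock);
	local_irq_restore(flags);
}

/* After: interrupts stay enabled; they are turned off only while
 * list_lock is held, because frees may also come from interrupt
 * context and the list updates are not atomic. */
static void free_slowpath_after(struct my_node *n, struct my_slab *slab)
{
	unsigned long flags;

	/* ... freelist work runs with interrupts on ... */
	spin_lock_irqsave(&n->list_lock, flags);
	list_add(&slab->lru, &n->partial);
	spin_unlock_irqrestore(&n->list_lock, flags);
}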
Diffstat (limited to 'mm')
-rw-r--r--  mm/slub.c | 16
1 file changed, 5 insertions(+), 11 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 08c57a047548..cb6b0857e1a6 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2197,11 +2197,10 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 	struct kmem_cache_node *n = NULL;
 	unsigned long uninitialized_var(flags);
 
-	local_irq_save(flags);
 	stat(s, FREE_SLOWPATH);
 
 	if (kmem_cache_debug(s) && !free_debug_processing(s, page, x, addr))
-		goto out_unlock;
+		return;
 
 	do {
 		prior = page->freelist;
@@ -2220,7 +2219,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 			 * Otherwise the list_lock will synchronize with
 			 * other processors updating the list of slabs.
 			 */
-			spin_lock(&n->list_lock);
+			spin_lock_irqsave(&n->list_lock, flags);
 		}
 		inuse = new.inuse;
 
@@ -2236,7 +2235,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 		 */
 		if (was_frozen)
 			stat(s, FREE_FROZEN);
-		goto out_unlock;
+		return;
 	}
 
 	/*
@@ -2259,11 +2258,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 			stat(s, FREE_ADD_PARTIAL);
 		}
 	}
-
-	spin_unlock(&n->list_lock);
-
-out_unlock:
-	local_irq_restore(flags);
+	spin_unlock_irqrestore(&n->list_lock, flags);
 	return;
 
 slab_empty:
@@ -2275,8 +2270,7 @@ slab_empty:
 		stat(s, FREE_REMOVE_PARTIAL);
 	}
 
-	spin_unlock(&n->list_lock);
-	local_irq_restore(flags);
+	spin_unlock_irqrestore(&n->list_lock, flags);
 	stat(s, FREE_SLAB);
 	discard_slab(s, page);
 }