author    Christoph Lameter <cl@linux.com>    2011-07-14 13:49:12 -0400
committer Pekka Enberg <penberg@kernel.org>  2011-07-18 08:17:02 -0400
commit    1d07171c5e58e68a76a141970a3a5e816a414ce6
tree      9a8dcbf464b150d68daf0295e1ce644c1ec6d987 /mm
parent    013e896373fc53f3d3c039364a25ccbd1fc0729a
slub: disable interrupts in cmpxchg_double_slab when falling back to pagelock
Split cmpxchg_double_slab() into two functions: one for the case where we
know that interrupts are already disabled (and the fallback therefore does
not need to disable them), and one for the other cases, where the fallback
also disables interrupts itself. This fixes the issue that __slab_free()
called cmpxchg_double_slab() in some scenarios without disabling interrupts.

Tested-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
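[Editor's note] The contract the patch establishes can be sketched outside the kernel. Below is a minimal, hypothetical userspace C analogue, not SLUB code: signal masking stands in for local_irq_save()/local_irq_restore(), a pthread spinlock stands in for slab_lock(), and every name here (struct pair, __cmpxchg_pair, cmpxchg_pair) is invented for illustration.

/*
 * Userspace sketch of the split this patch introduces; NOT SLUB code.
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <signal.h>
#include <stdbool.h>
#include <stddef.h>

struct pair {
	void *freelist;
	unsigned long counters;
};

static pthread_spinlock_t fallback_lock;

/*
 * Variant 1: the caller guarantees "interrupts" (signals, in this
 * analogy) are already blocked, so the lock-based fallback cannot be
 * re-entered from a handler on this thread. This is the analogue of
 * the VM_BUG_ON(!irqs_disabled()) assertion in __cmpxchg_double_slab().
 */
static bool __cmpxchg_pair(struct pair *p,
			   void *fl_old, unsigned long c_old,
			   void *fl_new, unsigned long c_new)
{
	bool ok = false;

	pthread_spin_lock(&fallback_lock);
	if (p->freelist == fl_old && p->counters == c_old) {
		p->freelist = fl_new;
		p->counters = c_new;
		ok = true;
	}
	pthread_spin_unlock(&fallback_lock);
	return ok;
}

/*
 * Variant 2: callable from any context; it masks signals itself around
 * the fallback, mirroring the local_irq_save()/local_irq_restore() pair
 * the patch adds to cmpxchg_double_slab().
 */
static bool cmpxchg_pair(struct pair *p,
			 void *fl_old, unsigned long c_old,
			 void *fl_new, unsigned long c_new)
{
	sigset_t all, saved;
	bool ok;

	sigfillset(&all);
	pthread_sigmask(SIG_BLOCK, &all, &saved);   /* ~ local_irq_save()    */
	ok = __cmpxchg_pair(p, fl_old, c_old, fl_new, c_new);
	pthread_sigmask(SIG_SETMASK, &saved, NULL); /* ~ local_irq_restore() */
	return ok;
}

int main(void)
{
	struct pair p = { NULL, 0 };

	pthread_spin_init(&fallback_lock, PTHREAD_PROCESS_PRIVATE);
	return cmpxchg_pair(&p, NULL, 0, NULL, 1) ? 0 : 1;
}

As in the patch, the double-underscore variant is the one safe to use from paths that already run with interrupts off; the plain variant pays the masking cost so it can be called from anywhere.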
Diffstat (limited to 'mm')
-rw-r--r--  mm/slub.c  49
1 file changed, 45 insertions(+), 4 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 78c488202f7d..7836b45ea1fa 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -354,6 +354,42 @@ static __always_inline void slab_unlock(struct page *page)
 	__bit_spin_unlock(PG_locked, &page->flags);
 }
 
+/* Interrupts must be disabled (for the fallback code to work right) */
+static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
+		void *freelist_old, unsigned long counters_old,
+		void *freelist_new, unsigned long counters_new,
+		const char *n)
+{
+	VM_BUG_ON(!irqs_disabled());
+#ifdef CONFIG_CMPXCHG_DOUBLE
+	if (s->flags & __CMPXCHG_DOUBLE) {
+		if (cmpxchg_double(&page->freelist,
+			freelist_old, counters_old,
+			freelist_new, counters_new))
+		return 1;
+	} else
+#endif
+	{
+		slab_lock(page);
+		if (page->freelist == freelist_old && page->counters == counters_old) {
+			page->freelist = freelist_new;
+			page->counters = counters_new;
+			slab_unlock(page);
+			return 1;
+		}
+		slab_unlock(page);
+	}
+
+	cpu_relax();
+	stat(s, CMPXCHG_DOUBLE_FAIL);
+
+#ifdef SLUB_DEBUG_CMPXCHG
+	printk(KERN_INFO "%s %s: cmpxchg double redo ", n, s->name);
+#endif
+
+	return 0;
+}
+
 static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
 		void *freelist_old, unsigned long counters_old,
 		void *freelist_new, unsigned long counters_new,
@@ -368,14 +404,19 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
 	} else
 #endif
 	{
+		unsigned long flags;
+
+		local_irq_save(flags);
 		slab_lock(page);
 		if (page->freelist == freelist_old && page->counters == counters_old) {
 			page->freelist = freelist_new;
 			page->counters = counters_new;
 			slab_unlock(page);
+			local_irq_restore(flags);
 			return 1;
 		}
 		slab_unlock(page);
+		local_irq_restore(flags);
 	}
 
 	cpu_relax();
@@ -1471,7 +1512,7 @@ static inline int acquire_slab(struct kmem_cache *s,
 		VM_BUG_ON(new.frozen);
 		new.frozen = 1;
 
-	} while (!cmpxchg_double_slab(s, page,
+	} while (!__cmpxchg_double_slab(s, page,
 			freelist, counters,
 			NULL, new.counters,
 			"lock and freeze"));
@@ -1709,7 +1750,7 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 		new.inuse--;
 		VM_BUG_ON(!new.frozen);
 
-	} while (!cmpxchg_double_slab(s, page,
+	} while (!__cmpxchg_double_slab(s, page,
 			prior, counters,
 			freelist, new.counters,
 			"drain percpu freelist"));
@@ -1798,7 +1839,7 @@ redo:
 	}
 
 	l = m;
-	if (!cmpxchg_double_slab(s, page,
+	if (!__cmpxchg_double_slab(s, page,
 				old.freelist, old.counters,
 				new.freelist, new.counters,
 				"unfreezing slab"))
@@ -1992,7 +2033,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 		new.inuse = page->objects;
 		new.frozen = object != NULL;
 
-	} while (!cmpxchg_double_slab(s, page,
+	} while (!__cmpxchg_double_slab(s, page,
 			object, counters,
 			NULL, new.counters,
 			"__slab_alloc"));