author    Linus Torvalds <torvalds@woody.linux-foundation.org>  2008-02-19 12:08:49 -0500
committer Linus Torvalds <torvalds@woody.linux-foundation.org>  2008-02-19 12:08:49 -0500
commit    00e962c5408b9f2d0bebd2308673fe982cb9a5fe (patch)
tree      be1095cead2711eb26572e8e68054c6fd0f7f3a2 /mm/slub.c
parent    aa3f98037ce3bcf850bb41f7530754d27481a792 (diff)
Revert "SLUB: Alternate fast paths using cmpxchg_local"
This reverts commit 1f84260c8ce3b1ce26d4c1d6dedc2f33a3a29c0c, which is
suspected to be the reason for some very occasional and hard-to-trigger
crashes that usually look related to memory allocation (mostly reported
in networking, but since that's generally the most common source of
short-lived allocations - and allocations in interrupt contexts - that
in itself is not a big clue).

See for example

	http://bugzilla.kernel.org/show_bug.cgi?id=9973
	http://lkml.org/lkml/2008/2/19/278

etc.

One promising suspicion for what the root cause of the bug is (which
also explains why it's so hard to trigger in practice) came from Eric
Dumazet:

   "I wonder how SLUB_FASTPATH is supposed to work, since it is affected
    by a classical ABA problem of lockless algo.

    cmpxchg_local(&c->freelist, object, object[c->offset]) can succeed,
    while an interrupt came (on this cpu), and several allocations were
    done, and one free was performed at the end of this interruption, so
    'object' was recycled.

    c->freelist can then contain the previous value (object), but
    object[c->offset] was changed by IRQ.

    We then put back in freelist an already allocated object."

but another reason for the revert is simply that everybody agrees that
this code was the main suspect just by virtue of the pattern of oopses.

Cc: Torsten Kaiser <just.for.lkml@googlemail.com>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Eric Dumazet <dada1@cosmosbay.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
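For illustration only (not part of the commit): a minimal user-space sketch of
the ABA pattern Eric describes, written as a lock-free pop from a singly
linked freelist. All identifiers here (freelist_pop, cpu_freelist, and so on)
are hypothetical stand-ins, not SLUB code; the compare-and-swap plays the role
of cmpxchg_local(&c->freelist, object, object[c->offset]).

/*
 * Illustration only -- hypothetical names, not SLUB code.
 * A lock-free freelist pop in the style of the reverted fastpath.
 */
#include <stdatomic.h>
#include <stddef.h>

struct object {
        struct object *next;            /* plays the role of object[c->offset] */
};

struct cpu_freelist {
        _Atomic(struct object *) head;  /* plays the role of c->freelist */
};

struct object *freelist_pop(struct cpu_freelist *fl)
{
        struct object *obj, *next;

        do {
                obj = atomic_load(&fl->head);
                if (!obj)
                        return NULL;
                /*
                 * 'next' can go stale right here: an interrupt may pop 'obj',
                 * do further allocations, and free 'obj' again at the end.
                 * The head then compares equal once more (A -> B -> A), the
                 * CAS below succeeds, and the stale 'next' -- an object that
                 * is actually still allocated -- becomes the new head.
                 */
                next = obj->next;
        } while (!atomic_compare_exchange_weak(&fl->head, &obj, next));

        return obj;
}

Disabling interrupts around the freelist update, which is what the code falls
back to after this revert, closes that window: nothing on the same CPU can
recycle the object between the two reads and the store.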
Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c   87
1 file changed, 1 insertion(+), 86 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 4b3895cb90ee..74c65af0a54f 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -149,13 +149,6 @@ static inline void ClearSlabDebug(struct page *page)
 /* Enable to test recovery from slab corruption on boot */
 #undef SLUB_RESILIENCY_TEST
 
-/*
- * Currently fastpath is not supported if preemption is enabled.
- */
-#if defined(CONFIG_FAST_CMPXCHG_LOCAL) && !defined(CONFIG_PREEMPT)
-#define SLUB_FASTPATH
-#endif
-
 #if PAGE_SHIFT <= 12
 
 /*
@@ -1514,11 +1507,7 @@ static void *__slab_alloc(struct kmem_cache *s,
 {
 	void **object;
 	struct page *new;
-#ifdef SLUB_FASTPATH
-	unsigned long flags;
 
-	local_irq_save(flags);
-#endif
 	if (!c->page)
 		goto new_slab;
 
@@ -1541,9 +1530,6 @@ load_freelist:
 unlock_out:
 	slab_unlock(c->page);
 	stat(c, ALLOC_SLOWPATH);
-#ifdef SLUB_FASTPATH
-	local_irq_restore(flags);
-#endif
 	return object;
 
 another_slab:
@@ -1575,9 +1561,7 @@ new_slab:
 		c->page = new;
 		goto load_freelist;
 	}
-#ifdef SLUB_FASTPATH
-	local_irq_restore(flags);
-#endif
+
 	/*
 	 * No memory available.
 	 *
@@ -1619,34 +1603,6 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 {
 	void **object;
 	struct kmem_cache_cpu *c;
-
-/*
- * The SLUB_FASTPATH path is provisional and is currently disabled if the
- * kernel is compiled with preemption or if the arch does not support
- * fast cmpxchg operations. There are a couple of coming changes that will
- * simplify matters and allow preemption. Ultimately we may end up making
- * SLUB_FASTPATH the default.
- *
- * 1. The introduction of the per cpu allocator will avoid array lookups
- *    through get_cpu_slab(). A special register can be used instead.
- *
- * 2. The introduction of per cpu atomic operations (cpu_ops) means that
- *    we can realize the logic here entirely with per cpu atomics. The
- *    per cpu atomic ops will take care of the preemption issues.
- */
-
-#ifdef SLUB_FASTPATH
-	c = get_cpu_slab(s, raw_smp_processor_id());
-	do {
-		object = c->freelist;
-		if (unlikely(is_end(object) || !node_match(c, node))) {
-			object = __slab_alloc(s, gfpflags, node, addr, c);
-			break;
-		}
-		stat(c, ALLOC_FASTPATH);
-	} while (cmpxchg_local(&c->freelist, object, object[c->offset])
-							!= object);
-#else
 	unsigned long flags;
 
 	local_irq_save(flags);
@@ -1661,7 +1617,6 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 		stat(c, ALLOC_FASTPATH);
 	}
 	local_irq_restore(flags);
-#endif
 
 	if (unlikely((gfpflags & __GFP_ZERO) && object))
 		memset(object, 0, c->objsize);
@@ -1698,11 +1653,6 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 	void **object = (void *)x;
 	struct kmem_cache_cpu *c;
 
-#ifdef SLUB_FASTPATH
-	unsigned long flags;
-
-	local_irq_save(flags);
-#endif
 	c = get_cpu_slab(s, raw_smp_processor_id());
 	stat(c, FREE_SLOWPATH);
 	slab_lock(page);
@@ -1734,9 +1684,6 @@ checks_ok:
 
 out_unlock:
 	slab_unlock(page);
-#ifdef SLUB_FASTPATH
-	local_irq_restore(flags);
-#endif
 	return;
 
 slab_empty:
@@ -1749,9 +1696,6 @@ slab_empty:
 	}
 	slab_unlock(page);
 	stat(c, FREE_SLAB);
-#ifdef SLUB_FASTPATH
-	local_irq_restore(flags);
-#endif
 	discard_slab(s, page);
 	return;
 
@@ -1777,34 +1721,6 @@ static __always_inline void slab_free(struct kmem_cache *s,
 {
 	void **object = (void *)x;
 	struct kmem_cache_cpu *c;
-
-#ifdef SLUB_FASTPATH
-	void **freelist;
-
-	c = get_cpu_slab(s, raw_smp_processor_id());
-	debug_check_no_locks_freed(object, s->objsize);
-	do {
-		freelist = c->freelist;
-		barrier();
-		/*
-		 * If the compiler would reorder the retrieval of c->page to
-		 * come before c->freelist then an interrupt could
-		 * change the cpu slab before we retrieve c->freelist. We
-		 * could be matching on a page no longer active and put the
-		 * object onto the freelist of the wrong slab.
-		 *
-		 * On the other hand: If we already have the freelist pointer
-		 * then any change of cpu_slab will cause the cmpxchg to fail
-		 * since the freelist pointers are unique per slab.
-		 */
-		if (unlikely(page != c->page || c->node < 0)) {
-			__slab_free(s, page, x, addr, c->offset);
-			break;
-		}
-		object[c->offset] = freelist;
-		stat(c, FREE_FASTPATH);
-	} while (cmpxchg_local(&c->freelist, freelist, object) != freelist);
-#else
 	unsigned long flags;
 
 	local_irq_save(flags);
@@ -1818,7 +1734,6 @@ static __always_inline void slab_free(struct kmem_cache *s,
 		__slab_free(s, page, x, addr, c->offset);
 
 	local_irq_restore(flags);
-#endif
 }
 
 void kmem_cache_free(struct kmem_cache *s, void *x)
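
As a footnote, the removed slab_free() fastpath above also depended on
barrier() to keep the c->page check from being reordered ahead of the
c->freelist load. A rough sketch of that ordering concern, again with
hypothetical user-space names rather than the kernel's own code:

/* Illustration only -- hypothetical names, not SLUB code. */
struct cpu_slab {
        void **freelist;
        void *page;
};

#define barrier()       __asm__ __volatile__("" : : : "memory")

int fastpath_applies(struct cpu_slab *c, void *page)
{
        void **freelist = c->freelist;  /* snapshot the freelist first */

        barrier();      /* keep the c->page load below from being hoisted */

        /*
         * Read the other way around (c->page first), an interrupt between
         * the two loads could switch the cpu slab: the page check would
         * pass against a slab that is no longer current while the later
         * cmpxchg used the new slab's freelist, linking the object onto
         * the wrong slab.  With the freelist snapshot taken first, any
         * cpu-slab change just makes the cmpxchg fail, since freelist
         * pointers are unique per slab.
         */
        return page == c->page && freelist != NULL;
}

Real code would go on to cmpxchg against the snapshotted freelist, so a
cpu-slab switch after the snapshot is caught by the cmpxchg failing rather
than by the page check.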