author	Christoph Lameter <clameter@sgi.com>	2008-01-08 02:20:30 -0500
committer	Christoph Lameter <christoph@stapp.engr.sgi.com>	2008-02-07 20:47:41 -0500
commit	1f84260c8ce3b1ce26d4c1d6dedc2f33a3a29c0c
tree	d57e305168291f1b0f5f2d42853d2c2419f4140b /mm/slub.c
parent	683d0baad3d6e18134927f8c28ee804dbe10fe71
SLUB: Alternate fast paths using cmpxchg_local
Provide an alternate implementation of the SLUB fast paths for alloc
and free using cmpxchg_local. The cmpxchg_local fast path is selected
for arches that have CONFIG_FAST_CMPXCHG_LOCAL set. An arch should only
set CONFIG_FAST_CMPXCHG_LOCAL if cmpxchg_local is faster than an
interrupt enable/disable sequence. This is known to be true for both
x86 arches (32 and 64 bit), so FAST_CMPXCHG_LOCAL is set for both.
Currently another requirement for the fastpath is that the kernel is
compiled without preemption. The restriction will go away with the
introduction of a new per cpu allocator and new per cpu operations.
The advantages of a cmpxchg_local based fast path are:
1. Potentially lower cycle count (30%-60% faster)
2. There is no need to disable and enable interrupts on the fast path.
   Currently interrupts have to be disabled and enabled on every
   slab operation, which likely accounts for a significant percentage
   of the interrupt off/on sequences in the kernel.
3. The disposal of freed slabs can occur with interrupts enabled.
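Reduced to a self-contained userspace sketch, the retry-loop shape of the
cmpxchg-based fast paths looks like the following. This is illustrative only:
the names (cpu_slab, fastpath_alloc, fastpath_free) are made up, C11 atomics
stand in for cmpxchg_local (which only has to be atomic against interrupts on
the local CPU, not against other CPUs), and the real fast paths also check
node locality and fall back to __slab_alloc()/__slab_free().

#include <stdatomic.h>
#include <stdio.h>

/*
 * Toy per-cpu freelist: each free object stores the pointer to the next
 * free object in its first word, much like SLUB's freelist linkage
 * (the real code uses a per-cache word offset, c->offset).
 */
struct cpu_slab {
	_Atomic(void *) freelist;
};

/* Alloc fast path: pop the head; retry if the freelist changed meanwhile. */
static void *fastpath_alloc(struct cpu_slab *c)
{
	void *object;

	do {
		object = atomic_load(&c->freelist);
		if (!object)
			return NULL;	/* the slow path would refill here */
	} while (!atomic_compare_exchange_weak(&c->freelist, &object,
					       *(void **)object));
	return object;
}

/* Free fast path: link the object to the current head and push it back. */
static void fastpath_free(struct cpu_slab *c, void *object)
{
	void *old;

	do {
		old = atomic_load(&c->freelist);
		*(void **)object = old;
	} while (!atomic_compare_exchange_weak(&c->freelist, &old, object));
}

int main(void)
{
	void *objs[2] = { NULL, NULL };	/* two dummy "objects" */
	struct cpu_slab c = { .freelist = NULL };

	fastpath_free(&c, &objs[0]);
	fastpath_free(&c, &objs[1]);
	void *first = fastpath_alloc(&c);
	void *second = fastpath_alloc(&c);
	printf("popped %p then %p\n", first, second);
	return 0;
}

Note that neither loop touches the interrupt flag; in the real patch that is
exactly what allows freed slabs to be disposed of with interrupts enabled.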
The alternate path is realized using #ifdefs. Several attempts to do the
same with macros and inline functions resulted in a mess (in particular due
to the strange way that local_irq_save() handles its argument and due
to the need to define macros/functions that sometimes disable interrupts
and sometimes do something else).
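For illustration only, here is a userspace stand-in (not kernel code; the
stub macro below merely mimics local_irq_save()'s calling convention) of why
such wrapping gets awkward: local_irq_save() assigns to its argument by name,
so a helper that only sometimes disables interrupts has to remain a macro
rather than an inline function taking the flags by value.

#include <stdio.h>

static int use_irq_path = 1;	/* stands in for the SLUB_FASTPATH choice */

static unsigned long fake_save_and_disable(void)
{
	return 0x200;		/* pretend the saved flags had IF set */
}

/* Stand-in with the same calling convention as local_irq_save(flags). */
#define local_irq_save_stub(flags)	((flags) = fake_save_and_disable())

/* A helper that sometimes disables interrupts and sometimes does nothing. */
#define maybe_irq_save(flags)				\
	do {						\
		if (use_irq_path)			\
			local_irq_save_stub(flags);	\
	} while (0)

int main(void)
{
	unsigned long flags = 0;

	maybe_irq_save(flags);	/* writes to "flags" by name */
	printf("saved flags: %#lx\n", flags);
	return 0;
}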
[clameter: Stripped preempt bits and disabled fastpath if preempt is enabled]
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Reviewed-by: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: <linux-arch@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c | 93
1 file changed, 88 insertions(+), 5 deletions(-)
@@ -149,6 +149,13 @@ static inline void ClearSlabDebug(struct page *page)
 /* Enable to test recovery from slab corruption on boot */
 #undef SLUB_RESILIENCY_TEST
 
+/*
+ * Currently fastpath is not supported if preemption is enabled.
+ */
+#if defined(CONFIG_FAST_CMPXCHG_LOCAL) && !defined(CONFIG_PREEMPT)
+#define SLUB_FASTPATH
+#endif
+
 #if PAGE_SHIFT <= 12
 
 /*
@@ -1493,7 +1500,11 @@ static void *__slab_alloc(struct kmem_cache *s,
 {
	void **object;
	struct page *new;
+#ifdef SLUB_FASTPATH
+	unsigned long flags;
 
+	local_irq_save(flags);
+#endif
	if (!c->page)
		goto new_slab;
 
@@ -1512,7 +1523,12 @@ load_freelist:
	c->page->inuse = s->objects;
	c->page->freelist = c->page->end;
	c->node = page_to_nid(c->page);
+unlock_out:
	slab_unlock(c->page);
+out:
+#ifdef SLUB_FASTPATH
+	local_irq_restore(flags);
+#endif
	return object;
 
 another_slab:
@@ -1542,7 +1558,8 @@ new_slab:
		c->page = new;
		goto load_freelist;
	}
-	return NULL;
+	object = NULL;
+	goto out;
 debug:
	object = c->page->freelist;
	if (!alloc_debug_processing(s, c->page, object, addr))
@@ -1551,8 +1568,7 @@ debug:
	c->page->inuse++;
	c->page->freelist = object[c->offset];
	c->node = -1;
-	slab_unlock(c->page);
-	return object;
+	goto unlock_out;
 }
 
 /*
@@ -1569,9 +1585,36 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
		gfp_t gfpflags, int node, void *addr)
 {
	void **object;
-	unsigned long flags;
	struct kmem_cache_cpu *c;
 
+/*
+ * The SLUB_FASTPATH path is provisional and is currently disabled if the
+ * kernel is compiled with preemption or if the arch does not support
+ * fast cmpxchg operations. There are a couple of coming changes that will
+ * simplify matters and allow preemption. Ultimately we may end up making
+ * SLUB_FASTPATH the default.
+ *
+ * 1. The introduction of the per cpu allocator will avoid array lookups
+ *    through get_cpu_slab(). A special register can be used instead.
+ *
+ * 2. The introduction of per cpu atomic operations (cpu_ops) means that
+ *    we can realize the logic here entirely with per cpu atomics. The
+ *    per cpu atomic ops will take care of the preemption issues.
+ */
+
+#ifdef SLUB_FASTPATH
+	c = get_cpu_slab(s, raw_smp_processor_id());
+	do {
+		object = c->freelist;
+		if (unlikely(is_end(object) || !node_match(c, node))) {
+			object = __slab_alloc(s, gfpflags, node, addr, c);
+			break;
+		}
+	} while (cmpxchg_local(&c->freelist, object, object[c->offset])
+								!= object);
+#else
+	unsigned long flags;
+
	local_irq_save(flags);
	c = get_cpu_slab(s, smp_processor_id());
	if (unlikely(is_end(c->freelist) || !node_match(c, node)))
@@ -1583,6 +1626,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
		c->freelist = object[c->offset];
	}
	local_irq_restore(flags);
+#endif
 
	if (unlikely((gfpflags & __GFP_ZERO) && object))
		memset(object, 0, c->objsize);
@@ -1618,6 +1662,11 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
	void *prior;
	void **object = (void *)x;
 
+#ifdef SLUB_FASTPATH
+	unsigned long flags;
+
+	local_irq_save(flags);
+#endif
	slab_lock(page);
 
	if (unlikely(SlabDebug(page)))
@@ -1643,6 +1692,9 @@ checks_ok:
 
 out_unlock:
	slab_unlock(page);
+#ifdef SLUB_FASTPATH
+	local_irq_restore(flags);
+#endif
	return;
 
 slab_empty:
@@ -1653,6 +1705,9 @@ slab_empty:
		remove_partial(s, page);
 
	slab_unlock(page);
+#ifdef SLUB_FASTPATH
+	local_irq_restore(flags);
+#endif
	discard_slab(s, page);
	return;
 
@@ -1677,9 +1732,36 @@ static __always_inline void slab_free(struct kmem_cache *s,
			struct page *page, void *x, void *addr)
 {
	void **object = (void *)x;
-	unsigned long flags;
	struct kmem_cache_cpu *c;
 
+#ifdef SLUB_FASTPATH
+	void **freelist;
+
+	c = get_cpu_slab(s, raw_smp_processor_id());
+	debug_check_no_locks_freed(object, s->objsize);
+	do {
+		freelist = c->freelist;
+		barrier();
+		/*
+		 * If the compiler would reorder the retrieval of c->page to
+		 * come before c->freelist then an interrupt could
+		 * change the cpu slab before we retrieve c->freelist. We
+		 * could be matching on a page no longer active and put the
+		 * object onto the freelist of the wrong slab.
+		 *
+		 * On the other hand: If we already have the freelist pointer
+		 * then any change of cpu_slab will cause the cmpxchg to fail
+		 * since the freelist pointers are unique per slab.
+		 */
+		if (unlikely(page != c->page || c->node < 0)) {
+			__slab_free(s, page, x, addr, c->offset);
+			break;
+		}
+		object[c->offset] = freelist;
+	} while (cmpxchg_local(&c->freelist, freelist, object) != freelist);
+#else
+	unsigned long flags;
+
	local_irq_save(flags);
	debug_check_no_locks_freed(object, s->objsize);
	c = get_cpu_slab(s, smp_processor_id());
@@ -1690,6 +1772,7 @@ static __always_inline void slab_free(struct kmem_cache *s,
		__slab_free(s, page, x, addr, c->offset);
 
	local_irq_restore(flags);
+#endif
 }
 
 void kmem_cache_free(struct kmem_cache *s, void *x)