author	Christoph Lameter <cl@linux.com>	2011-05-05 16:23:54 -0400
committer	Pekka Enberg <penberg@kernel.org>	2011-05-07 13:25:38 -0400
commit	1759415e630e5db0dd2390df9f94892cbfb9a8a2 (patch)
tree	bfae4443c70ee88c54aa9743ca19ed122af45f8f /mm/slub.c
parent	8dc16c6c04b1a82d00a8464ccc08e1fe17d0ff82 (diff)
slub: Remove CONFIG_CMPXCHG_LOCAL ifdeffery
Remove the #ifdefs. This means that the irqsafe_cpu_cmpxchg_double() is used
everywhere.

There may be performance implications since:

A. We now have to manage a transaction ID for all arches

B. The interrupt holdoff for arches not supporting CONFIG_CMPXCHG_LOCAL is
   reduced to a very short irqoff section.

There are no multiple irqoff/irqon sequences as a result of this change. Even
in the fallback case we only have to do one disable and enable like before.

Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
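For reference, the fastpath this change makes unconditional pairs the per-cpu
freelist pointer with a transaction id and swaps both words in a single
cmpxchg_double, so the common case needs neither a lock nor an interrupt
disable. Below is a minimal userspace sketch of that scheme, assuming x86-64
little-endian and GCC (build with cc -mcx16): the 16-byte
__sync_bool_compare_and_swap() on __int128 stands in for
irqsafe_cpu_cmpxchg_double(), and the names cpu_slab, cas_double and
sketch_alloc are illustrative, not kernel API.

/* Illustrative sketch only -- not from this patch. */
#include <stdio.h>

struct cpu_slab {
	void *freelist;		/* first free object, low quadword */
	unsigned long tid;	/* transaction id, high quadword */
} __attribute__((aligned(16)));	/* cmpxchg16b requires 16-byte alignment */

/* Compare-and-swap freelist and tid together, as cmpxchg_double does.
 * Little-endian assumed: freelist occupies the low half of the __int128. */
static int cas_double(struct cpu_slab *c, void *old_list, unsigned long old_tid,
		      void *new_list, unsigned long new_tid)
{
	unsigned __int128 o = ((unsigned __int128)old_tid << 64) | (unsigned long)old_list;
	unsigned __int128 n = ((unsigned __int128)new_tid << 64) | (unsigned long)new_list;

	return __sync_bool_compare_and_swap((unsigned __int128 *)c, o, n);
}

/* Each free object stores a pointer to the next free object at offset 0,
 * standing in for SLUB's get_freepointer(). */
static void *get_freepointer(void *object)
{
	return *(void **)object;
}

static void *sketch_alloc(struct cpu_slab *c)
{
	void *object;
	unsigned long tid;

redo:
	tid = c->tid;
	__asm__ __volatile__("" ::: "memory");	/* barrier(), as in slab_alloc() */
	object = c->freelist;
	if (!object)
		return NULL;	/* the real code falls back to __slab_alloc() */

	/* Succeeds only if no other operation bumped the tid in between;
	 * the kernel's next_tid() advances by a per-cpu step, +1 suffices here. */
	if (!cas_double(c, object, tid, get_freepointer(object), tid + 1))
		goto redo;	/* cf. note_cmpxchg_failure() + goto redo */
	return object;
}

int main(void)
{
	void *slots[3] = { &slots[1], &slots[2], NULL };
	struct cpu_slab c = { .freelist = &slots[0], .tid = 0 };
	void *a = sketch_alloc(&c);
	void *b = sketch_alloc(&c);

	printf("allocated %p then %p, tid now %lu\n", a, b, c.tid);
	return 0;
}

On an arch without a native double-word cmpxchg, the generic fallback behind
irqsafe_cpu_cmpxchg_double() performs the compare and both stores inside one
local irq disable/enable pair, which is the single short irqoff section that
point B above refers to.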
Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	56
1 file changed, 0 insertions(+), 56 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index c952fac112e8..461199f019d6 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1551,7 +1551,6 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
 	}
 }
 
-#ifdef CONFIG_CMPXCHG_LOCAL
 #ifdef CONFIG_PREEMPT
 /*
  * Calculate the next globally unique transaction for disambiguiation
@@ -1611,17 +1610,12 @@ static inline void note_cmpxchg_failure(const char *n,
 	stat(s, CMPXCHG_DOUBLE_CPU_FAIL);
 }
 
-#endif
-
 void init_kmem_cache_cpus(struct kmem_cache *s)
 {
-#ifdef CONFIG_CMPXCHG_LOCAL
 	int cpu;
 
 	for_each_possible_cpu(cpu)
 		per_cpu_ptr(s->cpu_slab, cpu)->tid = init_tid(cpu);
-#endif
-
 }
 /*
  * Remove the cpu slab
@@ -1654,9 +1648,7 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 		page->inuse--;
 	}
 	c->page = NULL;
-#ifdef CONFIG_CMPXCHG_LOCAL
 	c->tid = next_tid(c->tid);
-#endif
 	unfreeze_slab(s, page, tail);
 }
 
@@ -1791,7 +1783,6 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 {
 	void **object;
 	struct page *page;
-#ifdef CONFIG_CMPXCHG_LOCAL
 	unsigned long flags;
 
 	local_irq_save(flags);
@@ -1803,7 +1794,6 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	 */
 	c = this_cpu_ptr(s->cpu_slab);
 #endif
-#endif
 
 	/* We handle __GFP_ZERO in the caller */
 	gfpflags &= ~__GFP_ZERO;
@@ -1831,10 +1821,8 @@ load_freelist:
 
 unlock_out:
 	slab_unlock(page);
-#ifdef CONFIG_CMPXCHG_LOCAL
 	c->tid = next_tid(c->tid);
 	local_irq_restore(flags);
-#endif
 	stat(s, ALLOC_SLOWPATH);
 	return object;
 
@@ -1873,9 +1861,7 @@ load_from_page:
 	}
 	if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
 		slab_out_of_memory(s, gfpflags, node);
-#ifdef CONFIG_CMPXCHG_LOCAL
 	local_irq_restore(flags);
-#endif
 	return NULL;
 debug:
 	if (!alloc_debug_processing(s, page, object, addr))
@@ -1902,20 +1888,12 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 {
 	void **object;
 	struct kmem_cache_cpu *c;
-#ifdef CONFIG_CMPXCHG_LOCAL
 	unsigned long tid;
-#else
-	unsigned long flags;
-#endif
 
 	if (slab_pre_alloc_hook(s, gfpflags))
 		return NULL;
 
-#ifndef CONFIG_CMPXCHG_LOCAL
-	local_irq_save(flags);
-#else
 redo:
-#endif
 
 	/*
 	 * Must read kmem_cache cpu data via this cpu ptr. Preemption is
@@ -1925,7 +1903,6 @@ redo:
 	 */
 	c = __this_cpu_ptr(s->cpu_slab);
 
-#ifdef CONFIG_CMPXCHG_LOCAL
 	/*
 	 * The transaction ids are globally unique per cpu and per operation on
 	 * a per cpu queue. Thus they can be guarantee that the cmpxchg_double
@@ -1934,7 +1911,6 @@ redo:
 	 */
 	tid = c->tid;
 	barrier();
-#endif
 
 	object = c->freelist;
 	if (unlikely(!object || !node_match(c, node)))
@@ -1942,7 +1918,6 @@ redo:
 		object = __slab_alloc(s, gfpflags, node, addr, c);
 
 	else {
-#ifdef CONFIG_CMPXCHG_LOCAL
 		/*
 		 * The cmpxchg will only match if there was no additonal
 		 * operation and if we are on the right processor.
@@ -1963,16 +1938,9 @@ redo:
 			note_cmpxchg_failure("slab_alloc", s, tid);
 			goto redo;
 		}
-#else
-		c->freelist = get_freepointer(s, object);
-#endif
 		stat(s, ALLOC_FASTPATH);
 	}
 
-#ifndef CONFIG_CMPXCHG_LOCAL
-	local_irq_restore(flags);
-#endif
-
 	if (unlikely(gfpflags & __GFP_ZERO) && object)
 		memset(object, 0, s->objsize);
 
@@ -2049,11 +2017,9 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 {
 	void *prior;
 	void **object = (void *)x;
-#ifdef CONFIG_CMPXCHG_LOCAL
 	unsigned long flags;
 
 	local_irq_save(flags);
-#endif
 	slab_lock(page);
 	stat(s, FREE_SLOWPATH);
 
@@ -2084,9 +2050,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 
 out_unlock:
 	slab_unlock(page);
-#ifdef CONFIG_CMPXCHG_LOCAL
 	local_irq_restore(flags);
-#endif
 	return;
 
 slab_empty:
@@ -2098,9 +2062,7 @@ slab_empty:
 		stat(s, FREE_REMOVE_PARTIAL);
 	}
 	slab_unlock(page);
-#ifdef CONFIG_CMPXCHG_LOCAL
 	local_irq_restore(flags);
-#endif
 	stat(s, FREE_SLAB);
 	discard_slab(s, page);
 }
@@ -2121,20 +2083,11 @@ static __always_inline void slab_free(struct kmem_cache *s,
 {
 	void **object = (void *)x;
 	struct kmem_cache_cpu *c;
-#ifdef CONFIG_CMPXCHG_LOCAL
 	unsigned long tid;
-#else
-	unsigned long flags;
-#endif
 
 	slab_free_hook(s, x);
 
-#ifndef CONFIG_CMPXCHG_LOCAL
-	local_irq_save(flags);
-
-#else
 redo:
-#endif
 
 	/*
 	 * Determine the currently cpus per cpu slab.
@@ -2144,15 +2097,12 @@ redo:
 	 */
 	c = __this_cpu_ptr(s->cpu_slab);
 
-#ifdef CONFIG_CMPXCHG_LOCAL
 	tid = c->tid;
 	barrier();
-#endif
 
 	if (likely(page == c->page && c->node != NUMA_NO_NODE)) {
 		set_freepointer(s, object, c->freelist);
 
-#ifdef CONFIG_CMPXCHG_LOCAL
 		if (unlikely(!this_cpu_cmpxchg_double(
 				s->cpu_slab->freelist, s->cpu_slab->tid,
 				c->freelist, tid,
@@ -2161,16 +2111,10 @@ redo:
 			note_cmpxchg_failure("slab_free", s, tid);
 			goto redo;
 		}
-#else
-		c->freelist = object;
-#endif
 		stat(s, FREE_FASTPATH);
 	} else
 		__slab_free(s, page, x, addr);
 
-#ifndef CONFIG_CMPXCHG_LOCAL
-	local_irq_restore(flags);
-#endif
 }
 
 void kmem_cache_free(struct kmem_cache *s, void *x)