 include/linux/slub_def.h |  2 --
 mm/slub.c                | 56 ----------------------------------------
 2 files changed, 0 insertions(+), 58 deletions(-)
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 45ca123e8002..ca0c076b2374 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -37,9 +37,7 @@ enum stat_item {
 
 struct kmem_cache_cpu {
 	void **freelist;	/* Pointer to next available object */
-#ifdef CONFIG_CMPXCHG_LOCAL
 	unsigned long tid;	/* Globally unique transaction id */
-#endif
 	struct page *page;	/* The slab from which we are allocating */
 	int node;		/* The node of the page (or -1 for debug) */
 #ifdef CONFIG_SLUB_STATS
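
For readability, this is the per-cpu structure the hunk leaves behind, assembled from the context lines above (a sketch; the CONFIG_SLUB_STATS tail is elided). freelist and tid are the pair the fastpaths below update together with this_cpu_cmpxchg_double(), which is why tid can no longer be conditional:

struct kmem_cache_cpu {
	void **freelist;	/* Pointer to next available object */
	unsigned long tid;	/* Globally unique transaction id */
	struct page *page;	/* The slab from which we are allocating */
	int node;		/* The node of the page (or -1 for debug) */
	/* ... CONFIG_SLUB_STATS counters, when configured ... */
};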
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1551,7 +1551,6 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
 	}
 }
 
-#ifdef CONFIG_CMPXCHG_LOCAL
 #ifdef CONFIG_PREEMPT
 /*
  * Calculate the next globally unique transaction for disambiguiation
@@ -1611,17 +1610,12 @@ static inline void note_cmpxchg_failure(const char *n,
 	stat(s, CMPXCHG_DOUBLE_CPU_FAIL);
 }
 
-#endif
-
 void init_kmem_cache_cpus(struct kmem_cache *s)
 {
-#ifdef CONFIG_CMPXCHG_LOCAL
 	int cpu;
 
 	for_each_possible_cpu(cpu)
 		per_cpu_ptr(s->cpu_slab, cpu)->tid = init_tid(cpu);
-#endif
-
 }
 /*
  * Remove the cpu slab
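
For context, the tid helpers that init_kmem_cache_cpus() and the fastpaths rely on sit just above this hunk in mm/slub.c. Roughly, paraphrased from the surrounding source of this era rather than quoted from the patch:

#ifdef CONFIG_PREEMPT
/*
 * Preemption can migrate a task between cpus mid-operation, so tids must
 * stay disjoint per cpu: start at the cpu number, step by a power of two
 * that is at least CONFIG_NR_CPUS.
 */
#define TID_STEP  roundup_pow_of_two(CONFIG_NR_CPUS)
#else
/* Without preemption the cpu cannot change under us; a step of 1 suffices. */
#define TID_STEP  1
#endif

static inline unsigned long next_tid(unsigned long tid)
{
	return tid + TID_STEP;	/* advanced on every per-cpu queue operation */
}

static inline unsigned int init_tid(int cpu)
{
	return cpu;		/* each cpu's sequence starts at its own number */
}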
@@ -1654,9 +1648,7 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 		page->inuse--;
 	}
 	c->page = NULL;
-#ifdef CONFIG_CMPXCHG_LOCAL
 	c->tid = next_tid(c->tid);
-#endif
 	unfreeze_slab(s, page, tail);
 }
 
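
In effect, every slowpath mutation of the per-cpu queue now unconditionally executes the pair

	c->page = NULL;
	c->tid = next_tid(c->tid);	/* any fastpath cmpxchg that sampled the old tid now fails */

so a concurrent fastpath attempt retries and falls through to the slowpath; this is what lets the fastpaths run without disabling interrupts.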
@@ -1791,7 +1783,6 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 {
 	void **object;
 	struct page *page;
-#ifdef CONFIG_CMPXCHG_LOCAL
 	unsigned long flags;
 
 	local_irq_save(flags);
@@ -1803,7 +1794,6 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	 */
 	c = this_cpu_ptr(s->cpu_slab);
 #endif
-#endif
 
 	/* We handle __GFP_ZERO in the caller */
 	gfpflags &= ~__GFP_ZERO;
@@ -1831,10 +1821,8 @@ load_freelist:
 
 unlock_out:
 	slab_unlock(page);
-#ifdef CONFIG_CMPXCHG_LOCAL
 	c->tid = next_tid(c->tid);
 	local_irq_restore(flags);
-#endif
 	stat(s, ALLOC_SLOWPATH);
 	return object;
 
@@ -1873,9 +1861,7 @@ load_from_page:
 	}
 	if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
 		slab_out_of_memory(s, gfpflags, node);
-#ifdef CONFIG_CMPXCHG_LOCAL
 	local_irq_restore(flags);
-#endif
 	return NULL;
 debug:
 	if (!alloc_debug_processing(s, page, object, addr))
@@ -1902,20 +1888,12 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 {
 	void **object;
 	struct kmem_cache_cpu *c;
-#ifdef CONFIG_CMPXCHG_LOCAL
 	unsigned long tid;
-#else
-	unsigned long flags;
-#endif
 
 	if (slab_pre_alloc_hook(s, gfpflags))
 		return NULL;
 
-#ifndef CONFIG_CMPXCHG_LOCAL
-	local_irq_save(flags);
-#else
 redo:
-#endif
 
 	/*
 	 * Must read kmem_cache cpu data via this cpu ptr. Preemption is
@@ -1925,7 +1903,6 @@ redo:
 	 */
 	c = __this_cpu_ptr(s->cpu_slab);
 
-#ifdef CONFIG_CMPXCHG_LOCAL
 	/*
 	 * The transaction ids are globally unique per cpu and per operation on
 	 * a per cpu queue. Thus they can be guarantee that the cmpxchg_double
@@ -1934,7 +1911,6 @@ redo:
 	 */
 	tid = c->tid;
 	barrier();
-#endif
 
 	object = c->freelist;
 	if (unlikely(!object || !node_match(c, node)))
@@ -1942,7 +1918,6 @@ redo:
 		object = __slab_alloc(s, gfpflags, node, addr, c);
 
 	else {
-#ifdef CONFIG_CMPXCHG_LOCAL
 		/*
 		 * The cmpxchg will only match if there was no additonal
 		 * operation and if we are on the right processor.
@@ -1963,16 +1938,9 @@ redo:
 			note_cmpxchg_failure("slab_alloc", s, tid);
 			goto redo;
 		}
-#else
-		c->freelist = get_freepointer(s, object);
-#endif
 		stat(s, ALLOC_FASTPATH);
 	}
 
-#ifndef CONFIG_CMPXCHG_LOCAL
-	local_irq_restore(flags);
-#endif
-
 	if (unlikely(gfpflags & __GFP_ZERO) && object)
 		memset(object, 0, s->objsize);
 
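
Pieced together from the context lines of the slab_alloc() hunks above, the allocation fastpath left behind by this patch reads roughly as follows. This is a sketch, not a quote from the tree; the cmpxchg argument order is inferred by mirroring the slab_free() hunk further down:

redo:
	c = __this_cpu_ptr(s->cpu_slab);
	tid = c->tid;			/* sample the transaction id ... */
	barrier();			/* ... strictly before the freelist */

	object = c->freelist;
	if (unlikely(!object || !node_match(c, node)))
		object = __slab_alloc(s, gfpflags, node, addr, c);
	else {
		/*
		 * Matches only if no other operation ran on this queue and
		 * we are still on the same cpu; otherwise start over.
		 */
		if (unlikely(!this_cpu_cmpxchg_double(
				s->cpu_slab->freelist, s->cpu_slab->tid,
				object, tid,
				get_freepointer(s, object), next_tid(tid)))) {
			note_cmpxchg_failure("slab_alloc", s, tid);
			goto redo;
		}
		stat(s, ALLOC_FASTPATH);
	}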
@@ -2049,11 +2017,9 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 {
 	void *prior;
 	void **object = (void *)x;
-#ifdef CONFIG_CMPXCHG_LOCAL
 	unsigned long flags;
 
 	local_irq_save(flags);
-#endif
 	slab_lock(page);
 	stat(s, FREE_SLOWPATH);
 
@@ -2084,9 +2050,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 
 out_unlock:
 	slab_unlock(page);
-#ifdef CONFIG_CMPXCHG_LOCAL
 	local_irq_restore(flags);
-#endif
 	return;
 
 slab_empty:
@@ -2098,9 +2062,7 @@ slab_empty:
 		stat(s, FREE_REMOVE_PARTIAL);
 	}
 	slab_unlock(page);
-#ifdef CONFIG_CMPXCHG_LOCAL
 	local_irq_restore(flags);
-#endif
 	stat(s, FREE_SLAB);
 	discard_slab(s, page);
 }
@@ -2121,20 +2083,11 @@ static __always_inline void slab_free(struct kmem_cache *s,
 {
 	void **object = (void *)x;
 	struct kmem_cache_cpu *c;
-#ifdef CONFIG_CMPXCHG_LOCAL
 	unsigned long tid;
-#else
-	unsigned long flags;
-#endif
 
 	slab_free_hook(s, x);
 
-#ifndef CONFIG_CMPXCHG_LOCAL
-	local_irq_save(flags);
-
-#else
 redo:
-#endif
 
 	/*
 	 * Determine the currently cpus per cpu slab.
@@ -2144,15 +2097,12 @@ redo:
 	 */
 	c = __this_cpu_ptr(s->cpu_slab);
 
-#ifdef CONFIG_CMPXCHG_LOCAL
 	tid = c->tid;
 	barrier();
-#endif
 
 	if (likely(page == c->page && c->node != NUMA_NO_NODE)) {
 		set_freepointer(s, object, c->freelist);
 
-#ifdef CONFIG_CMPXCHG_LOCAL
 		if (unlikely(!this_cpu_cmpxchg_double(
 			s->cpu_slab->freelist, s->cpu_slab->tid,
 			c->freelist, tid,
@@ -2161,16 +2111,10 @@ redo:
 			note_cmpxchg_failure("slab_free", s, tid);
 			goto redo;
 		}
-#else
-		c->freelist = object;
-#endif
 		stat(s, FREE_FASTPATH);
 	} else
 		__slab_free(s, page, x, addr);
 
-#ifndef CONFIG_CMPXCHG_LOCAL
-	local_irq_restore(flags);
-#endif
 }
 
 void kmem_cache_free(struct kmem_cache *s, void *x)
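
Assembled the same way from the slab_free() hunks, the free fastpath is now unconditionally lockless (again a sketch built from the context lines above):

redo:
	c = __this_cpu_ptr(s->cpu_slab);
	tid = c->tid;
	barrier();

	if (likely(page == c->page && c->node != NUMA_NO_NODE)) {
		set_freepointer(s, object, c->freelist);

		/*
		 * Publish the object and advance the tid in one shot; any
		 * intervening operation or cpu migration makes the tid mismatch.
		 */
		if (unlikely(!this_cpu_cmpxchg_double(
				s->cpu_slab->freelist, s->cpu_slab->tid,
				c->freelist, tid,
				object, next_tid(tid)))) {
			note_cmpxchg_failure("slab_free", s, tid);
			goto redo;
		}
		stat(s, FREE_FASTPATH);
	} else
		__slab_free(s, page, x, addr);

The open-coded local_irq_save()/restore() fallback can go because, on architectures without a native double-width cmpxchg, the emulated per-cpu cmpxchg_double itself provides the atomicity; the interrupt holdoff shrinks to that short critical section instead of spanning the whole fastpath.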