author      Linus Torvalds <torvalds@linux-foundation.org>   2011-05-23 13:10:44 -0400
committer   Linus Torvalds <torvalds@linux-foundation.org>   2011-05-23 13:10:44 -0400
commit      4867faab1e3eb8cc3f74e390357615d9b8e8cda6 (patch)
tree        7a6cb9175c483a0f7e3672185e7f9fb25f74caa3 /mm
parent      6fad2b5b649fa1fa6ee7293222815f5b62499889 (diff)
parent      bfb91fb650e988c3c347e50fc75fedb7d4f0c018 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6:
  slub: Deal with hyperthetical case of PAGE_SIZE > 2M
  slub: Remove node check in slab_free
  slub: avoid label inside conditional
  slub: Make CONFIG_DEBUG_PAGE_ALLOC work with new fastpath
  slub: Avoid warning for !CONFIG_SLUB_DEBUG
  slub: Remove CONFIG_CMPXCHG_LOCAL ifdeffery
  slub: Move debug handlign in __slab_free
  slub: Move node determination out of hotpath
  slub: Eliminate repeated use of c->page through a new page variable
  slub: get_map() function to establish map of free objects in a slab
  slub: Use NUMA_NO_NODE in get_partial
  slub: Fix a typo in config name
Diffstat (limited to 'mm')

-rw-r--r--   mm/slub.c | 165
1 file changed, 65 insertions(+), 100 deletions(-)
@@ -261,6 +261,18 @@ static inline void *get_freepointer(struct kmem_cache *s, void *object)
 	return *(void **)(object + s->offset);
 }
 
+static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
+{
+	void *p;
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+	probe_kernel_read(&p, (void **)(object + s->offset), sizeof(p));
+#else
+	p = get_freepointer(s, object);
+#endif
+	return p;
+}
+
 static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
 {
 	*(void **)(object + s->offset) = fp;
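
get_freepointer_safe() exists for the sake of the lockless allocation fastpath (see the slab_alloc() hunks below): the free pointer of an object is read speculatively, before the cmpxchg has established ownership, and with CONFIG_DEBUG_PAGEALLOC the containing slab page may already have been freed and unmapped. A sketch of the hazard (illustrative only, not kernel source):

	/*
	 * cpu A, slab_alloc() fastpath:    cpu B:
	 *
	 *   object = c->freelist;
	 *                                    frees the remaining objects;
	 *                                    the slab page goes back to the
	 *                                    page allocator and is unmapped
	 *                                    by CONFIG_DEBUG_PAGEALLOC
	 *   next = get_freepointer(s, object);  <-- plain load would fault
	 *
	 * probe_kernel_read() performs the load through a fault-tolerant
	 * path instead.  A stale value is harmless: the tid no longer
	 * matches, the cmpxchg_double fails, and the fastpath retries.
	 */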
@@ -271,10 +283,6 @@ static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
 	for (__p = (__addr); __p < (__addr) + (__objects) * (__s)->size;\
 			__p += (__s)->size)
 
-/* Scan freelist */
-#define for_each_free_object(__p, __s, __free) \
-	for (__p = (__free); __p; __p = get_freepointer((__s), __p))
-
 /* Determine object index from a given position */
 static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
 {
@@ -332,6 +340,21 @@ static inline int oo_objects(struct kmem_cache_order_objects x)
 
 #ifdef CONFIG_SLUB_DEBUG
 /*
+ * Determine a map of object in use on a page.
+ *
+ * Slab lock or node listlock must be held to guarantee that the page does
+ * not vanish from under us.
+ */
+static void get_map(struct kmem_cache *s, struct page *page, unsigned long *map)
+{
+	void *p;
+	void *addr = page_address(page);
+
+	for (p = page->freelist; p; p = get_freepointer(s, p))
+		set_bit(slab_index(p, s, addr), map);
+}
+
+/*
  * Debug settings:
  */
 #ifdef CONFIG_SLUB_DEBUG_ON
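
get_map() centralizes the freelist walk that the debug code used to open-code with for_each_free_object() (removed in the previous hunk). The calling convention, as seen in the updated callers later in this diff: a set bit means "on the freelist", so a clear bit identifies an object in use.

	/* typical caller (cf. validate_slab()/process_slab() below) */
	bitmap_zero(map, page->objects);
	get_map(s, page, map);			/* mark free objects */

	for_each_object(p, s, addr, page->objects)
		if (!test_bit(slab_index(p, s, addr), map))
			handle_allocated_object(p);	/* hypothetical helper */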
@@ -1487,7 +1510,7 @@ static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
 	int searchnode = (node == NUMA_NO_NODE) ? numa_node_id() : node;
 
 	page = get_partial_node(get_node(s, searchnode));
-	if (page || node != -1)
+	if (page || node != NUMA_NO_NODE)
 		return page;
 
 	return get_any_partial(s, flags);
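
The replaced literal -1 was an open-coded NUMA_NO_NODE; the test now visibly matches the assignment of searchnode two lines above. For reference, from include/linux/numa.h:

	#define NUMA_NO_NODE	(-1)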
@@ -1540,7 +1563,6 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
 	}
 }
 
-#ifdef CONFIG_CMPXCHG_LOCAL
 #ifdef CONFIG_PREEMPT
 /*
  * Calculate the next globally unique transaction for disambiguiation
@@ -1600,17 +1622,12 @@ static inline void note_cmpxchg_failure(const char *n,
 	stat(s, CMPXCHG_DOUBLE_CPU_FAIL);
 }
 
-#endif
-
 void init_kmem_cache_cpus(struct kmem_cache *s)
 {
-#ifdef CONFIG_CMPXCHG_LOCAL
 	int cpu;
 
 	for_each_possible_cpu(cpu)
 		per_cpu_ptr(s->cpu_slab, cpu)->tid = init_tid(cpu);
-#endif
-
 }
 /*
  * Remove the cpu slab
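
With the CONFIG_CMPXCHG_LOCAL ifdeffery gone, the per-cpu transaction id (tid) is maintained in every configuration. The helpers used here are defined slightly earlier in slub.c; roughly as follows (a sketch from memory of the surrounding code, modulo formatting):

	#ifdef CONFIG_PREEMPT
	/*
	 * A task may be preempted and migrate between reading the tid and
	 * the cmpxchg, so the tid must also encode which cpu it belongs to.
	 */
	#define TID_STEP	roundup_pow_of_two(CONFIG_NR_CPUS)
	#else
	/* No preemption: only irqs can interfere, a step of 1 suffices. */
	#define TID_STEP	1
	#endif

	static inline unsigned long next_tid(unsigned long tid)
	{
		return tid + TID_STEP;		/* one more "transaction" */
	}

	static inline unsigned int init_tid(int cpu)
	{
		return cpu;			/* low bits name the owning cpu */
	}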
@@ -1643,9 +1660,7 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 		page->inuse--;
 	}
 	c->page = NULL;
-#ifdef CONFIG_CMPXCHG_LOCAL
 	c->tid = next_tid(c->tid);
-#endif
 	unfreeze_slab(s, page, tail);
 }
 
@@ -1779,8 +1794,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 			  unsigned long addr, struct kmem_cache_cpu *c)
 {
 	void **object;
-	struct page *new;
-#ifdef CONFIG_CMPXCHG_LOCAL
+	struct page *page;
 	unsigned long flags;
 
 	local_irq_save(flags);
@@ -1792,37 +1806,35 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	 */
 	c = this_cpu_ptr(s->cpu_slab);
 #endif
-#endif
 
 	/* We handle __GFP_ZERO in the caller */
 	gfpflags &= ~__GFP_ZERO;
 
-	if (!c->page)
+	page = c->page;
+	if (!page)
 		goto new_slab;
 
-	slab_lock(c->page);
+	slab_lock(page);
 	if (unlikely(!node_match(c, node)))
 		goto another_slab;
 
 	stat(s, ALLOC_REFILL);
 
 load_freelist:
-	object = c->page->freelist;
+	object = page->freelist;
 	if (unlikely(!object))
 		goto another_slab;
 	if (kmem_cache_debug(s))
 		goto debug;
 
 	c->freelist = get_freepointer(s, object);
-	c->page->inuse = c->page->objects;
-	c->page->freelist = NULL;
-	c->node = page_to_nid(c->page);
+	page->inuse = page->objects;
+	page->freelist = NULL;
+
 unlock_out:
-	slab_unlock(c->page);
-#ifdef CONFIG_CMPXCHG_LOCAL
+	slab_unlock(page);
 	c->tid = next_tid(c->tid);
 	local_irq_restore(flags);
-#endif
 	stat(s, ALLOC_SLOWPATH);
 	return object;
 
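
Replacing c->page with a local page variable throughout the slowpath is the "Eliminate repeated use of c->page" patch. Presumably the point is code generation: c->page is per-cpu data that the compiler reloads conservatively around calls and barriers, whereas a local can stay in a register. Schematically:

	/* before: each use is another load through the per-cpu pointer */
	object = c->page->freelist;
	c->page->inuse = c->page->objects;
	c->page->freelist = NULL;

	/* after: one load up front, then register-resident accesses */
	page = c->page;
	object = page->freelist;
	page->inuse = page->objects;
	page->freelist = NULL;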
@@ -1830,10 +1842,11 @@ another_slab:
 	deactivate_slab(s, c);
 
 new_slab:
-	new = get_partial(s, gfpflags, node);
-	if (new) {
-		c->page = new;
+	page = get_partial(s, gfpflags, node);
+	if (page) {
 		stat(s, ALLOC_FROM_PARTIAL);
+		c->node = page_to_nid(page);
+		c->page = page;
 		goto load_freelist;
 	}
 
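
This is part of "Move node determination out of hotpath": c->node is now refreshed only at the two places where a slab is installed as the cpu slab, rather than on every pass through load_freelist. The consumer of c->node is node_match(), which at this point in the tree reads roughly:

	static inline int node_match(struct kmem_cache_cpu *c, int node)
	{
	#ifdef CONFIG_NUMA
		if (node != NUMA_NO_NODE && c->node != node)
			return 0;	/* caller wants a different node */
	#endif
		return 1;
	}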
@@ -1841,33 +1854,35 @@ new_slab:
 	if (gfpflags & __GFP_WAIT)
 		local_irq_enable();
 
-	new = new_slab(s, gfpflags, node);
+	page = new_slab(s, gfpflags, node);
 
 	if (gfpflags & __GFP_WAIT)
 		local_irq_disable();
 
-	if (new) {
+	if (page) {
 		c = __this_cpu_ptr(s->cpu_slab);
 		stat(s, ALLOC_SLAB);
 		if (c->page)
 			flush_slab(s, c);
-		slab_lock(new);
-		__SetPageSlubFrozen(new);
-		c->page = new;
+
+		slab_lock(page);
+		__SetPageSlubFrozen(page);
+		c->node = page_to_nid(page);
+		c->page = page;
 		goto load_freelist;
 	}
 	if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
 		slab_out_of_memory(s, gfpflags, node);
-#ifdef CONFIG_CMPXCHG_LOCAL
 	local_irq_restore(flags);
-#endif
 	return NULL;
 debug:
-	if (!alloc_debug_processing(s, c->page, object, addr))
+	if (!alloc_debug_processing(s, page, object, addr))
 		goto another_slab;
 
-	c->page->inuse++;
-	c->page->freelist = get_freepointer(s, object);
+	page->inuse++;
+	page->freelist = get_freepointer(s, object);
+	deactivate_slab(s, c);
+	c->page = NULL;
 	c->node = NUMA_NO_NODE;
 	goto unlock_out;
 }
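
The added deactivate_slab()/c->page = NULL pair means a debug cache never keeps a cpu slab. A hypothetical trace of the next allocation on such a cache (sketch, not kernel source):

	/*
	 *   slab_alloc():   c->freelist == NULL  -> __slab_alloc()
	 *   __slab_alloc(): c->page == NULL      -> goto new_slab
	 *
	 * Every allocation from a debug cache therefore takes the locked
	 * slowpath, where alloc_debug_processing() can do its checks; the
	 * lockless fastpath is never armed for such caches.
	 */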
@@ -1887,20 +1902,12 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 {
 	void **object;
 	struct kmem_cache_cpu *c;
-#ifdef CONFIG_CMPXCHG_LOCAL
 	unsigned long tid;
-#else
-	unsigned long flags;
-#endif
 
 	if (slab_pre_alloc_hook(s, gfpflags))
 		return NULL;
 
-#ifndef CONFIG_CMPXCHG_LOCAL
-	local_irq_save(flags);
-#else
 redo:
-#endif
 
 	/*
 	 * Must read kmem_cache cpu data via this cpu ptr. Preemption is
@@ -1910,7 +1917,6 @@ redo:
 	 */
 	c = __this_cpu_ptr(s->cpu_slab);
 
-#ifdef CONFIG_CMPXCHG_LOCAL
 	/*
 	 * The transaction ids are globally unique per cpu and per operation on
 	 * a per cpu queue. Thus they can be guarantee that the cmpxchg_double
@@ -1919,7 +1925,6 @@ redo:
 	 */
 	tid = c->tid;
 	barrier();
-#endif
 
 	object = c->freelist;
 	if (unlikely(!object || !node_match(c, node)))
@@ -1927,7 +1932,6 @@ redo:
 		object = __slab_alloc(s, gfpflags, node, addr, c);
 
 	else {
-#ifdef CONFIG_CMPXCHG_LOCAL
 		/*
 		 * The cmpxchg will only match if there was no additional
 		 * operation and if we are on the right processor.
@@ -1943,21 +1947,14 @@ redo:
 		if (unlikely(!irqsafe_cpu_cmpxchg_double(
 				s->cpu_slab->freelist, s->cpu_slab->tid,
 				object, tid,
-				get_freepointer(s, object), next_tid(tid)))) {
+				get_freepointer_safe(s, object), next_tid(tid)))) {
 
 			note_cmpxchg_failure("slab_alloc", s, tid);
 			goto redo;
 		}
-#else
-		c->freelist = get_freepointer(s, object);
-#endif
 		stat(s, ALLOC_FASTPATH);
 	}
 
-#ifndef CONFIG_CMPXCHG_LOCAL
-	local_irq_restore(flags);
-#endif
-
 	if (unlikely(gfpflags & __GFP_ZERO) && object)
 		memset(object, 0, s->objsize);
 
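
Taken together, the fastpath is one speculative transaction: snapshot the tid, read the freelist, compute the replacement values, and publish both with a single cmpxchg_double. As pseudocode (illustrative only; the real code uses irqsafe_cpu_cmpxchg_double() on per-cpu variables, as above):

	do {
		tid    = c->tid;		/* snapshot transaction id */
		object = c->freelist;		/* speculative, nothing owned yet */
		next   = get_freepointer_safe(s, object);
		/* succeeds only if freelist AND tid are both unchanged */
	} while (!cmpxchg_double(&c->freelist, &c->tid,
				 object, tid,		/* expected */
				 next, next_tid(tid)));	/* replacement */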
@@ -2034,18 +2031,15 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 {
 	void *prior;
 	void **object = (void *)x;
-#ifdef CONFIG_CMPXCHG_LOCAL
 	unsigned long flags;
 
 	local_irq_save(flags);
-#endif
 	slab_lock(page);
 	stat(s, FREE_SLOWPATH);
 
-	if (kmem_cache_debug(s))
-		goto debug;
+	if (kmem_cache_debug(s) && !free_debug_processing(s, page, x, addr))
+		goto out_unlock;
 
-checks_ok:
 	prior = page->freelist;
 	set_freepointer(s, object, prior);
 	page->freelist = object;
@@ -2070,9 +2064,7 @@ checks_ok:
 
 out_unlock:
 	slab_unlock(page);
-#ifdef CONFIG_CMPXCHG_LOCAL
 	local_irq_restore(flags);
-#endif
 	return;
 
 slab_empty:
@@ -2084,17 +2076,9 @@ slab_empty:
 		stat(s, FREE_REMOVE_PARTIAL);
 	}
 	slab_unlock(page);
-#ifdef CONFIG_CMPXCHG_LOCAL
 	local_irq_restore(flags);
-#endif
 	stat(s, FREE_SLAB);
 	discard_slab(s, page);
-	return;
-
-debug:
-	if (!free_debug_processing(s, page, x, addr))
-		goto out_unlock;
-	goto checks_ok;
 }
 
 /*
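
This is the "avoid label inside conditional" / "Move debug handlign in __slab_free" cleanup: the debug work is decided up front with one short-circuit test instead of a forward goto that jumps back. Side by side:

	/* before: three labels and a goto pair */
	if (kmem_cache_debug(s))
		goto debug;
	checks_ok:
		...
	debug:
		if (!free_debug_processing(s, page, x, addr))
			goto out_unlock;
		goto checks_ok;

	/* after: straight-line flow; && skips the call for !debug caches */
	if (kmem_cache_debug(s) && !free_debug_processing(s, page, x, addr))
		goto out_unlock;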
@@ -2113,20 +2097,11 @@ static __always_inline void slab_free(struct kmem_cache *s,
 {
 	void **object = (void *)x;
 	struct kmem_cache_cpu *c;
-#ifdef CONFIG_CMPXCHG_LOCAL
 	unsigned long tid;
-#else
-	unsigned long flags;
-#endif
 
 	slab_free_hook(s, x);
 
-#ifndef CONFIG_CMPXCHG_LOCAL
-	local_irq_save(flags);
-
-#else
 redo:
-#endif
 
 	/*
 	 * Determine the currently cpus per cpu slab.
@@ -2136,15 +2111,12 @@ redo:
 	 */
 	c = __this_cpu_ptr(s->cpu_slab);
 
-#ifdef CONFIG_CMPXCHG_LOCAL
 	tid = c->tid;
 	barrier();
-#endif
 
-	if (likely(page == c->page && c->node != NUMA_NO_NODE)) {
+	if (likely(page == c->page)) {
 		set_freepointer(s, object, c->freelist);
 
-#ifdef CONFIG_CMPXCHG_LOCAL
 		if (unlikely(!irqsafe_cpu_cmpxchg_double(
 				s->cpu_slab->freelist, s->cpu_slab->tid,
 				c->freelist, tid,
@@ -2153,16 +2125,10 @@ redo:
 			note_cmpxchg_failure("slab_free", s, tid);
 			goto redo;
 		}
-#else
-		c->freelist = object;
-#endif
 		stat(s, FREE_FASTPATH);
 	} else
 		__slab_free(s, page, x, addr);
 
-#ifndef CONFIG_CMPXCHG_LOCAL
-	local_irq_restore(flags);
-#endif
 }
 
 void kmem_cache_free(struct kmem_cache *s, void *x)
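
The dropped `c->node != NUMA_NO_NODE` condition was how slab_free() used to detect a debug cache and force it into the slowpath. It is redundant now that the allocation-side debug path (in the __slab_alloc hunk above) deactivates the cpu slab: with c->page always NULL for a debug cache, `page == c->page` can never be true and __slab_free() is taken anyway. Sketch:

	/*
	 * Debug cache (sketch):
	 *
	 *   alloc:  __slab_alloc() debug path leaves c->page == NULL
	 *   free:   page = virt_to_head_page(x);   (in kmem_cache_free())
	 *           page == c->page?  never, c->page is NULL
	 *           -> __slab_free() slowpath, free_debug_processing() runs
	 */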
@@ -2673,9 +2639,8 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
 		return;
 	slab_err(s, page, "%s", text);
 	slab_lock(page);
-	for_each_free_object(p, s, page->freelist)
-		set_bit(slab_index(p, s, addr), map);
 
+	get_map(s, page, map);
 	for_each_object(p, s, addr, page->objects) {
 
 		if (!test_bit(slab_index(p, s, addr), map)) {
@@ -3203,7 +3168,7 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
 		list_for_each_entry(p, &n->partial, lru)
 			p->slab = s;
 
-#ifdef CONFIG_SLAB_DEBUG
+#ifdef CONFIG_SLUB_DEBUG
 		list_for_each_entry(p, &n->full, lru)
 			p->slab = s;
 #endif
@@ -3610,10 +3575,11 @@ static int validate_slab(struct kmem_cache *s, struct page *page,
 	/* Now we know that a valid freelist exists */
 	bitmap_zero(map, page->objects);
 
-	for_each_free_object(p, s, page->freelist) {
-		set_bit(slab_index(p, s, addr), map);
-		if (!check_object(s, page, p, SLUB_RED_INACTIVE))
-			return 0;
+	get_map(s, page, map);
+	for_each_object(p, s, addr, page->objects) {
+		if (test_bit(slab_index(p, s, addr), map))
+			if (!check_object(s, page, p, SLUB_RED_INACTIVE))
+				return 0;
 	}
 
 	for_each_object(p, s, addr, page->objects)
@@ -3821,8 +3787,7 @@ static void process_slab(struct loc_track *t, struct kmem_cache *s,
 	void *p;
 
 	bitmap_zero(map, page->objects);
-	for_each_free_object(p, s, page->freelist)
-		set_bit(slab_index(p, s, addr), map);
+	get_map(s, page, map);
 
 	for_each_object(p, s, addr, page->objects)
 		if (!test_bit(slab_index(p, s, addr), map))