author      Joonsoo Kim <iamjoonsoo.kim@lge.com>            2016-03-15 17:54:21 -0400
committer   Linus Torvalds <torvalds@linux-foundation.org>  2016-03-15 19:55:16 -0400
commit      40b44137971c2e5865a78f9f7de274449983ccb5 (patch)
tree        536b3d1a1a13a8de640d97ba28a704876352a41d /mm/slab.c
parent      40323278b557a5909bbecfa181c91a3af7afbbe3 (diff)
mm/slab: clean up DEBUG_PAGEALLOC processing code
Currently, the open-coded check for a DEBUG_PAGEALLOC cache is duplicated at
several sites. That makes the code unreadable and hard to change.
This patch cleans the code up. The following patch will change the criteria
for a DEBUG_PAGEALLOC cache, so this clean-up will help there, too.
[akpm@linux-foundation.org: fix build with CONFIG_DEBUG_PAGEALLOC=n]
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/slab.c')
-rw-r--r--   mm/slab.c   97
1 file changed, 49 insertions(+), 48 deletions(-)
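
The whole patch boils down to one predicate and one wrapper, added under
#if DEBUG, through which every former open-coded check is funneled. Below is
a condensed sketch of the two helpers (simplified from the hunks that follow;
it will not compile on its own, since it leans on mm/slab.c internals such as
OFF_SLAB(), store_stackinfo() and kernel_map_pages()):

static bool is_debug_pagealloc_cache(struct kmem_cache *cachep)
{
        /* The page-mapping trick only works for off-slab caches whose
         * object size is an exact multiple of PAGE_SIZE. */
        return debug_pagealloc_enabled() && OFF_SLAB(cachep) &&
               (cachep->size % PAGE_SIZE) == 0;
}

#ifdef CONFIG_DEBUG_PAGEALLOC
static void slab_kernel_map(struct kmem_cache *cachep, void *objp,
                            int map, unsigned long caller)
{
        if (!is_debug_pagealloc_cache(cachep))
                return;

        /* A non-zero caller (free path only) records the freeing
         * stack in the object before it is unmapped. */
        if (caller)
                store_stackinfo(cachep, objp, caller);

        /* map=0 unmaps the object's pages so stray accesses fault;
         * map=1 maps them back in before the object is reused. */
        kernel_map_pages(virt_to_page(objp), cachep->size / PAGE_SIZE, map);
}
#else
/* Without CONFIG_DEBUG_PAGEALLOC the wrapper compiles to nothing,
 * so the call sites need no #ifdef of their own. */
static inline void slab_kernel_map(struct kmem_cache *cachep, void *objp,
                                   int map, unsigned long caller) {}
#endif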
@@ -1661,6 +1661,14 @@ static void kmem_rcu_free(struct rcu_head *head)
 }
 
 #if DEBUG
+static bool is_debug_pagealloc_cache(struct kmem_cache *cachep)
+{
+        if (debug_pagealloc_enabled() && OFF_SLAB(cachep) &&
+                (cachep->size % PAGE_SIZE) == 0)
+                return true;
+
+        return false;
+}
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
 static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
@@ -1694,6 +1702,23 @@ static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
         }
         *addr++ = 0x87654321;
 }
+
+static void slab_kernel_map(struct kmem_cache *cachep, void *objp,
+                        int map, unsigned long caller)
+{
+        if (!is_debug_pagealloc_cache(cachep))
+                return;
+
+        if (caller)
+                store_stackinfo(cachep, objp, caller);
+
+        kernel_map_pages(virt_to_page(objp), cachep->size / PAGE_SIZE, map);
+}
+
+#else
+static inline void slab_kernel_map(struct kmem_cache *cachep, void *objp,
+                        int map, unsigned long caller) {}
+
 #endif
 
 static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
@@ -1772,6 +1797,9 @@ static void check_poison_obj(struct kmem_cache *cachep, void *objp)
         int size, i;
         int lines = 0;
 
+        if (is_debug_pagealloc_cache(cachep))
+                return;
+
         realobj = (char *)objp + obj_offset(cachep);
         size = cachep->object_size;
 
@@ -1837,17 +1865,8 @@ static void slab_destroy_debugcheck(struct kmem_cache *cachep,
                 void *objp = index_to_obj(cachep, page, i);
 
                 if (cachep->flags & SLAB_POISON) {
-#ifdef CONFIG_DEBUG_PAGEALLOC
-                        if (debug_pagealloc_enabled() &&
-                                cachep->size % PAGE_SIZE == 0 &&
-                                OFF_SLAB(cachep))
-                                kernel_map_pages(virt_to_page(objp),
-                                        cachep->size / PAGE_SIZE, 1);
-                        else
-                                check_poison_obj(cachep, objp);
-#else
                         check_poison_obj(cachep, objp);
-#endif
+                        slab_kernel_map(cachep, objp, 1, 0);
                 }
                 if (cachep->flags & SLAB_RED_ZONE) {
                         if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
@@ -2226,16 +2245,6 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
         if (flags & CFLGS_OFF_SLAB) {
                 /* really off slab. No need for manual alignment */
                 freelist_size = calculate_freelist_size(cachep->num, 0);
-
-#ifdef CONFIG_PAGE_POISONING
-                /* If we're going to use the generic kernel_map_pages()
-                 * poisoning, then it's going to smash the contents of
-                 * the redzone and userword anyhow, so switch them off.
-                 */
-                if (debug_pagealloc_enabled() &&
-                        size % PAGE_SIZE == 0 && flags & SLAB_POISON)
-                        flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
-#endif
         }
 
         cachep->colour_off = cache_line_size();
@@ -2251,7 +2260,19 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
         cachep->size = size;
         cachep->reciprocal_buffer_size = reciprocal_value(size);
 
-        if (flags & CFLGS_OFF_SLAB) {
+#if DEBUG
+        /*
+         * If we're going to use the generic kernel_map_pages()
+         * poisoning, then it's going to smash the contents of
+         * the redzone and userword anyhow, so switch them off.
+         */
+        if (IS_ENABLED(CONFIG_PAGE_POISONING) &&
+                (cachep->flags & SLAB_POISON) &&
+                is_debug_pagealloc_cache(cachep))
+                cachep->flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
+#endif
+
+        if (OFF_SLAB(cachep)) {
                 cachep->freelist_cache = kmalloc_slab(freelist_size, 0u);
                 /*
                  * This is a possibility for one of the kmalloc_{dma,}_caches.
@@ -2475,9 +2496,6 @@ static void cache_init_objs(struct kmem_cache *cachep,
         for (i = 0; i < cachep->num; i++) {
                 void *objp = index_to_obj(cachep, page, i);
 #if DEBUG
-                /* need to poison the objs? */
-                if (cachep->flags & SLAB_POISON)
-                        poison_obj(cachep, objp, POISON_FREE);
                 if (cachep->flags & SLAB_STORE_USER)
                         *dbg_userword(cachep, objp) = NULL;
 
@@ -2501,10 +2519,11 @@ static void cache_init_objs(struct kmem_cache *cachep,
                                 slab_error(cachep, "constructor overwrote the"
                                            " start of an object");
                 }
-                if ((cachep->size % PAGE_SIZE) == 0 &&
-                        OFF_SLAB(cachep) && cachep->flags & SLAB_POISON)
-                        kernel_map_pages(virt_to_page(objp),
-                                        cachep->size / PAGE_SIZE, 0);
+                /* need to poison the objs? */
+                if (cachep->flags & SLAB_POISON) {
+                        poison_obj(cachep, objp, POISON_FREE);
+                        slab_kernel_map(cachep, objp, 0, 0);
+                }
 #else
                 if (cachep->ctor)
                         cachep->ctor(objp);
@@ -2716,18 +2735,8 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
 
         set_obj_status(page, objnr, OBJECT_FREE);
         if (cachep->flags & SLAB_POISON) {
-#ifdef CONFIG_DEBUG_PAGEALLOC
-                if (debug_pagealloc_enabled() &&
-                        (cachep->size % PAGE_SIZE) == 0 && OFF_SLAB(cachep)) {
-                        store_stackinfo(cachep, objp, caller);
-                        kernel_map_pages(virt_to_page(objp),
-                                         cachep->size / PAGE_SIZE, 0);
-                } else {
-                        poison_obj(cachep, objp, POISON_FREE);
-                }
-#else
                 poison_obj(cachep, objp, POISON_FREE);
-#endif
+                slab_kernel_map(cachep, objp, 0, caller);
         }
         return objp;
 }
@@ -2862,16 +2871,8 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
         if (!objp)
                 return objp;
         if (cachep->flags & SLAB_POISON) {
-#ifdef CONFIG_DEBUG_PAGEALLOC
-                if (debug_pagealloc_enabled() &&
-                        (cachep->size % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
-                        kernel_map_pages(virt_to_page(objp),
-                                         cachep->size / PAGE_SIZE, 1);
-                else
-                        check_poison_obj(cachep, objp);
-#else
                 check_poison_obj(cachep, objp);
-#endif
+                slab_kernel_map(cachep, objp, 1, 0);
                 poison_obj(cachep, objp, POISON_INUSE);
         }
         if (cachep->flags & SLAB_STORE_USER)
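
After the conversion, every call site shares the same two-line shape: poison
or verify the object first, then call slab_kernel_map(). Objects are unmapped
(map=0) when they are poisoned as free, in cache_init_objs() and
cache_free_debugcheck(), and mapped back (map=1) in
cache_alloc_debugcheck_after() and slab_destroy_debugcheck(); only the free
path passes a non-zero caller, so stack recording happens only there. The now
unconditional check_poison_obj() calls stay correct because the hunk at line
1797 makes that function return early for DEBUG_PAGEALLOC caches, preserving
the old behaviour of skipping the byte-wise poison check for them.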