author      Steven Rostedt <rostedt@goodmis.org>            2014-02-10 17:25:46 -0500
committer   Linus Torvalds <torvalds@linux-foundation.org>  2014-02-10 19:01:42 -0500
commit      1e4dd9461fabfbc780cdfaf103cec790f3a53325 (patch)
tree        5842f997753a45cdc3e6c937d852f17056c34d70 /mm/slub.c
parent      25fba9bebeb79447bbe08dc0e1f441caef9f5644 (diff)
slub: do not assert not having lock in removing freed partial
Vladimir reported the following issue:
Commit c65c1877bd68 ("slub: use lockdep_assert_held") requires
remove_partial() to be called with n->list_lock held, but free_partial()
called from kmem_cache_close() on cache destruction does not follow this
rule, leading to a warning:
WARNING: CPU: 0 PID: 2787 at mm/slub.c:1536 __kmem_cache_shutdown+0x1b2/0x1f0()
Modules linked in:
CPU: 0 PID: 2787 Comm: modprobe Tainted: G W 3.14.0-rc1-mm1+ #1
Hardware name:
0000000000000600 ffff88003ae1dde8 ffffffff816d9583 0000000000000600
0000000000000000 ffff88003ae1de28 ffffffff8107c107 0000000000000000
ffff880037ab2b00 ffff88007c240d30 ffffea0001ee5280 ffffea0001ee52a0
Call Trace:
__kmem_cache_shutdown+0x1b2/0x1f0
kmem_cache_destroy+0x43/0xf0
xfs_destroy_zones+0x103/0x110 [xfs]
exit_xfs_fs+0x38/0x4e4 [xfs]
SyS_delete_module+0x19a/0x1f0
system_call_fastpath+0x16/0x1b
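The check that fires is the lockdep annotation added by c65c1877bd68; before this patch, remove_partial() ran it unconditionally (this is the code at mm/slub.c:1536, visible in the diff below):

static inline void remove_partial(struct kmem_cache_node *n,
					struct page *page)
{
	lockdep_assert_held(&n->list_lock);	/* line 1536: WARNs when the lock is not held */

	list_del(&page->lru);
	n->nr_partial--;
}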
His solution was to take the spinlock there in order to quiet lockdep. Although
the lock would see no contention, taking it also requires disabling interrupts,
which has a larger impact on the system.
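Quieting lockdep that way would amount to wrapping the call in free_partial()
with the lock; a hypothetical sketch of that rejected shape (not the patch as
posted):

/* Hypothetical sketch of the rejected approach: taking the lock
 * satisfies the assertion, but this lock must be taken with
 * interrupts disabled, so teardown pays that cost for a lock that
 * nothing can contend. */
unsigned long flags;

spin_lock_irqsave(&n->list_lock, flags);
remove_partial(n, page);	/* lockdep_assert_held() is now satisfied */
spin_unlock_irqrestore(&n->list_lock, flags);
discard_slab(s, page);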
Instead of taking a lock that is needed only to satisfy lockdep, add a
__remove_partial() function that does not check whether the list_lock is held,
as nothing else can hold it while the cache is being destroyed. Also add a
__add_partial() function that skips the lock validation for the same reason:
it is not needed while the cache is being created.
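The resulting pairing, shown here for the remove side (the
add_partial()/__add_partial() pair is analogous; both appear in the diff
below):

/* Bare helper: no lockdep check; for contexts where the lock cannot
 * be contended (cache creation and destruction). */
static inline void
__remove_partial(struct kmem_cache_node *n, struct page *page)
{
	list_del(&page->lru);
	n->nr_partial--;
}

/* Locked variant: all regular callers keep the lockdep coverage. */
static inline void remove_partial(struct kmem_cache_node *n,
					struct page *page)
{
	lockdep_assert_held(&n->list_lock);
	__remove_partial(n, page);
}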
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Reported-by: Vladimir Davydov <vdavydov@parallels.com>
Suggested-by: David Rientjes <rientjes@google.com>
Acked-by: David Rientjes <rientjes@google.com>
Acked-by: Vladimir Davydov <vdavydov@parallels.com>
Acked-by: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c | 32
1 file changed, 20 insertions(+), 12 deletions(-)
@@ -1518,11 +1518,9 @@ static void discard_slab(struct kmem_cache *s, struct page *page)
 /*
  * Management of partially allocated slabs.
  */
-static inline void add_partial(struct kmem_cache_node *n,
-				struct page *page, int tail)
+static inline void
+__add_partial(struct kmem_cache_node *n, struct page *page, int tail)
 {
-	lockdep_assert_held(&n->list_lock);
-
 	n->nr_partial++;
 	if (tail == DEACTIVATE_TO_TAIL)
 		list_add_tail(&page->lru, &n->partial);
@@ -1530,15 +1528,27 @@ static inline void add_partial(struct kmem_cache_node *n,
 		list_add(&page->lru, &n->partial);
 }
 
-static inline void remove_partial(struct kmem_cache_node *n,
-					struct page *page)
+static inline void add_partial(struct kmem_cache_node *n,
+				struct page *page, int tail)
 {
 	lockdep_assert_held(&n->list_lock);
+	__add_partial(n, page, tail);
+}
 
+static inline void
+__remove_partial(struct kmem_cache_node *n, struct page *page)
+{
 	list_del(&page->lru);
 	n->nr_partial--;
 }
 
+static inline void remove_partial(struct kmem_cache_node *n,
+					struct page *page)
+{
+	lockdep_assert_held(&n->list_lock);
+	__remove_partial(n, page);
+}
+
 /*
  * Remove slab from the partial list, freeze it and
  * return the pointer to the freelist.
@@ -2904,12 +2914,10 @@ static void early_kmem_cache_node_alloc(int node)
 	inc_slabs_node(kmem_cache_node, node, page->objects);
 
 	/*
-	 * the lock is for lockdep's sake, not for any actual
-	 * race protection
+	 * No locks need to be taken here as it has just been
+	 * initialized and there is no concurrent access.
 	 */
-	spin_lock(&n->list_lock);
-	add_partial(n, page, DEACTIVATE_TO_HEAD);
-	spin_unlock(&n->list_lock);
+	__add_partial(n, page, DEACTIVATE_TO_HEAD);
 }
 
 static void free_kmem_cache_nodes(struct kmem_cache *s)
@@ -3195,7 +3203,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
 
 	list_for_each_entry_safe(page, h, &n->partial, lru) {
 		if (!page->inuse) {
-			remove_partial(n, page);
+			__remove_partial(n, page);
 			discard_slab(s, page);
 		} else {
 			list_slab_objects(s, page,