author	Peter Zijlstra <peterz@infradead.org>	2014-01-10 07:23:49 -0500
committer	Pekka Enberg <penberg@kernel.org>	2014-01-13 14:34:39 -0500
commit	c65c1877bd6826ce0d9713d76e30a7bed8e49f38 (patch)
tree	27b07e870c75ac9f393017614bd6a93d0cca9415 /mm/slub.c
parent	8afb1474db4701d1ab80cd8251137a3260e6913e (diff)
slub: use lockdep_assert_held
Instead of using comments in an attempt at getting the locking right,
use proper assertions that actively warn you if you got it wrong.
Also add extra braces in a few sites to comply with coding-style.
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	40
1 file changed, 20 insertions(+), 20 deletions(-)
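As context for the change below: lockdep_assert_held() turns a locking rule that previously lived only in a comment into an active check. With CONFIG_LOCKDEP enabled it WARNs when the current context does not hold the given lock; in non-lockdep builds it compiles away to nothing. A minimal sketch of the before/after pattern this patch applies, assuming a hypothetical struct example_node as a stand-in for kmem_cache_node (the scaffolding is illustrative, not the patched mm/slub.c code):

#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/spinlock.h>

struct example_node {		/* hypothetical stand-in for kmem_cache_node */
	spinlock_t	list_lock;
	struct list_head full;
};

/* Before: the rule is only documented, never checked. */
/* list_lock must be held. */
static void add_full_commented(struct example_node *n, struct list_head *entry)
{
	list_add(entry, &n->full);
}

/* After: the same rule is enforced whenever lockdep is enabled. */
static void add_full_asserted(struct example_node *n, struct list_head *entry)
{
	lockdep_assert_held(&n->list_lock);

	list_add(entry, &n->full);
}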
@@ -985,23 +985,22 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x)
 
 /*
  * Tracking of fully allocated slabs for debugging purposes.
- *
- * list_lock must be held.
  */
 static void add_full(struct kmem_cache *s,
 	struct kmem_cache_node *n, struct page *page)
 {
+	lockdep_assert_held(&n->list_lock);
+
 	if (!(s->flags & SLAB_STORE_USER))
 		return;
 
 	list_add(&page->lru, &n->full);
 }
 
-/*
- * list_lock must be held.
- */
-static void remove_full(struct kmem_cache *s, struct page *page)
+static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page)
 {
+	lockdep_assert_held(&n->list_lock);
+
 	if (!(s->flags & SLAB_STORE_USER))
 		return;
 
@@ -1250,7 +1249,8 @@ static inline int check_object(struct kmem_cache *s, struct page *page,
 			void *object, u8 val) { return 1; }
 static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
 					struct page *page) {}
-static inline void remove_full(struct kmem_cache *s, struct page *page) {}
+static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
+					struct page *page) {}
 static inline unsigned long kmem_cache_flags(unsigned long object_size,
 	unsigned long flags, const char *name,
 	void (*ctor)(void *))
@@ -1504,12 +1504,12 @@ static void discard_slab(struct kmem_cache *s, struct page *page)
 
 /*
  * Management of partially allocated slabs.
- *
- * list_lock must be held.
  */
 static inline void add_partial(struct kmem_cache_node *n,
 				struct page *page, int tail)
 {
+	lockdep_assert_held(&n->list_lock);
+
 	n->nr_partial++;
 	if (tail == DEACTIVATE_TO_TAIL)
 		list_add_tail(&page->lru, &n->partial);
@@ -1517,12 +1517,11 @@ static inline void add_partial(struct kmem_cache_node *n,
 		list_add(&page->lru, &n->partial);
 }
 
-/*
- * list_lock must be held.
- */
 static inline void remove_partial(struct kmem_cache_node *n,
 					struct page *page)
 {
+	lockdep_assert_held(&n->list_lock);
+
 	list_del(&page->lru);
 	n->nr_partial--;
 }
@@ -1532,8 +1531,6 @@ static inline void remove_partial(struct kmem_cache_node *n,
  * return the pointer to the freelist.
  *
  * Returns a list of objects or NULL if it fails.
- *
- * Must hold list_lock since we modify the partial list.
  */
 static inline void *acquire_slab(struct kmem_cache *s,
 		struct kmem_cache_node *n, struct page *page,
@@ -1543,6 +1540,8 @@ static inline void *acquire_slab(struct kmem_cache *s,
 	unsigned long counters;
 	struct page new;
 
+	lockdep_assert_held(&n->list_lock);
+
 	/*
 	 * Zap the freelist and set the frozen bit.
 	 * The old freelist is the list of objects for the
@@ -1887,7 +1886,7 @@ redo:
 
 		else if (l == M_FULL)
 
-			remove_full(s, page);
+			remove_full(s, n, page);
 
 	if (m == M_PARTIAL) {
 
@@ -2541,7 +2540,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 		new.inuse--;
 		if ((!new.inuse || !prior) && !was_frozen) {
 
-			if (kmem_cache_has_cpu_partial(s) && !prior)
+			if (kmem_cache_has_cpu_partial(s) && !prior) {
 
 				/*
 				 * Slab was on no list before and will be
@@ -2551,7 +2550,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 				 */
 				new.frozen = 1;
 
-			else { /* Needs to be taken off a list */
+			} else { /* Needs to be taken off a list */
 
 				n = get_node(s, page_to_nid(page));
 				/*
@@ -2600,7 +2599,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 	 */
 	if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
 		if (kmem_cache_debug(s))
-			remove_full(s, page);
+			remove_full(s, n, page);
 		add_partial(n, page, DEACTIVATE_TO_TAIL);
 		stat(s, FREE_ADD_PARTIAL);
 	}
@@ -2614,9 +2613,10 @@ slab_empty:
 	 */
 		remove_partial(n, page);
 		stat(s, FREE_REMOVE_PARTIAL);
-	} else
+	} else {
 		/* Slab must be on the full list */
-		remove_full(s, page);
+		remove_full(s, n, page);
+	}
 
 	spin_unlock_irqrestore(&n->list_lock, flags);
 	stat(s, FREE_SLAB);
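The new assertions only pass because every caller already brackets the list operations with n->list_lock, as the spin_unlock_irqrestore() visible in the last hunk shows for __slab_free(). A caller-side sketch of that shape, where free_to_node() is a hypothetical wrapper standing in for the real call sites:

/* Hypothetical caller sketch, mirroring the shape of __slab_free(): */
static void free_to_node(struct kmem_cache *s, struct kmem_cache_node *n,
			 struct page *page)
{
	unsigned long flags;

	spin_lock_irqsave(&n->list_lock, flags);	/* take list_lock first ... */
	remove_full(s, n, page);			/* ... so lockdep_assert_held() is satisfied */
	spin_unlock_irqrestore(&n->list_lock, flags);
}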