author:    Christoph Lameter <cl@linux.com>      2011-06-01 13:25:50 -0400
committer: Pekka Enberg <penberg@kernel.org>     2011-07-02 06:26:54 -0400
commit:    5cc6eee8a8c1aefe9c86fe7345a2aa1c4ca70dc6
tree:      3d69b8a7a44094ea336efbc59698d0949f5f6ec7 /mm/slub.c
parent:    b789ef518b2a7231b0668c813f677cee528a9d3f
slub: explicit list_lock taking
The allocator fastpath rework changes how the list_lock is used. Remove the
list_lock handling from the helper functions that hid it from their callers,
and take the lock explicitly in the critical sections instead.

This in turn simplifies the support functions (no __ variant is needed any
more) and simplifies the lock handling on bootstrap.

Inline add_partial() since it becomes pretty simple.
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c | 89
1 file changed, 49 insertions(+), 40 deletions(-)
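Before reading the hunks, it may help to see the locking convention the patch
establishes: the list helpers (add_partial(), remove_partial(), add_full(),
remove_full()) now assume that n->list_lock is already held, and each caller
brackets them with an explicit spin_lock()/spin_unlock() pair. The userspace
sketch below illustrates that shape with pthreads; the names (node_list, slab,
add_partial, remove_partial) only mirror the kernel ones, and the code is a
hypothetical illustration rather than anything taken from mm/slub.c.

/*
 * Hypothetical userspace analogy of the new convention: the helpers assume
 * the list lock is held; the caller owns the critical section and can batch
 * several list operations under a single lock acquisition.
 */
#include <pthread.h>
#include <stdio.h>

struct slab {
        struct slab *next;
        int id;
};

struct node_list {
        pthread_mutex_t list_lock;      /* protects partial and nr_partial */
        struct slab *partial;           /* singly linked partial list */
        unsigned long nr_partial;
};

/* list_lock must be held by the caller (mirrors the reworked add_partial()). */
static inline void add_partial(struct node_list *n, struct slab *s)
{
        s->next = n->partial;
        n->partial = s;
        n->nr_partial++;
}

/* list_lock must be held by the caller (mirrors the reworked remove_partial()). */
static inline void remove_partial(struct node_list *n, struct slab *s)
{
        struct slab **p = &n->partial;

        while (*p && *p != s)
                p = &(*p)->next;
        if (*p) {
                *p = s->next;
                n->nr_partial--;
        }
}

int main(void)
{
        struct node_list n = { PTHREAD_MUTEX_INITIALIZER, NULL, 0 };
        struct slab a = { NULL, 1 }, b = { NULL, 2 };

        /* The caller takes the lock once around the whole critical section. */
        pthread_mutex_lock(&n.list_lock);
        add_partial(&n, &a);
        add_partial(&n, &b);
        remove_partial(&n, &a);
        pthread_mutex_unlock(&n.list_lock);

        printf("nr_partial = %lu\n", n.nr_partial);     /* prints 1 */
        return 0;
}

The payoff of this shape is visible at the call sites: a caller that needs
several list manipulations (as __slab_free() and unfreeze_slab() do below)
pays for a single lock acquisition, and the helpers no longer need separate
__-prefixed lock-free variants.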
@@ -916,26 +916,27 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x)
 
 /*
  * Tracking of fully allocated slabs for debugging purposes.
+ *
+ * list_lock must be held.
  */
-static void add_full(struct kmem_cache_node *n, struct page *page)
+static void add_full(struct kmem_cache *s,
+        struct kmem_cache_node *n, struct page *page)
 {
-        spin_lock(&n->list_lock);
+        if (!(s->flags & SLAB_STORE_USER))
+                return;
+
         list_add(&page->lru, &n->full);
-        spin_unlock(&n->list_lock);
 }
 
+/*
+ * list_lock must be held.
+ */
 static void remove_full(struct kmem_cache *s, struct page *page)
 {
-        struct kmem_cache_node *n;
-
         if (!(s->flags & SLAB_STORE_USER))
                 return;
 
-        n = get_node(s, page_to_nid(page));
-
-        spin_lock(&n->list_lock);
         list_del(&page->lru);
-        spin_unlock(&n->list_lock);
 }
 
 /* Tracking of the number of slabs for debugging purposes */
@@ -1060,8 +1061,13 @@ static noinline int free_debug_processing(struct kmem_cache *s,
         }
 
         /* Special debug activities for freeing objects */
-        if (!page->frozen && !page->freelist)
+        if (!page->frozen && !page->freelist) {
+                struct kmem_cache_node *n = get_node(s, page_to_nid(page));
+
+                spin_lock(&n->list_lock);
                 remove_full(s, page);
+                spin_unlock(&n->list_lock);
+        }
         if (s->flags & SLAB_STORE_USER)
                 set_track(s, object, TRACK_FREE, addr);
         trace(s, page, object, 0);
@@ -1170,7 +1176,8 @@ static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
                         { return 1; }
 static inline int check_object(struct kmem_cache *s, struct page *page,
                         void *object, u8 val) { return 1; }
-static inline void add_full(struct kmem_cache_node *n, struct page *page) {}
+static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
+                                        struct page *page) {}
 static inline unsigned long kmem_cache_flags(unsigned long objsize,
         unsigned long flags, const char *name,
         void (*ctor)(void *))
@@ -1420,38 +1427,33 @@ static __always_inline int slab_trylock(struct page *page)
 }
 
 /*
- * Management of partially allocated slabs
+ * Management of partially allocated slabs.
+ *
+ * list_lock must be held.
  */
-static void add_partial(struct kmem_cache_node *n,
+static inline void add_partial(struct kmem_cache_node *n,
                                 struct page *page, int tail)
 {
-        spin_lock(&n->list_lock);
         n->nr_partial++;
         if (tail)
                 list_add_tail(&page->lru, &n->partial);
         else
                 list_add(&page->lru, &n->partial);
-        spin_unlock(&n->list_lock);
 }
 
-static inline void __remove_partial(struct kmem_cache_node *n,
+/*
+ * list_lock must be held.
+ */
+static inline void remove_partial(struct kmem_cache_node *n,
                                         struct page *page)
 {
         list_del(&page->lru);
         n->nr_partial--;
 }
 
-static void remove_partial(struct kmem_cache *s, struct page *page)
-{
-        struct kmem_cache_node *n = get_node(s, page_to_nid(page));
-
-        spin_lock(&n->list_lock);
-        __remove_partial(n, page);
-        spin_unlock(&n->list_lock);
-}
-
 /*
- * Lock slab and remove from the partial list.
+ * Lock slab, remove from the partial list and put the object into the
+ * per cpu freelist.
  *
  * Must hold list_lock.
  */
@@ -1459,7 +1461,7 @@ static inline int lock_and_freeze_slab(struct kmem_cache_node *n,
                                                         struct page *page)
 {
         if (slab_trylock(page)) {
-                __remove_partial(n, page);
+                remove_partial(n, page);
                 return 1;
         }
         return 0;
@@ -1576,12 +1578,17 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
         if (page->inuse) {
 
                 if (page->freelist) {
+                        spin_lock(&n->list_lock);
                         add_partial(n, page, tail);
+                        spin_unlock(&n->list_lock);
                         stat(s, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
                 } else {
                         stat(s, DEACTIVATE_FULL);
-                        if (kmem_cache_debug(s) && (s->flags & SLAB_STORE_USER))
-                                add_full(n, page);
+                        if (kmem_cache_debug(s) && (s->flags & SLAB_STORE_USER)) {
+                                spin_lock(&n->list_lock);
+                                add_full(s, n, page);
+                                spin_unlock(&n->list_lock);
+                        }
                 }
                 slab_unlock(page);
         } else {
@@ -1597,7 +1604,9 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
                          * kmem_cache_shrink can reclaim any empty slabs from
                          * the partial list.
                          */
+                        spin_lock(&n->list_lock);
                         add_partial(n, page, 1);
+                        spin_unlock(&n->list_lock);
                         slab_unlock(page);
                 } else {
                         slab_unlock(page);
@@ -2099,7 +2108,11 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
          * then add it.
          */
         if (unlikely(!prior)) {
+                struct kmem_cache_node *n = get_node(s, page_to_nid(page));
+
+                spin_lock(&n->list_lock);
                 add_partial(get_node(s, page_to_nid(page)), page, 1);
+                spin_unlock(&n->list_lock);
                 stat(s, FREE_ADD_PARTIAL);
         }
 
@@ -2113,7 +2126,11 @@ slab_empty:
                 /*
                  * Slab still on the partial list.
                  */
-                remove_partial(s, page);
+                struct kmem_cache_node *n = get_node(s, page_to_nid(page));
+
+                spin_lock(&n->list_lock);
+                remove_partial(n, page);
+                spin_unlock(&n->list_lock);
                 stat(s, FREE_REMOVE_PARTIAL);
         }
         slab_unlock(page);
@@ -2391,7 +2408,6 @@ static void early_kmem_cache_node_alloc(int node)
 {
         struct page *page;
         struct kmem_cache_node *n;
-        unsigned long flags;
 
         BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node));
 
@@ -2418,14 +2434,7 @@ static void early_kmem_cache_node_alloc(int node)
         init_kmem_cache_node(n, kmem_cache_node);
         inc_slabs_node(kmem_cache_node, node, page->objects);
 
-        /*
-         * lockdep requires consistent irq usage for each lock
-         * so even though there cannot be a race this early in
-         * the boot sequence, we still disable irqs.
-         */
-        local_irq_save(flags);
         add_partial(n, page, 0);
-        local_irq_restore(flags);
 }
 
 static void free_kmem_cache_nodes(struct kmem_cache *s)
@@ -2709,7 +2718,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
         spin_lock_irqsave(&n->list_lock, flags);
         list_for_each_entry_safe(page, h, &n->partial, lru) {
                 if (!page->inuse) {
-                        __remove_partial(n, page);
+                        remove_partial(n, page);
                         discard_slab(s, page);
                 } else {
                         list_slab_objects(s, page,
@@ -3047,7 +3056,7 @@ int kmem_cache_shrink(struct kmem_cache *s)
                                  * may have freed the last object and be
                                  * waiting to release the slab.
                                  */
-                                __remove_partial(n, page);
+                                remove_partial(n, page);
                                 slab_unlock(page);
                                 discard_slab(s, page);
                         } else {