| author | Joonsoo Kim <iamjoonsoo.kim@lge.com> | 2013-01-21 03:01:25 -0500 |
|---|---|---|
| committer | Pekka Enberg <penberg@kernel.org> | 2013-04-02 02:42:10 -0400 |
| commit | 633b076464da52b3c7bf0f62932fbfc0ea23d8b3 (patch) | |
| tree | 546927d08f30ea3049051b89a55b7c7a56937f7f /mm | |
| parent | 7d557b3cb69398d83ceabad9cf147c93a3aa97fd (diff) | |
slub: correct to calculate num of acquired objects in get_partial_node()
There is a subtle bug in calculating the number of acquired objects.
Currently, we calculate "available = page->objects - page->inuse"
after acquire_slab() is called in get_partial_node().
In acquire_slab() with mode = 1, we always set new.inuse = page->objects.
So,
    t = acquire_slab(s, n, page, object == NULL);

    if (!object) {
        c->page = page;
        stat(s, ALLOC_FROM_PARTIAL);
        object = t;
        available = page->objects - page->inuse;

        !!! available is always 0 !!!
    ...
Therefore, "available > s->cpu_partial / 2" is always false and
we always go on to the second iteration.
This patch corrects the problem.
After that, the return value of put_cpu_partial() is no longer needed,
so remove it.
Reviewed-by: Wanpeng Li <liwanp@linux.vnet.ibm.com>
Acked-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
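
For illustration only, here is a minimal, self-contained C sketch (hypothetical `fake_slab` type and helper names, not the actual SLUB code) of the arithmetic described above: computing `available` after the slab has already been claimed always yields 0, while reporting the free count through an out-parameter before `inuse` is updated, as the patch does, returns the real number of free objects.

```c
#include <stdio.h>

/*
 * Hypothetical stand-in for the slab counters on struct page:
 * `objects` is the total number of slots, `inuse` the allocated ones.
 */
struct fake_slab {
	int objects;
	int inuse;
};

/* Buggy order: claim the slab first, count afterwards. */
static int acquire_then_count(struct fake_slab *page)
{
	page->inuse = page->objects;		/* acquire with mode = 1 claims everything */
	return page->objects - page->inuse;	/* by now this is always 0 */
}

/* Fixed order: count the free objects first, then claim the slab. */
static void acquire_and_count(struct fake_slab *page, int *objects)
{
	*objects = page->objects - page->inuse;	/* free objects before the update */
	page->inuse = page->objects;		/* then claim everything */
}

int main(void)
{
	struct fake_slab a = { .objects = 32, .inuse = 10 };
	struct fake_slab b = { .objects = 32, .inuse = 10 };
	int objects, available = 0;

	printf("buggy: available = %d\n", acquire_then_count(&a));	/* prints 0 */

	acquire_and_count(&b, &objects);
	available += objects;	/* accumulate, as the patched loop does */
	printf("fixed: available = %d\n", available);			/* prints 22 */
	return 0;
}
```

With `available` accumulated this way, the `available > s->cpu_partial / 2` check in get_partial_node() can actually become true, so the loop stops refilling the per-cpu partial list once enough free objects have been gathered.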
Diffstat (limited to 'mm')
-rw-r--r-- | mm/slub.c | 17 |
1 file changed, 9 insertions, 8 deletions
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1493,7 +1493,7 @@ static inline void remove_partial(struct kmem_cache_node *n,
  */
 static inline void *acquire_slab(struct kmem_cache *s,
 		struct kmem_cache_node *n, struct page *page,
-		int mode)
+		int mode, int *objects)
 {
 	void *freelist;
 	unsigned long counters;
@@ -1507,6 +1507,7 @@ static inline void *acquire_slab(struct kmem_cache *s,
 	freelist = page->freelist;
 	counters = page->counters;
 	new.counters = counters;
+	*objects = new.objects - new.inuse;
 	if (mode) {
 		new.inuse = page->objects;
 		new.freelist = NULL;
@@ -1528,7 +1529,7 @@ static inline void *acquire_slab(struct kmem_cache *s,
 	return freelist;
 }
 
-static int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain);
+static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain);
 static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags);
 
 /*
@@ -1539,6 +1540,8 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
 {
 	struct page *page, *page2;
 	void *object = NULL;
+	int available = 0;
+	int objects;
 
 	/*
 	 * Racy check. If we mistakenly see no partial slabs then we
@@ -1552,22 +1555,21 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
 	spin_lock(&n->list_lock);
 	list_for_each_entry_safe(page, page2, &n->partial, lru) {
 		void *t;
-		int available;
 
 		if (!pfmemalloc_match(page, flags))
 			continue;
 
-		t = acquire_slab(s, n, page, object == NULL);
+		t = acquire_slab(s, n, page, object == NULL, &objects);
 		if (!t)
 			break;
 
+		available += objects;
 		if (!object) {
 			c->page = page;
 			stat(s, ALLOC_FROM_PARTIAL);
 			object = t;
-			available = page->objects - page->inuse;
 		} else {
-			available = put_cpu_partial(s, page, 0);
+			put_cpu_partial(s, page, 0);
 			stat(s, CPU_PARTIAL_NODE);
 		}
 		if (kmem_cache_debug(s) || available > s->cpu_partial / 2)
@@ -1946,7 +1948,7 @@ static void unfreeze_partials(struct kmem_cache *s,
  * If we did not find a slot then simply move all the partials to the
  * per node partial list.
  */
-static int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
+static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 {
 	struct page *oldpage;
 	int pages;
@@ -1984,7 +1986,6 @@ static int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 		page->next = oldpage;
 
 	} while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
-	return pobjects;
 }
 
 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)