| author    | Joonsoo Kim <js1304@gmail.com>                      | 2012-09-17 17:09:09 -0400 |
|-----------|-----------------------------------------------------|---------------------------|
| committer | Linus Torvalds <torvalds@linux-foundation.org>      | 2012-09-17 18:00:38 -0400 |
| commit    | 8ba00bb68a067c221cc3ea3a0293e8fcbdcb7ba1            |                           |
| tree      | 6642f9819b79c51891d7f829c5cf1f384ffc6cc5 /mm/slub.c |                           |
| parent    | d014dc2ed4fae84cb92509416c8bfc9078d4f0d9            |                           |
slub: consider pfmemalloc_match() in get_partial_node()
get_partial() is currently not checking pfmemalloc_match(), meaning that
it is possible for pfmemalloc pages to leak to non-pfmemalloc users.
This is a problem in the following situation. Assume that there is a
request from a normal allocation and there are no objects in the per-cpu
cache and no node-partial slab.
In this case, slab_alloc() enters the slow path and new_slab_objects() is
called, which may return a PFMEMALLOC page. As the current user is not
allowed to access the PFMEMALLOC page, deactivate_slab() is called for
that page ([5091b74a: mm: slub: optimise the SLUB fast path to avoid
pfmemalloc checks]), yet an object from the PFMEMALLOC page is still
returned, as sketched below.
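
For context, the tail of __slab_alloc() after [5091b74a] behaves roughly as
follows. This is a paraphrased sketch of the mm/slub.c of this era, not part
of this patch, so details may differ slightly from the exact tree:

```c
/*
 * Paraphrased sketch of the tail of __slab_alloc() after 5091b74a
 * (not part of this patch): a freshly obtained slab that fails
 * pfmemalloc_match() is deactivated, but one object from it is
 * still handed back to the caller.
 */
	page = c->page;
	if (likely(!kmem_cache_debug(s) && pfmemalloc_match(page, gfpflags)))
		goto load_freelist;

	/* Only entered in the debug case */
	if (kmem_cache_debug(s) &&
			!alloc_debug_processing(s, page, freelist, addr))
		goto new_slab;	/* Slab failed checks. Next slab needed */

	/* Mismatch: deactivate the PFMEMALLOC slab, return an object anyway. */
	deactivate_slab(s, page, get_freepointer(s, freelist));
	c->page = NULL;
	c->freelist = NULL;
	return freelist;
```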
Next time, when we get another request from a normal allocation,
slab_alloc() enters the slow path and calls new_slab_objects(). In
new_slab_objects(), we call get_partial() and get the partial slab that
was just deactivated but is still a PFMEMALLOC page. We extract one
object from it and re-deactivate the slab, as the sketch below shows.
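
Why does the next slow-path allocation find that slab again? new_slab_objects()
consults the partial lists before allocating a new slab. The following is a
paraphrased sketch of new_slab_objects() around this time, not part of this
patch, with the tail that installs a freshly allocated page elided:

```c
/* Paraphrased sketch of new_slab_objects() around this time. */
static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
			int node, struct kmem_cache_cpu **pc)
{
	void *freelist;
	struct kmem_cache_cpu *c = *pc;
	struct page *page;

	/* Partial lists first: this re-gets the just-deactivated slab. */
	freelist = get_partial(s, flags, node, c);
	if (freelist)
		return freelist;

	page = new_slab(s, flags, node);
	/*
	 * ... install the new page in the per-cpu structure and take its
	 * freelist, or leave freelist NULL if the allocation failed ...
	 */

	return freelist;
}
```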
"deactivate -> re-get in get_partial -> re-deactivate" occures repeatedly.
As a result, access to PFMEMALLOC page is not properly restricted and it
can cause a performance degradation due to frequent deactivation.
deactivation frequently.
This patch changes get_partial_node() to take pfmemalloc_match() into
account and prevents the "deactivate -> re-get in get_partial()"
scenario. Instead, new_slab() is called.
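
For reference, the helper that the first hunk below forward-declares is
defined further down in mm/slub.c; at this point in the tree it reads roughly
as follows (reproduced from the surrounding source rather than from this
patch):

```c
/*
 * pfmemalloc_match(), roughly as it exists elsewhere in mm/slub.c
 * (the patch below only adds a forward declaration for it): objects
 * from a slab allocated out of the pfmemalloc reserves may only serve
 * allocations that are themselves entitled to those reserves.
 */
static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags)
{
	if (unlikely(PageSlabPfmemalloc(page)))
		return gfp_pfmemalloc_allowed(gfpflags);

	return true;
}
```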
Signed-off-by: Joonsoo Kim <js1304@gmail.com>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Mel Gorman <mgorman@suse.de>
Cc: David Miller <davem@davemloft.net>
Cc: Chuck Lever <chuck.lever@oracle.com>
Cc: Pekka Enberg <penberg@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/slub.c')
mm/slub.c | 15 ++++++++++-----
1 file changed, 10 insertions(+), 5 deletions(-)
```diff
@@ -1524,12 +1524,13 @@ static inline void *acquire_slab(struct kmem_cache *s,
 }
 
 static int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain);
+static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags);
 
 /*
  * Try to allocate a partial slab from a specific node.
  */
-static void *get_partial_node(struct kmem_cache *s,
-		struct kmem_cache_node *n, struct kmem_cache_cpu *c)
+static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
+				struct kmem_cache_cpu *c, gfp_t flags)
 {
 	struct page *page, *page2;
 	void *object = NULL;
@@ -1545,9 +1546,13 @@ static void *get_partial_node(struct kmem_cache *s,
 
 	spin_lock(&n->list_lock);
 	list_for_each_entry_safe(page, page2, &n->partial, lru) {
-		void *t = acquire_slab(s, n, page, object == NULL);
+		void *t;
 		int available;
 
+		if (!pfmemalloc_match(page, flags))
+			continue;
+
+		t = acquire_slab(s, n, page, object == NULL);
 		if (!t)
 			break;
 
@@ -1614,7 +1619,7 @@ static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
 
 		if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
 				n->nr_partial > s->min_partial) {
-			object = get_partial_node(s, n, c);
+			object = get_partial_node(s, n, c, flags);
 			if (object) {
 				/*
 				 * Return the object even if
@@ -1643,7 +1648,7 @@ static void *get_partial(struct kmem_cache *s, gfp_t flags, int node,
 	void *object;
 	int searchnode = (node == NUMA_NO_NODE) ? numa_node_id() : node;
 
-	object = get_partial_node(s, get_node(s, searchnode), c);
+	object = get_partial_node(s, get_node(s, searchnode), c, flags);
 	if (object || node != NUMA_NO_NODE)
 		return object;
 
```
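
Reassembled from the hunks above, the partial-list scan in get_partial_node()
ends up looking like this (surrounding code elided):

```c
	list_for_each_entry_safe(page, page2, &n->partial, lru) {
		void *t;
		int available;

		/*
		 * Reserved pfmemalloc slabs are skipped for callers that
		 * may not use them; the scan simply moves on.
		 */
		if (!pfmemalloc_match(page, flags))
			continue;

		t = acquire_slab(s, n, page, object == NULL);
		if (!t)
			break;

		/* ... count available objects and decide whether to stop ... */
	}
```

Note that the new check uses continue rather than break: a pfmemalloc slab
stays on the partial list for pfmemalloc-entitled users while a normal request
keeps scanning, and if no suitable partial slab is found, get_partial() fails
and new_slab() is called, exactly as the changelog describes.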