Diffstat (limited to 'mm/slub.c')
 mm/slub.c | 29 +++++++++++++++++++++++++++--
 1 file changed, 27 insertions(+), 2 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index e517d435e5d..c3f05e1599c 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -34,6 +34,8 @@
 
 #include <trace/events/kmem.h>
 
+#include "internal.h"
+
 /*
  * Lock order:
  *   1. slab_mutex (Global Mutex)
@@ -1354,6 +1356,8 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 	inc_slabs_node(s, page_to_nid(page), page->objects);
 	page->slab = s;
 	__SetPageSlab(page);
+	if (page->pfmemalloc)
+		SetPageSlabPfmemalloc(page);
 
 	start = page_address(page);
 
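
The two added lines rely on page->pfmemalloc, which the page allocator sets when it hands out a page from the emergency reserves, and on SetPageSlabPfmemalloc(), which comes from the page-flags side of this series. A sketch of that helper pair from memory (the real definitions live in include/linux/page-flags.h; treat the details, in particular the PG_active aliasing and the VM_BUG_ON placement, as assumptions):

static inline int PageSlabPfmemalloc(struct page *page)
{
	/* Assumed: the state aliases PG_active, which is otherwise
	 * unused on slab pages, so no new page flag is consumed. */
	VM_BUG_ON(!PageSlab(page));
	return PageActive(page);
}

static inline void SetPageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON(!PageSlab(page));
	SetPageActive(page);
}

Because the helpers assert PageSlab(), new_slab() can only mark the page after __SetPageSlab(), which is exactly where the hunk above places the check.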
@@ -1397,6 +1401,7 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
 		-pages);
 
+	__ClearPageSlabPfmemalloc(page);
 	__ClearPageSlab(page);
 	reset_page_mapcount(page);
 	if (current->reclaim_state)
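
The same PageSlab() assertion dictates the ordering on the free side: the pfmemalloc bit must be cleared while the page still looks like a slab page, so the new line lands before __ClearPageSlab(). A hedged sketch of the non-atomic clear helper (again assuming the PG_active alias):

static inline void __ClearPageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON(!PageSlab(page));
	/* Non-atomic is safe here: the page is being torn down and
	 * has no other users. */
	__ClearPageActive(page);
}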
@@ -2126,6 +2131,14 @@ static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
 	return freelist;
 }
 
+static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags)
+{
+	if (unlikely(PageSlabPfmemalloc(page)))
+		return gfp_pfmemalloc_allowed(gfpflags);
+
+	return true;
+}
+
 /*
  * Check the page->freelist of a page and either transfer the freelist to the per cpu freelist
  * or deactivate the page.
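
pfmemalloc_match() is the policy core of the patch: a normal slab page serves any request, while a page carved out of the reserves serves only callers that are themselves entitled to those reserves. gfp_pfmemalloc_allowed() is provided by the page allocator part of the series; a sketch from memory (gfp_to_alloc_flags() and ALLOC_NO_WATERMARKS are page-allocator internals, so treat the body as an assumption):

/* mm/page_alloc.c, same series (sketch): a request may dip into the
 * reserves iff the allocator would grant it ALLOC_NO_WATERMARKS,
 * e.g. __GFP_MEMALLOC requests or PF_MEMALLOC tasks such as kswapd. */
bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
{
	return !!(gfp_to_alloc_flags(gfp_mask) & ALLOC_NO_WATERMARKS);
}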
@@ -2206,6 +2219,18 @@ redo:
 		goto new_slab;
 	}
 
+	/*
+	 * By rights, we should be searching for a slab page that was
+	 * PFMEMALLOC but right now, we are losing the pfmemalloc
+	 * information when the page leaves the per-cpu allocator
+	 */
+	if (unlikely(!pfmemalloc_match(page, gfpflags))) {
+		deactivate_slab(s, page, c->freelist);
+		c->page = NULL;
+		c->freelist = NULL;
+		goto new_slab;
+	}
+
 	/* must check again c->freelist in case of cpu migration or IRQ */
 	freelist = c->freelist;
 	if (freelist)
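
In the slow path a pfmemalloc mismatch is handled like a NUMA node mismatch: deactivate the cpu slab and find (or allocate) another one, at the cost the in-diff comment admits, namely losing the pfmemalloc information for that page. The gate itself is a two-input decision; a standalone userspace model of it (all names hypothetical, not kernel code):

#include <stdbool.h>
#include <stdio.h>

/* Model of pfmemalloc_match(): a slab page taken from the emergency
 * reserves may only satisfy requests that are themselves entitled to
 * those reserves; ordinary pages serve everyone. */
static bool match_model(bool page_is_pfmemalloc, bool request_may_use_reserves)
{
	if (page_is_pfmemalloc)
		return request_may_use_reserves;
	return true;
}

int main(void)
{
	for (int page = 0; page <= 1; page++)
		for (int req = 0; req <= 1; req++)
			printf("pfmemalloc page=%d entitled request=%d -> %s\n",
			       page, req,
			       match_model(page, req) ? "serve from this slab"
						      : "deactivate, take another slab");
	return 0;
}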
@@ -2312,8 +2337,8 @@ redo:
 
 	object = c->freelist;
 	page = c->page;
-	if (unlikely(!object || !node_match(page, node)))
-
+	if (unlikely(!object || !node_match(page, node) ||
+		!pfmemalloc_match(page, gfpflags)))
 		object = __slab_alloc(s, gfpflags, node, addr, c);
 
 	else {
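
On the fast path the new test folds into the existing unlikely() branch, so in the common case of no pfmemalloc pages it costs a single flag test; on a mismatch the allocation simply drops into __slab_alloc(), which runs the deactivate-and-retry logic from the previous hunk. A caller that genuinely is entitled to the reserves says so in its gfp mask; a hypothetical example using __GFP_MEMALLOC, which this same series introduces:

/* Hypothetical caller, not part of this patch: a reserve-entitled
 * context (e.g. the swap-over-network receive path) opts in explicitly
 * and is therefore allowed to take objects from pfmemalloc slabs. */
obj = kmalloc(size, GFP_ATOMIC | __GFP_MEMALLOC);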