summaryrefslogtreecommitdiffstats
path: root/mm/page_alloc.c
diff options
context:
space:
mode:
authorPeter Zijlstra <peterz@infradead.org>2017-03-03 04:13:38 -0500
committerIngo Molnar <mingo@kernel.org>2017-08-10 06:29:03 -0400
commitd92a8cfcb37ecd1315269dab741f073b63b3a8b6 (patch)
treebd081c6309a48d23ae074e2035105be13622b005 /mm/page_alloc.c
parenta9668cd6ee288c4838bc668880ac085be551cac2 (diff)
locking/lockdep: Rework FS_RECLAIM annotation
A while ago someone, and I cannot find the email just now, asked if we could not implement the RECLAIM_FS inversion stuff with a 'fake' lock like we use for other things like workqueues etc. I think this should be possible, which allows reducing the 'irq' states and will reduce the amount of __bfs() lookups we do. Removing the 1 IRQ state results in 4 fewer __bfs() walks per dependency, improving lockdep performance. And by moving this annotation out of the lockdep code it becomes easier for the mm people to extend. Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: Byungchul Park <byungchul.park@lge.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Mel Gorman <mgorman@suse.de> Cc: Michal Hocko <mhocko@kernel.org> Cc: Nikolay Borisov <nborisov@suse.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: akpm@linux-foundation.org Cc: boqun.feng@gmail.com Cc: iamjoonsoo.kim@lge.com Cc: kernel-team@lge.com Cc: kirill@shutemov.name Cc: npiggin@gmail.com Cc: walken@google.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--mm/page_alloc.c49
1 file changed, 46 insertions, 3 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index fc32aa81f359..c20d89601802 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -66,6 +66,7 @@
66#include <linux/kthread.h> 66#include <linux/kthread.h>
67#include <linux/memcontrol.h> 67#include <linux/memcontrol.h>
68#include <linux/ftrace.h> 68#include <linux/ftrace.h>
69#include <linux/lockdep.h>
69 70
70#include <asm/sections.h> 71#include <asm/sections.h>
71#include <asm/tlbflush.h> 72#include <asm/tlbflush.h>
@@ -3490,6 +3491,47 @@ should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_fla
3490} 3491}
3491#endif /* CONFIG_COMPACTION */ 3492#endif /* CONFIG_COMPACTION */
3492 3493
#ifdef CONFIG_LOCKDEP
/*
 * Pseudo-lock used to teach lockdep about GFP_FS reclaim recursion:
 * "acquiring" it marks a context that may enter __GFP_FS direct reclaim,
 * so lock order against it is validated like any real lock.
 */
struct lockdep_map __fs_reclaim_map =
	STATIC_LOCKDEP_MAP_INIT("fs_reclaim", &__fs_reclaim_map);

/*
 * Should an allocation with @gfp_mask be annotated on the fs_reclaim
 * pseudo-lock? Returns true only for blocking __GFP_FS allocations that
 * can actually recurse into reclaim and have not opted out of lockdep.
 */
static bool __need_fs_reclaim(gfp_t gfp_mask)
{
	/* Fold in the task's scoped gfp constraints first. */
	gfp_mask = current_gfp_context(gfp_mask);

	/* Only direct (blocking) reclaim is of interest. */
	if (!(gfp_mask & __GFP_DIRECT_RECLAIM))
		return false;

	/* A PF_MEMALLOC task will not enter reclaim itself... */
	if ((current->flags & PF_MEMALLOC) && !(gfp_mask & __GFP_NOMEMALLOC))
		return false;

	/*
	 * ...and for now only __GFP_FS allocations count, provided the
	 * caller did not explicitly opt out via __GFP_NOLOCKDEP.
	 */
	return (gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NOLOCKDEP);
}

/* Annotate entry into a context that may perform __GFP_FS reclaim. */
void fs_reclaim_acquire(gfp_t gfp_mask)
{
	if (__need_fs_reclaim(gfp_mask))
		lock_map_acquire(&__fs_reclaim_map);
}
EXPORT_SYMBOL_GPL(fs_reclaim_acquire);

/* Matching release for fs_reclaim_acquire(). */
void fs_reclaim_release(gfp_t gfp_mask)
{
	if (__need_fs_reclaim(gfp_mask))
		lock_map_release(&__fs_reclaim_map);
}
EXPORT_SYMBOL_GPL(fs_reclaim_release);
#endif
3534
3493/* Perform direct synchronous page reclaim */ 3535/* Perform direct synchronous page reclaim */
3494static int 3536static int
3495__perform_reclaim(gfp_t gfp_mask, unsigned int order, 3537__perform_reclaim(gfp_t gfp_mask, unsigned int order,
@@ -3504,7 +3546,7 @@ __perform_reclaim(gfp_t gfp_mask, unsigned int order,
3504 /* We now go into synchronous reclaim */ 3546 /* We now go into synchronous reclaim */
3505 cpuset_memory_pressure_bump(); 3547 cpuset_memory_pressure_bump();
3506 noreclaim_flag = memalloc_noreclaim_save(); 3548 noreclaim_flag = memalloc_noreclaim_save();
3507 lockdep_set_current_reclaim_state(gfp_mask); 3549 fs_reclaim_acquire(gfp_mask);
3508 reclaim_state.reclaimed_slab = 0; 3550 reclaim_state.reclaimed_slab = 0;
3509 current->reclaim_state = &reclaim_state; 3551 current->reclaim_state = &reclaim_state;
3510 3552
@@ -3512,7 +3554,7 @@ __perform_reclaim(gfp_t gfp_mask, unsigned int order,
3512 ac->nodemask); 3554 ac->nodemask);
3513 3555
3514 current->reclaim_state = NULL; 3556 current->reclaim_state = NULL;
3515 lockdep_clear_current_reclaim_state(); 3557 fs_reclaim_release(gfp_mask);
3516 memalloc_noreclaim_restore(noreclaim_flag); 3558 memalloc_noreclaim_restore(noreclaim_flag);
3517 3559
3518 cond_resched(); 3560 cond_resched();
@@ -4041,7 +4083,8 @@ static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
4041 *alloc_flags |= ALLOC_CPUSET; 4083 *alloc_flags |= ALLOC_CPUSET;
4042 } 4084 }
4043 4085
4044 lockdep_trace_alloc(gfp_mask); 4086 fs_reclaim_acquire(gfp_mask);
4087 fs_reclaim_release(gfp_mask);
4045 4088
4046 might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM); 4089 might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);
4047 4090