aboutsummaryrefslogtreecommitdiffstats
path: root/mm/page_alloc.c
diff options
context:
space:
mode:
authorJohannes Weiner <hannes@cmpxchg.org>2018-10-26 18:06:27 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2018-10-26 19:26:32 -0400
commiteb414681d5a07d28d2ff90dc05f69ec6b232ebd2 (patch)
tree69e37010954e597b404709ecd9a11b9f7373cf0f /mm/page_alloc.c
parent246b3b3342c9b0a2e24cda2178be87bc36e1c874 (diff)
psi: pressure stall information for CPU, memory, and IO
When systems are overcommitted and resources become contended, it's hard to tell exactly the impact this has on workload productivity, or how close the system is to lockups and OOM kills. In particular, when machines work multiple jobs concurrently, the impact of overcommit in terms of latency and throughput on the individual job can be enormous. In order to maximize hardware utilization without sacrificing individual job health or risk complete machine lockups, this patch implements a way to quantify resource pressure in the system. A kernel built with CONFIG_PSI=y creates files in /proc/pressure/ that expose the percentage of time the system is stalled on CPU, memory, or IO, respectively. Stall states are aggregate versions of the per-task delay accounting delays: cpu: some tasks are runnable but not executing on a CPU memory: tasks are reclaiming, or waiting for swapin or thrashing cache io: tasks are waiting for io completions These percentages of walltime can be thought of as pressure percentages, and they give a general sense of system health and productivity loss incurred by resource overcommit. They can also indicate when the system is approaching lockup scenarios and OOMs. To do this, psi keeps track of the task states associated with each CPU and samples the time they spend in stall states. Every 2 seconds, the samples are averaged across CPUs - weighted by the CPUs' non-idle time to eliminate artifacts from unused CPUs - and translated into percentages of walltime. A running average of those percentages is maintained over 10s, 1m, and 5m periods (similar to the loadaverage). 
[hannes@cmpxchg.org: doc fixlet, per Randy] Link: http://lkml.kernel.org/r/20180828205625.GA14030@cmpxchg.org [hannes@cmpxchg.org: code optimization] Link: http://lkml.kernel.org/r/20180907175015.GA8479@cmpxchg.org [hannes@cmpxchg.org: rename psi_clock() to psi_update_work(), per Peter] Link: http://lkml.kernel.org/r/20180907145404.GB11088@cmpxchg.org [hannes@cmpxchg.org: fix build] Link: http://lkml.kernel.org/r/20180913014222.GA2370@cmpxchg.org Link: http://lkml.kernel.org/r/20180828172258.3185-9-hannes@cmpxchg.org Signed-off-by: Johannes Weiner <hannes@cmpxchg.org> Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org> Tested-by: Daniel Drake <drake@endlessm.com> Tested-by: Suren Baghdasaryan <surenb@google.com> Cc: Christopher Lameter <cl@linux.com> Cc: Ingo Molnar <mingo@redhat.com> Cc: Johannes Weiner <jweiner@fb.com> Cc: Mike Galbraith <efault@gmx.de> Cc: Peter Enderborg <peter.enderborg@sony.com> Cc: Randy Dunlap <rdunlap@infradead.org> Cc: Shakeel Butt <shakeelb@google.com> Cc: Tejun Heo <tj@kernel.org> Cc: Vinayak Menon <vinmenon@codeaurora.org> Cc: Randy Dunlap <rdunlap@infradead.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--mm/page_alloc.c9
1 file changed, 9 insertions, 0 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 20f25d06c00c..f97b5a1700a4 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -66,6 +66,7 @@
66#include <linux/ftrace.h> 66#include <linux/ftrace.h>
67#include <linux/lockdep.h> 67#include <linux/lockdep.h>
68#include <linux/nmi.h> 68#include <linux/nmi.h>
69#include <linux/psi.h>
69 70
70#include <asm/sections.h> 71#include <asm/sections.h>
71#include <asm/tlbflush.h> 72#include <asm/tlbflush.h>
@@ -3549,15 +3550,20 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
3549 enum compact_priority prio, enum compact_result *compact_result) 3550 enum compact_priority prio, enum compact_result *compact_result)
3550{ 3551{
3551 struct page *page; 3552 struct page *page;
3553 unsigned long pflags;
3552 unsigned int noreclaim_flag; 3554 unsigned int noreclaim_flag;
3553 3555
3554 if (!order) 3556 if (!order)
3555 return NULL; 3557 return NULL;
3556 3558
3559 psi_memstall_enter(&pflags);
3557 noreclaim_flag = memalloc_noreclaim_save(); 3560 noreclaim_flag = memalloc_noreclaim_save();
3561
3558 *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac, 3562 *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
3559 prio); 3563 prio);
3564
3560 memalloc_noreclaim_restore(noreclaim_flag); 3565 memalloc_noreclaim_restore(noreclaim_flag);
3566 psi_memstall_leave(&pflags);
3561 3567
3562 if (*compact_result <= COMPACT_INACTIVE) 3568 if (*compact_result <= COMPACT_INACTIVE)
3563 return NULL; 3569 return NULL;
@@ -3756,11 +3762,13 @@ __perform_reclaim(gfp_t gfp_mask, unsigned int order,
3756 struct reclaim_state reclaim_state; 3762 struct reclaim_state reclaim_state;
3757 int progress; 3763 int progress;
3758 unsigned int noreclaim_flag; 3764 unsigned int noreclaim_flag;
3765 unsigned long pflags;
3759 3766
3760 cond_resched(); 3767 cond_resched();
3761 3768
3762 /* We now go into synchronous reclaim */ 3769 /* We now go into synchronous reclaim */
3763 cpuset_memory_pressure_bump(); 3770 cpuset_memory_pressure_bump();
3771 psi_memstall_enter(&pflags);
3764 fs_reclaim_acquire(gfp_mask); 3772 fs_reclaim_acquire(gfp_mask);
3765 noreclaim_flag = memalloc_noreclaim_save(); 3773 noreclaim_flag = memalloc_noreclaim_save();
3766 reclaim_state.reclaimed_slab = 0; 3774 reclaim_state.reclaimed_slab = 0;
@@ -3772,6 +3780,7 @@ __perform_reclaim(gfp_t gfp_mask, unsigned int order,
3772 current->reclaim_state = NULL; 3780 current->reclaim_state = NULL;
3773 memalloc_noreclaim_restore(noreclaim_flag); 3781 memalloc_noreclaim_restore(noreclaim_flag);
3774 fs_reclaim_release(gfp_mask); 3782 fs_reclaim_release(gfp_mask);
3783 psi_memstall_leave(&pflags);
3775 3784
3776 cond_resched(); 3785 cond_resched();
3777 3786