about summary refs log tree commit diff stats
path: root/mm/vmscan.c
diff options
context:
space:
mode:
author     Mel Gorman <mel@csn.ul.ie>                      2010-08-09 20:19:16 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2010-08-09 23:44:59 -0400
commit     33906bc5c87b50028364405ec425de9638afc719 (patch)
tree       d5d6f431bd517a4a914972f3ce968dc99de73694 /mm/vmscan.c
parent     c6a8a8c589b53f90854a07db3b5806ce111e826b (diff)
vmscan: tracing: add trace events for kswapd wakeup, sleeping and direct reclaim
Add two trace events for kswapd waking up and going asleep for the purposes of
tracking kswapd activity, and two trace events for direct reclaim beginning and
ending.  The information can be used to work out how much time a process or the
system is spending on the reclamation of pages and, in the case of direct
reclaim, how many pages were reclaimed for that process.  High-frequency
triggering of these events could point to memory pressure problems.

Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Acked-by: Rik van Riel <riel@redhat.com>
Acked-by: Larry Woodman <lwoodman@redhat.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Chris Mason <chris.mason@oracle.com>
Cc: Nick Piggin <npiggin@suse.de>
Cc: Rik van Riel <riel@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Michael Rubin <mrubin@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--  mm/vmscan.c | 24 ++++++++++++++++++++----
1 file changed, 20 insertions(+), 4 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 6dafa45d79e4..c99bc418c4cf 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -48,6 +48,9 @@
 
 #include "internal.h"
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/vmscan.h>
+
 struct scan_control {
 	/* Incremented by the number of inactive pages that were scanned */
 	unsigned long nr_scanned;
@@ -1883,6 +1886,7 @@ out:
 unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
 				gfp_t gfp_mask, nodemask_t *nodemask)
 {
+	unsigned long nr_reclaimed;
 	struct scan_control sc = {
 		.gfp_mask = gfp_mask,
 		.may_writepage = !laptop_mode,
@@ -1895,7 +1899,15 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
 		.nodemask = nodemask,
 	};
 
-	return do_try_to_free_pages(zonelist, &sc);
+	trace_mm_vmscan_direct_reclaim_begin(order,
+				sc.may_writepage,
+				gfp_mask);
+
+	nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
+
+	trace_mm_vmscan_direct_reclaim_end(nr_reclaimed);
+
+	return nr_reclaimed;
 }
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
@@ -2294,9 +2306,10 @@ static int kswapd(void *p)
 			 * premature sleep. If not, then go fully
 			 * to sleep until explicitly woken up
 			 */
-			if (!sleeping_prematurely(pgdat, order, remaining))
+			if (!sleeping_prematurely(pgdat, order, remaining)) {
+				trace_mm_vmscan_kswapd_sleep(pgdat->node_id);
 				schedule();
-			else {
+			} else {
 				if (remaining)
 					count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
 				else
@@ -2316,8 +2329,10 @@ static int kswapd(void *p)
 		 * We can speed up thawing tasks if we don't call balance_pgdat
 		 * after returning from the refrigerator
 		 */
-		if (!ret)
+		if (!ret) {
+			trace_mm_vmscan_kswapd_wake(pgdat->node_id, order);
 			balance_pgdat(pgdat, order);
+		}
 	}
 	return 0;
 }
@@ -2337,6 +2352,7 @@ void wakeup_kswapd(struct zone *zone, int order)
 		return;
 	if (pgdat->kswapd_max_order < order)
 		pgdat->kswapd_max_order = order;
+	trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, zone_idx(zone), order);
 	if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
 		return;
 	if (!waitqueue_active(&pgdat->kswapd_wait))