Diffstat (limited to 'mm')
-rw-r--r--  mm/vmscan.c | 24 ++++++++++++++++++++----
1 file changed, 20 insertions(+), 4 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 6dafa45d79e4..c99bc418c4cf 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -48,6 +48,9 @@
 
 #include "internal.h"
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/vmscan.h>
+
 struct scan_control {
 	/* Incremented by the number of inactive pages that were scanned */
 	unsigned long nr_scanned;
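
CREATE_TRACE_POINTS must be defined in exactly one compilation unit before the trace header is pulled in; it expands the TRACE_EVENT() declarations in <trace/events/vmscan.h> into real tracepoint definitions rather than just prototypes. As a rough sketch (not the literal header added by this patch), the simplest event used below, mm_vmscan_kswapd_sleep, would be declared along these lines:

/*
 * Illustrative TRACE_EVENT() declaration; the in-tree version in
 * include/trace/events/vmscan.h may differ in detail.
 */
TRACE_EVENT(mm_vmscan_kswapd_sleep,

	TP_PROTO(int nid),

	TP_ARGS(nid),

	TP_STRUCT__entry(
		__field(int, nid)	/* NUMA node this kswapd serves */
	),

	TP_fast_assign(
		__entry->nid = nid;
	),

	/* format used when the event is read from the trace buffer */
	TP_printk("nid=%d", __entry->nid)
);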
@@ -1883,6 +1886,7 @@ out:
 unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
 				gfp_t gfp_mask, nodemask_t *nodemask)
 {
+	unsigned long nr_reclaimed;
 	struct scan_control sc = {
 		.gfp_mask = gfp_mask,
 		.may_writepage = !laptop_mode,
@@ -1895,7 +1899,15 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
 		.nodemask = nodemask,
 	};
 
-	return do_try_to_free_pages(zonelist, &sc);
+	trace_mm_vmscan_direct_reclaim_begin(order,
+				sc.may_writepage,
+				gfp_mask);
+
+	nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
+
+	trace_mm_vmscan_direct_reclaim_end(nr_reclaimed);
+
+	return nr_reclaimed;
 }
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
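
The begin/end pair brackets do_try_to_free_pages(), so a tracer that matches the two events per task can measure direct reclaim latency as well as how many pages each stall actually recovered. The begin event records the same three values passed at the call site above; a hedged sketch of its declaration, with the print format an assumption:

/* Sketch only; field names mirror the call-site arguments. */
TRACE_EVENT(mm_vmscan_direct_reclaim_begin,

	TP_PROTO(int order, int may_writepage, gfp_t gfp_flags),

	TP_ARGS(order, may_writepage, gfp_flags),

	TP_STRUCT__entry(
		__field(int, order)
		__field(int, may_writepage)
		__field(gfp_t, gfp_flags)
	),

	TP_fast_assign(
		__entry->order = order;
		__entry->may_writepage = may_writepage;
		__entry->gfp_flags = gfp_flags;
	),

	TP_printk("order=%d may_writepage=%d gfp_flags=0x%x",
		__entry->order, __entry->may_writepage,
		(unsigned int)__entry->gfp_flags)
);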
@@ -2294,9 +2306,10 @@ static int kswapd(void *p)
 				 * premature sleep. If not, then go fully
 				 * to sleep until explicitly woken up
 				 */
-				if (!sleeping_prematurely(pgdat, order, remaining))
+				if (!sleeping_prematurely(pgdat, order, remaining)) {
+					trace_mm_vmscan_kswapd_sleep(pgdat->node_id);
 					schedule();
-				else {
+				} else {
 					if (remaining)
 						count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
 					else
@@ -2316,8 +2329,10 @@ static int kswapd(void *p)
 		 * We can speed up thawing tasks if we don't call balance_pgdat
 		 * after returning from the refrigerator
 		 */
-		if (!ret)
+		if (!ret) {
+			trace_mm_vmscan_kswapd_wake(pgdat->node_id, order);
 			balance_pgdat(pgdat, order);
+		}
 	}
 	return 0;
 }
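
Note the new braces in the two hunks above: the sleep event fires only when sleeping_prematurely() says the sleep is genuine, so sleep and wake events pair one-to-one in the trace, and the wake event carries the allocation order that got kswapd out of bed. None of these tracepoints change reclaim behaviour; they cost nothing until a consumer enables them. A minimal userspace sketch, assuming debugfs is mounted at the conventional /sys/kernel/debug:

/* Enable every vmscan event, then stream the ftrace ring buffer. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd;

	fd = open("/sys/kernel/debug/tracing/events/vmscan/enable", O_WRONLY);
	if (fd < 0 || write(fd, "1", 1) != 1)
		return 1;
	close(fd);

	fd = open("/sys/kernel/debug/tracing/trace_pipe", O_RDONLY);
	if (fd < 0)
		return 1;
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);	/* read blocks until events arrive */
	close(fd);
	return 0;
}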
@@ -2337,6 +2352,7 @@ void wakeup_kswapd(struct zone *zone, int order)
 		return;
 	if (pgdat->kswapd_max_order < order)
 		pgdat->kswapd_max_order = order;
+	trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, zone_idx(zone), order);
 	if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
 		return;
 	if (!waitqueue_active(&pgdat->kswapd_wait))
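
The wakeup_kswapd tracepoint sits before the cpuset and waitqueue early returns, so it logs every wakeup attempt, including ones that find kswapd already awake. When only high-order wakeups are interesting, the per-event ftrace filter file can narrow it down; a sketch, assuming the event declares an "order" field matching the argument traced above:

/* Sketch: keep only high-order kswapd wakeups in the trace. */
#include <fcntl.h>
#include <unistd.h>

static int set_wakeup_filter(void)
{
	static const char filter[] = "order > 0";
	int fd = open("/sys/kernel/debug/tracing/events/vmscan/"
		      "mm_vmscan_wakeup_kswapd/filter", O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, filter, sizeof(filter) - 1) < 0) {
		close(fd);
		return -1;
	}
	return close(fd);
}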